opensvc-1.8~20170412/ 0000755 0001750 0001750 00000000000 13073470116 014246 5 ustar jkelbert jkelbert opensvc-1.8~20170412/inpath.cmd 0000644 0001750 0001750 00000007117 13073467726 016241 0 ustar jkelbert jkelbert @echo off
:addPath pathVar /B
::
:: Safely appends the path contained within variable pathVar to the end
:: of PATH if and only if the path does not already exist within PATH.
::
:: If the case insensitive /B option is specified, then the path is
:: inserted into the front (Beginning) of PATH instead.
::
:: If the pathVar path is fully qualified, then it is logically compared
:: to each fully qualified path within PATH. The path strings are
:: considered a match if they are logically equivalent.
::
:: If the pathVar path is relative, then it is strictly compared to each
:: relative path within PATH. Case differences and double quotes are
:: ignored, but otherwise the path strings must match exactly.
::
:: Before appending the pathVar path, all double quotes are stripped, and
:: then the path is enclosed in double quotes if and only if the path
:: contains at least one semicolon.
::
:: addPath aborts with ERRORLEVEL 2 if pathVar is missing or undefined
:: or if PATH is undefined.
::
::------------------------------------------------------------------------
::
:: Error checking
if "%~1"=="" exit /b 2
if not defined %~1 exit /b 2
if not defined path exit /b 2
::
:: Determine if function was called while delayed expansion was enabled
:: (if it was, the bang in "!" disappears and NotDelayed ends up empty)
setlocal
set "NotDelayed=!"
::
:: Prepare to safely parse PATH into individual paths
setlocal DisableDelayedExpansion
:: Double the quotes and caret-escape cmd metacharacters so arbitrary
:: PATH content survives the normal-expansion substitutions below.
set "var=%path:"=""%"
set "var=%var:^=^^%"
set "var=%var:&=^&%"
set "var=%var:|=^|%"
set "var=%var:<=^<%"
set "var=%var:>=^>%"
set "var=%var:;=^;^;%"
:: Collapse the doubled quotes, then tag quote ("Q) and separator ("S"S)
:: positions with sentinels that survive the expansion-mode switch.
set var=%var:""="%
set "var=%var:"=""Q%"
set "var=%var:;;="S"S%"
set "var=%var:^;^;=;%"
set "var=%var:""="%"
setlocal EnableDelayedExpansion
set "var=!var:"Q=!"
set "var=!var:"S"S=";"!"
::
:: Remove quotes from pathVar and abort if it becomes empty
set "new=!%~1:"^=!"
if not defined new exit /b 2
::
:: Determine if pathVar is fully qualified
echo("!new!"|findstr /i /r /c:^"^^\"[a-zA-Z]:[\\/][^\\/]" ^
/c:^"^^\"[\\][\\]" >nul ^
&& set "abs=1" || set "abs=0"
::
:: For each path in PATH, check if path is fully qualified and then
:: do proper comparison with pathVar. Exit if a match is found.
:: Delayed expansion must be disabled when expanding FOR variables
:: just in case the value contains !
for %%A in ("!new!\") do for %%B in ("!var!") do (
if "!!"=="" setlocal disableDelayedExpansion
for %%C in ("%%~B\") do (
echo(%%B|findstr /i /r /c:^"^^\"[a-zA-Z]:[\\/][^\\/]" ^
/c:^"^^\"[\\][\\]" >nul ^
&& (if %abs%==1 if /i "%%~sA"=="%%~sC" exit /b 0) ^
|| (if %abs%==0 if /i %%A==%%C exit /b 0)
)
)
::
:: Build the modified PATH, enclosing the added path in quotes
:: only if it contains ;
setlocal enableDelayedExpansion
if "!new:;=!" neq "!new!" set new="!new!"
if /i "%~2"=="/B" (set "rtn=!new!;!path!") else set "rtn=!path!;!new!"
::
:: rtn now contains the modified PATH. We need to safely pass the
:: value accross the ENDLOCAL barrier
::
:: Make rtn safe for assignment using normal expansion by replacing
:: % and " with not yet defined FOR variables
set "rtn=!rtn:%%=%%A!"
set "rtn=!rtn:"=%%B!"
::
:: Escape ^ and ! if function was called while delayed expansion was enabled.
:: The trailing ! in the second assignment is critical and must not be removed.
if not defined NotDelayed set "rtn=!rtn:^=^^^^!"
if not defined NotDelayed set "rtn=%rtn:!=^^^!%" !
::
:: Pass the rtn value accross the ENDLOCAL barrier using FOR variables to
:: restore the % and " characters. Again the trailing ! is critical.
for /f "usebackq tokens=1,2" %%A in ('%%^ ^"') do (
endlocal & endlocal & endlocal & endlocal & endlocal
set "path=%rtn%" !
)
exit /b 0 opensvc-1.8~20170412/svcmon.cmd 0000644 0001750 0001750 00000000110 13073467726 016245 0 ustar jkelbert jkelbert @echo off
:: Load the OpenSVC environment (OSVCPYTHONEXEC, OSVCROOT) then run the
:: svcmon entry point with the caller's arguments.
call osvcenv.cmd
"%OSVCPYTHONEXEC%" "%OSVCROOT%\bin\svcmon" %*
:: Load the OpenSVC environment (OSVCPYTHONEXEC, OSVCROOT) then run the
:: nodemgr entry point with the caller's arguments.
call osvcenv.cmd
"%OSVCPYTHONEXEC%" "%OSVCROOT%\bin\nodemgr" %*
:: Load the OpenSVC environment (OSVCPYTHONEXEC, OSVCROOT) then run the
:: svcmgr entry point with the caller's arguments.
call osvcenv.cmd
"%OSVCPYTHONEXEC%" "%OSVCROOT%\bin\svcmgr" %*
OpenSVC agent
OpenSVC automation and configuration management agent
opensvc-1.8~20170412/bin/init/opensvc.init.Darwin 0000755 0001750 0001750 00000001713 13073467726 021570 0 ustar jkelbert jkelbert #!/bin/bash
#
# Starts the services driven by OpenSVC
#
# description: Starts the services driven by OpenSVC whose
# autostart node is this node.
# processname:

PATH=/usr/bin:/usr/sbin:$PATH

DEFAULTS="/etc/defaults/opensvc"
OSVC_BOOT_OPTS="--parallel"
OSVC_ROOT_PATH="/usr/share/opensvc"

# Include opensvc defaults if available
[ -r "$DEFAULTS" ] && . "$DEFAULTS"

# Compat: honor legacy lowercase variable names from older defaults files
[ -n "$osvc_opts" ] && OSVC_BOOT_OPTS=${osvc_opts}
[ -n "$osvc_background" ] && OSVC_BACKGROUND=${osvc_background}

allservices=${OSVC_ROOT_PATH}/bin/svcmgr

case $1 in
start)
    # Wait for network interfaces to be configured before booting services
    ipconfig waitall
    echo "OpenSVC : Pushing node information"
    ${OSVC_ROOT_PATH}/bin/nodemgr pushasset
    echo
    echo "OpenSVC : Starting Services"
    # POSIX '=' instead of the bash-only '==', and an explicit if/else
    # instead of the fragile 'test && { } || { }' chain.
    if [ "${OSVC_BACKGROUND}" = "true" ]; then
        ${allservices} ${OSVC_BOOT_OPTS} boot &
    else
        ${allservices} ${OSVC_BOOT_OPTS} boot
    fi
    ;;
stop)
    echo "OpenSVC : Stopping Services"
    ${allservices} ${OSVC_BOOT_OPTS} shutdown
    ;;
esac
opensvc-1.8~20170412/bin/init/opensvc.init.redhat 0000755 0001750 0001750 00000001721 13073467726 021612 0 ustar jkelbert jkelbert #!/bin/bash
#
# /etc/rc.d/init.d/opensvc
#
# Starts the services driven by OpenSVC
#
# chkconfig: 2345 99 01
# description: Starts the services driven by OpenSVC whose
# autostart node is this node.
# processname:

# Source function library.
. /etc/init.d/functions

DEFAULTS="/etc/sysconfig/opensvc"
OSVC_BOOT_OPTS="--parallel"
OSVC_ROOT_PATH="/usr/share/opensvc"

# Include opensvc defaults if available
[ -r "$DEFAULTS" ] && . "$DEFAULTS"

# Compat: honor legacy lowercase variable names from older defaults files
[ -n "$osvc_opts" ] && OSVC_BOOT_OPTS=${osvc_opts}
[ -n "$osvc_background" ] && OSVC_BACKGROUND=${osvc_background}

allservices=${OSVC_ROOT_PATH}/bin/svcmgr

case $1 in
start)
    ${OSVC_ROOT_PATH}/bin/nodemgr pushasset
    # POSIX '=' instead of '==', explicit if/else instead of '&& { } || { }'
    if [ "${OSVC_BACKGROUND}" = "true" ]; then
        ${allservices} ${OSVC_BOOT_OPTS} boot &
    else
        ${allservices} ${OSVC_BOOT_OPTS} boot
    fi
    # subsys lock tells rc that the service is running
    [ -d /var/lock/subsys ] && touch /var/lock/subsys/opensvc
    ;;
stop)
    ${allservices} ${OSVC_BOOT_OPTS} shutdown
    rm -f /var/lock/subsys/opensvc
    ;;
esac
opensvc-1.8~20170412/bin/init/opensvc.init.SunOS 0000755 0001750 0001750 00000001441 13073467726 021351 0 ustar jkelbert jkelbert #!/bin/sh
#
# /etc/init.d/opensvc
#
# Starts the services driven by OpenSVC
#
# description: Starts the services driven by OpenSVC whose
# autostart node is this node.
# processname:

DEFAULTS="/etc/default/opensvc"
OSVC_BOOT_OPTS="--parallel"
OSVC_ROOT_PATH="/usr/share/opensvc"

# Include opensvc defaults if available
[ -r "$DEFAULTS" ] && . "$DEFAULTS"

# Compat: honor legacy lowercase variable names from older defaults files
[ -n "$osvc_opts" ] && OSVC_BOOT_OPTS=${osvc_opts}
[ -n "$osvc_background" ] && OSVC_BACKGROUND=${osvc_background}

allservices=${OSVC_ROOT_PATH}/bin/svcmgr

case $1 in
start)
    ${OSVC_ROOT_PATH}/bin/nodemgr pushasset
    # '==' is not valid in Solaris /bin/sh test; use the POSIX '='
    if [ "${OSVC_BACKGROUND}" = "true" ]; then
        ${allservices} ${OSVC_BOOT_OPTS} boot &
    else
        ${allservices} ${OSVC_BOOT_OPTS} boot
    fi
    ;;
stop)
    ${allservices} ${OSVC_BOOT_OPTS} shutdown
    ;;
esac
opensvc-1.8~20170412/bin/init/opensvc.init.AIX 0000755 0001750 0001750 00000001141 13073467726 020760 0 ustar jkelbert jkelbert #!/bin/ksh
DEFAULTS="/etc/default/opensvc"
OSVC_BOOT_OPTS="--parallel"
OSVC_ROOT_PATH="/usr/share/opensvc"

# Include opensvc defaults if available
[ -r "$DEFAULTS" ] && . "$DEFAULTS"

# Compat: honor legacy lowercase variable names from older defaults files
[ -n "$osvc_opts" ] && OSVC_BOOT_OPTS=${osvc_opts}
[ -n "$osvc_background" ] && OSVC_BACKGROUND=${osvc_background}

allservices=${OSVC_ROOT_PATH}/bin/svcmgr

case $1 in
start)
    ${OSVC_ROOT_PATH}/bin/nodemgr pushasset
    # POSIX '=' instead of '==', explicit if/else instead of '&& { } || { }'
    if [ "${OSVC_BACKGROUND}" = "true" ]; then
        ${allservices} ${OSVC_BOOT_OPTS} boot &
    else
        ${allservices} ${OSVC_BOOT_OPTS} boot
    fi
    ;;
stop)
    ${allservices} ${OSVC_BOOT_OPTS} shutdown
    ;;
esac
opensvc-1.8~20170412/bin/init/darwin.com.opensvc.svcmgr.plist 0000644 0001750 0001750 00000001125 13073467726 024067 0 ustar jkelbert jkelbert
Label
com.opensvc.svcmgr
ProgramArguments
/usr/share/opensvc/bin/init/opensvc.init.Darwin
start
RunAtLoad
StandardErrorPath
/var/log/opensvc/svcmgr_boot_stderr.log
StandardOutPath
/var/log/opensvc/svcmgr_boot_stdout.log
opensvc-1.8~20170412/bin/init/opensvc.init.FreeBSD 0000755 0001750 0001750 00000001304 13073467726 021552 0 ustar jkelbert jkelbert #!/bin/sh
#
# PROVIDE: opensvc
# REQUIRE: LOGIN sshd cleanvar
# BEFORE:
# KEYWORD: shutdown

DEFAULTS="/etc/defaults/opensvc"
OSVC_BOOT_OPTS="--parallel"
OSVC_ROOT_PATH="/usr/share/opensvc"

# Include opensvc defaults if available
[ -r "$DEFAULTS" ] && . "$DEFAULTS"

# Compat: honor legacy lowercase variable names from older defaults files
[ -n "$osvc_opts" ] && OSVC_BOOT_OPTS=${osvc_opts}
[ -n "$osvc_background" ] && OSVC_BACKGROUND=${osvc_background}

allservices=${OSVC_ROOT_PATH}/bin/svcmgr

case $1 in
faststart)
    ${OSVC_ROOT_PATH}/bin/nodemgr pushasset
    # '==' is not guaranteed in /bin/sh test; use the POSIX '='
    if [ "${OSVC_BACKGROUND}" = "true" ]; then
        ${allservices} ${OSVC_BOOT_OPTS} boot &
    else
        ${allservices} ${OSVC_BOOT_OPTS} boot
    fi
    ;;
faststop)
    ${allservices} ${OSVC_BOOT_OPTS} shutdown
    ;;
esac
opensvc-1.8~20170412/bin/init/opensvc.init.hpux 0000755 0001750 0001750 00000002070 13073467726 021325 0 ustar jkelbert jkelbert #!/bin/sh
PATH=/sbin:/usr/sbin:/bin:/usr/bin
export PATH

DEFAULTS="/etc/rc.config.d/opensvc"
OSVC_BOOT_OPTS="--parallel"
OSVC_ROOT_PATH="/usr/share/opensvc"

# Include opensvc defaults if available
[ -r "$DEFAULTS" ] && . "$DEFAULTS"

# Compat: honor legacy lowercase variable names from older defaults files
[ -n "$osvc_opts" ] && OSVC_BOOT_OPTS=${osvc_opts}
[ -n "$osvc_background" ] && OSVC_BACKGROUND=${osvc_background}

allservices=${OSVC_ROOT_PATH}/bin/svcmgr

# RUN_OPENSVC may be unset when the rc.config.d file is absent; default
# to 0 so the integer tests below cannot fail on an empty operand.
RUN_OPENSVC=${RUN_OPENSVC:-0}

case $1 in
start_msg)
    if [ "$RUN_OPENSVC" -ne 0 ] ; then
        echo "Starting opensvc services"
    fi
    ;;
start)
    if [ "$RUN_OPENSVC" -ne 0 ] ; then
        echo "Starting opensvc services"
    else
        exit 0
    fi
    ${OSVC_ROOT_PATH}/bin/nodemgr collect stats
    ${OSVC_ROOT_PATH}/bin/nodemgr pushasset
    # '==' is not valid in the HP-UX POSIX shell test; use '='
    if [ "${OSVC_BACKGROUND}" = "true" ]; then
        ${allservices} ${OSVC_BOOT_OPTS} boot &
    else
        ${allservices} ${OSVC_BOOT_OPTS} boot
    fi
    ;;
stop_msg)
    if [ "$RUN_OPENSVC" -ne 0 ] ; then
        echo "Shutting down opensvc services"
    fi
    ;;
stop)
    if [ "$RUN_OPENSVC" -ne 0 ] ; then
        echo "Shutting down opensvc services"
    else
        exit 0
    fi
    ${allservices} ${OSVC_BOOT_OPTS} shutdown
    ;;
esac
opensvc-1.8~20170412/bin/init/opensvc.init.debian 0000755 0001750 0001750 00000001533 13073467726 021566 0 ustar jkelbert jkelbert #!/bin/bash
### BEGIN INIT INFO
# Provides: opensvc
# Required-Start: $all
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: OpenSVC services startup script
### END INIT INFO

DEFAULTS="/etc/default/opensvc"
OSVC_BOOT_OPTS="--parallel"
OSVC_ROOT_PATH="/usr/share/opensvc"

# Include opensvc defaults if available
[ -r "$DEFAULTS" ] && . "$DEFAULTS"

# Compat: honor legacy lowercase variable names from older defaults files
[ -n "$osvc_opts" ] && OSVC_BOOT_OPTS=${osvc_opts}
[ -n "$osvc_background" ] && OSVC_BACKGROUND=${osvc_background}

allservices=${OSVC_ROOT_PATH}/bin/svcmgr

case $1 in
start)
    ${OSVC_ROOT_PATH}/bin/nodemgr pushasset
    # POSIX '=' instead of '==', explicit if/else instead of '&& { } || { }'
    if [ "${OSVC_BACKGROUND}" = "true" ]; then
        ${allservices} ${OSVC_BOOT_OPTS} boot &
    else
        ${allservices} ${OSVC_BOOT_OPTS} boot
    fi
    ;;
stop)
    ${allservices} ${OSVC_BOOT_OPTS} shutdown
    ;;
esac
opensvc-1.8~20170412/bin/init/opensvc.init.OSF1 0000755 0001750 0001750 00000001154 13073467726 021053 0 ustar jkelbert jkelbert #!/usr/bin/ksh
DEFAULTS="/etc/default/opensvc"
OSVC_BOOT_OPTS="--parallel"
OSVC_ROOT_PATH="/usr/share/opensvc"

# Include opensvc defaults if available
[ -r "$DEFAULTS" ] && . "$DEFAULTS"

# Compat: honor legacy lowercase variable names from older defaults files
[ -n "$osvc_opts" ] && OSVC_BOOT_OPTS=${osvc_opts}
[ -n "$osvc_background" ] && OSVC_BACKGROUND=${osvc_background}

allservices=${OSVC_ROOT_PATH}/bin/svcmgr

case $1 in
start)
    ${OSVC_ROOT_PATH}/bin/nodemgr pushasset
    # POSIX '=' instead of '==', explicit if/else instead of '&& { } || { }'
    if [ "${OSVC_BACKGROUND}" = "true" ]; then
        ${allservices} ${OSVC_BOOT_OPTS} boot &
    else
        ${allservices} ${OSVC_BOOT_OPTS} boot
    fi
    ;;
stop)
    ${allservices} ${OSVC_BOOT_OPTS} shutdown
    ;;
esac
opensvc-1.8~20170412/bin/init/opensvc.init.suse 0000755 0001750 0001750 00000001537 13073467726 021327 0 ustar jkelbert jkelbert #!/bin/sh -e
### BEGIN INIT INFO
# Provides: opensvc
# Required-Start: $all
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: OpenSVC services startup script
### END INIT INFO

DEFAULTS="/etc/sysconfig/opensvc"
OSVC_BOOT_OPTS="--parallel"
OSVC_ROOT_PATH="/usr/share/opensvc"

# Include opensvc defaults if available
[ -r "$DEFAULTS" ] && . "$DEFAULTS"

# Compat: honor legacy lowercase variable names from older defaults files
[ -n "$osvc_opts" ] && OSVC_BOOT_OPTS=${osvc_opts}
[ -n "$osvc_background" ] && OSVC_BACKGROUND=${osvc_background}

allservices=${OSVC_ROOT_PATH}/bin/svcmgr

case $1 in
start)
    ${OSVC_ROOT_PATH}/bin/nodemgr pushasset
    # '==' is not guaranteed in /bin/sh test; use the POSIX '='
    if [ "${OSVC_BACKGROUND}" = "true" ]; then
        ${allservices} ${OSVC_BOOT_OPTS} boot &
    else
        ${allservices} ${OSVC_BOOT_OPTS} boot
    fi
    ;;
stop)
    ${allservices} ${OSVC_BOOT_OPTS} shutdown
    ;;
esac
opensvc-1.8~20170412/bin/init/systemd.opensvc-agent.service 0000644 0001750 0001750 00000000716 13073467726 023624 0 ustar jkelbert jkelbert [Unit]
Description=OpenSVC Agent
Documentation=http://docs.opensvc.com/ file:/usr/share/doc/opensvc/
# After= only orders against network-online.target; a Wants= is required
# to actually pull that target into the boot transaction (systemd.special(7)).
Wants=network-online.target
After=network.target network-online.target

[Service]
Type=idle
Environment="PATH=/opt/opensvc/bin:/opt/opensvc/etc:/etc/opensvc:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
ExecStart=/usr/share/opensvc/bin/init/opensvc.init start
ExecStop=/usr/share/opensvc/bin/init/opensvc.init stop
# The launcher exits after booting the services; keep the unit active so
# ExecStop still runs at shutdown.
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
opensvc-1.8~20170412/bin/opensvc 0000755 0001750 0001750 00000002444 13073467726 016442 0 ustar jkelbert jkelbert #!/bin/sh
# variables users can override in the defaults file
OSVC_ROOT_PATH="/usr/share/opensvc"
OSVC_PYTHON="python"
OSVC_PYTHON_ARGS=""

# Source the first readable per-OS defaults file
if [ -r "/etc/defaults/opensvc" ]
then
    # FreeBSD, Darwin
    . "/etc/defaults/opensvc"
elif [ -r "/etc/default/opensvc" ]
then
    # Debian-like, Tru64, SunOS and HP-UX
    . "/etc/default/opensvc"
elif [ -r "/etc/sysconfig/opensvc" ]
then
    # Red Hat-like
    . "/etc/sysconfig/opensvc"
elif [ -r "/etc/rc.config.d/opensvc" ]
then
    # AIX
    . "/etc/rc.config.d/opensvc"
fi

# $(...) instead of backticks; quote $0 so paths with spaces survive
BASENAME=$(basename -- "$0")

case "$BASENAME" in
opensvc)
    #
    # Use me as the shebang for python modules to be garantied the
    # same python requirements than the agent are met (ie 2.6+).
    #
    # Example: #!/usr/bin/env opensvc
    #
    exec "$OSVC_PYTHON" "$@"
    ;;
*.cluster)
    BIN_ARGS="--cluster"
    # parameter expansion replaces the former 'echo | sed' subshell
    BASENAME=${BASENAME%.cluster}
    ;;
*.stonith)
    set --
    BIN_ARGS="stonith --cluster"
    BASENAME=${BASENAME%.stonith}
    ;;
esac

# '||' between tests replaces the deprecated '[ a -o b ]' form
if [ "$BASENAME" = "nodemgr" ] || [ "$BASENAME" = "svcmgr" ] || [ "$BASENAME" = "svcmon" ]
then
    unset OSVC_SERVICE_LINK
    BIN="$OSVC_ROOT_PATH/lib/$BASENAME.py"
else
    # exec from a service link: add the --service parameter
    OSVC_SERVICE_LINK=$BASENAME
    export OSVC_SERVICE_LINK
    BIN="$OSVC_ROOT_PATH/lib/svcmgr.py"
fi

# OSVC_PYTHON_ARGS and BIN_ARGS are intentionally unquoted: they may
# carry several words
"$OSVC_PYTHON" $OSVC_PYTHON_ARGS "$BIN" $BIN_ARGS "$@"
opensvc-1.8~20170412/bin/svcmgr 0000777 0001750 0001750 00000000000 13073467726 017654 2opensvc ustar jkelbert jkelbert opensvc-1.8~20170412/bin/postinstall 0000755 0001750 0001750 00000113351 13073467726 017341 0 ustar jkelbert jkelbert #!/usr/bin/env python
import os
import errno
import shutil
import glob
import sys
import tempfile
import inspect
# Platform detection: os.uname() does not exist on Windows, fall back
# to the platform module there.
try:
    sysname, nodename, x, x, machine = os.uname()
except:
    import platform
    sysname, nodename, x, x, machine, x = platform.uname()

# Directory hosting this postinstall script; used to decide whether the
# agent is installed in the LSB layout or in a self-contained tree.
postinstall_d = sys.path[0]
if '/catalog/' in postinstall_d:
    # hpux packaging subsystem executes the postinstall from dir
    # /var/tmp/XXXXXXXXX/catalog/opensvc/commands/
    postinstall_d = "/usr/share/opensvc/bin"
    lsb = True
elif postinstall_d == "/usr/share/opensvc/bin":
    lsb = True
else:
    # windows install or unix execution from a non-lsb tree (ex: /opt/opensvc/)
    lsb = False

# Resolve every agent path for the detected layout.
if lsb:
    pathsbin = "/usr/bin"
    pathsvc = None
    pathetc = "/etc/opensvc"
    pathvar = "/var/lib/opensvc"
    pathlck = '/var/lib/opensvc/lock'
    pathtmp = "/var/tmp/opensvc"
    pathlog = "/var/log/opensvc"
    pathbin = "/usr/share/opensvc/bin"
    pathlib = "/usr/share/opensvc/lib"
    pathini = "/usr/share/opensvc/bin/init"
    pathusr = None
else:
    pathsbin = postinstall_d
    pathsvc = os.path.realpath(os.path.join(pathsbin, '..'))
    pathetc = os.path.join(pathsvc, 'etc')
    pathvar = os.path.join(pathsvc, 'var')
    pathlck = os.path.join(pathvar, 'lock')
    pathtmp = os.path.join(pathsvc, 'tmp')
    pathlog = os.path.join(pathsvc, 'log')
    pathbin = postinstall_d
    pathlib = os.path.join(pathsvc, 'lib')
    pathini = os.path.join(pathsvc, 'bin', 'init')
    pathusr = os.path.join(pathsvc, 'usr')
def make_sure_path_exists(path):
    # mkdir -p equivalent: ignore "already exists", re-raise anything else.
    try:
        os.makedirs(path, 0755)
    except OSError, exception:
        if exception.errno != errno.EEXIST:
            raise
def logit(msg,stdout=False,stderr=False):
    # Append msg to pathlog/postinstall.log, tagged with the calling
    # function's name and a timestamp; optionally echo to stdout and/or
    # stderr (stderr lines get an "error ==> " prefix).
    curframe = inspect.currentframe()
    calframe = inspect.getouterframes(curframe, 2)
    try:
        import datetime
        timestamp = str(datetime.datetime.now())
    except:
        timestamp = '?'
    osvlog = os.path.join(pathlog, 'postinstall.log')
    # calframe[1][3] is the caller's function name
    content = '['+calframe[1][3]+']['+timestamp+'] '+msg
    f = open(osvlog, 'a')
    f.write(content+'\n')
    f.close()
    if stdout:
        print msg
    if stderr:
        print >>sys.stderr, "error ==> " + msg
# The log directory must exist before the first logit() call.
make_sure_path_exists(pathlog)
logit("\nStarting OpenSVC postinstall\n",stdout=True)

# Solaris relocatable package install (pkgadd -R <root>): every system
# path must then be prefixed with PKG_INSTALL_ROOT.
SolarisRootRelocate = False
if sysname == 'SunOS' and "PKG_INSTALL_ROOT" in os.environ and os.environ['PKG_INSTALL_ROOT'] != '/':
    logit("SunOS PKG_INSTALL_ROOT <%s>"%(os.environ['PKG_INSTALL_ROOT']))
    SolarisRootRelocate = True

# Dump the resolved layout to the postinstall log for support purposes.
variables = {
    "pathsvc": pathsvc,
    "pathsbin": pathsbin,
    "pathbin": pathbin,
    "pathetc": pathetc,
    "pathvar": pathvar,
    "pathtmp": pathtmp,
    "pathlib": pathlib,
    "pathlog": pathlog,
    "pathusr": pathusr,
    "SolarisRootRelocate": SolarisRootRelocate
}
for key in variables:
    logit("var %s <%s>"%(key,variables[key]))
def install_cron():
    # Dispatch to the per-OS scheduler installer. On Windows the
    # scheduling is handled by the OsvcSched service, not by cron.
    logit("begin")
    if sysname == 'Windows':
        logit("windows not applicable")
        return
    else:
        return install_cron_unix()

def install_cron_windows():
    # (Re)install the OsvcSched Windows service: stop, remove, install,
    # then start.
    logit("begin")
    logit("install OsvcSched service",stdout=True)
    schedstop()
    schedremove()
    schedinstall()
    schedstart()

def schedremove():
    # Remove the OsvcSched Windows service.
    logit("begin")
    cmd = ' remove'
    schedcmd(cmd)

def schedstart():
    # Start the OsvcSched Windows service.
    logit("begin")
    cmd = ' start'
    schedcmd(cmd)

def schedstop():
    # Stop the OsvcSched Windows service.
    logit("begin")
    cmd = ' stop'
    schedcmd(cmd)

def schedinstall():
    # Install the OsvcSched Windows service, auto-started as LocalSystem.
    logit("begin")
    cmd = ''
    cmd += ' --username LocalSystem'
    cmd += ' --startup auto'
    cmd += ' install\n'
    schedcmd(cmd)

def schedcmd(_cmd):
    # Run rcWinScheduler.py with _cmd appended, through a throw-away
    # .cmd wrapper dropped in pathtmp (spaces in paths are handled by
    # the quoting inside the wrapper).
    logit("begin")
    logit("_cmd %s"%_cmd)
    rc = '"'+sys.executable+'" "'+os.path.join(pathlib, 'rcWinScheduler.py')+'"'
    cmd = "@echo off\n"
    cmd += rc
    cmd += _cmd
    fd, fname = tempfile.mkstemp(dir=pathtmp, suffix='.cmd')
    f = os.fdopen(fd, 'w')
    f.write(cmd)
    f.close()
    import subprocess
    subprocess.call([fname])
    os.unlink(fname)
def save_file(infile):
    # Backup infile into pathtmp with a '.crontab.<timestamp>' suffix.
    # Returns True on success or when infile does not exist, False on
    # copy failure.
    logit("begin")
    logit("infile <%s>"%infile)
    if not os.path.exists(infile):
        return True
    try:
        import datetime
        timestamp = str(datetime.datetime.now())
        tmp = timestamp.replace(" ", ".")
        ts = tmp.replace(":", ".")
    except:
        ts = 'opensvc.postinstall'
    ofname = os.path.basename(infile)
    logit("ofname <%s>"%ofname)
    nfname = ofname + '.crontab.' + ts
    logit("nfname <%s>"%nfname)
    outfile = os.path.join(os.sep, pathtmp, nfname)
    logit("outfile <%s>"%outfile)
    logit("saving file <%s> to <%s>"%(infile,outfile),stdout=True)
    try:
        shutil.copyfile(infile, outfile)
    except:
        import traceback
        traceback.print_exc()
        logit("error while trying to save file <%s> to <%s>"%(infile,outfile),stderr=True)
        return False
    return True
def install_cron_unix():
    logit("begin")
    """install opensvc cron jobs
    """
    # ce: the crontab entries we own. 'marker' identifies a pre-existing
    # line to rewrite, 'reset_sched' forces our schedule over the user's.
    nodemgr = os.path.join(pathsbin, 'nodemgr')
    ce = [{
        'sched': "* * * * *",
        'reset_sched': True,
        'user': "",
        'cmd': "[ -x "+nodemgr+" ] && "+nodemgr+" schedulers >/dev/null 2>&1",
        'marker': nodemgr + ' schedulers',
        'ok': False
    }]
    # Obsolete entries from previous agent releases, dropped on sight.
    remove_entries = [
        'bin/nodemgr compliance check',
        'bin/svcmon ',
        'bin/cron/opensvc',
        'svcmgr resource monitor',
        'svcmgr resource_monitor',
        'nodemgr cron',
        'perfagt.'+sysname,
    ]
    purge = []
    root_crontab = False
    """ order of preference
    """
    if sysname == 'SunOS' :
        if SolarisRootRelocate is True:
            suncron = os.environ["PKG_INSTALL_ROOT"] + '/var/spool/cron/crontabs/root'
            root_crontab_locs = [
                suncron
            ]
        else:
            root_crontab_locs = [ '/var/spool/cron/crontabs/root' ]
    else:
        root_crontab_locs = [
            '/etc/cron.d/opensvc',
            '/var/spool/cron/crontabs/root',
            '/var/spool/cron/root',
            '/var/cron/tabs/root',
            '/usr/lib/cron/tabs/root',
        ]
    # First location whose parent dir exists wins; later locations that
    # exist as files are queued for purge of our legacy entries.
    for loc in root_crontab_locs:
        logit("looping crontab location <%s>"%loc)
        if os.path.exists(os.path.dirname(loc)):
            if not root_crontab:
                root_crontab = loc
                logit("identifying <%s> as root crontab"%root_crontab)
                if root_crontab == '/etc/cron.d/opensvc':
                    # cron.d fragments need an explicit user field
                    logit("assigning root:root as crontab owner")
                    ce[0]['user'] = "root"
            elif os.path.exists(loc):
                logit("adding <%s> to purge table"%loc)
                purge.append(loc)
    if not root_crontab:
        logit("no root crontab found in usual locations <%s>"%str(root_crontab_locs),stderr=True)
        return False
    ce[0]['full'] = ' '.join([ce[0]['sched'], ce[0]['user'], ce[0]['cmd']])
    logit("osvcagt crontab entry <%s>"%ce[0]['full'])
    new = False
    if os.path.exists(root_crontab):
        # Rewrite matching lines in place, de-duplicate, append if missing.
        try:
            f = open(root_crontab, 'r')
            new = f.readlines()
            f.close()
            logit("loaded crontab <%s> content <%s>"%(root_crontab,new))
        except:
            f.close()
            import traceback
            traceback.print_exc()
        i = -1
        for line in new:
            i += 1
            for c in ce:
                if c['full'] is None:
                    continue
                if line.find(c['marker']) != -1:
                    if line.strip().startswith("#"):
                        # commented-out by the admin: leave untouched
                        continue
                    if c['ok']:
                        # duplicate entry: blank it
                        new[i] = ""
                        continue
                    if c['reset_sched']:
                        sched = c['sched']
                    else:
                        # preserve the admin's schedule (first 5 fields)
                        sched = ' '.join(line.split()[:5])
                    new[i] = ' '.join([sched, c['user'], c['cmd']])+'\n'
                    c['ok'] = True
        for c in ce:
            if c['full'] is not None and not c['ok']:
                new.append(c['full']+'\n')
    else:
        new = []
        for c in ce:
            if c['full'] is not None and not c['ok']:
                new.append(c['full']+'\n')
        logit("no crontab <%s>. building new content <%s>"%(root_crontab,new))
    if not new:
        logit("problem preparing the new crontab",stderr=True)
        return False
    # Drop obsolete entries from previous releases.
    i = -1
    for line in new:
        i += 1
        for re in remove_entries:
            logit("looping re <%s>"%re)
            if line.find(re) != -1:
                logit("delete line <%s> from <%s>"%(re,root_crontab))
                del new[i]
    logit("saving crontab <%s>"%root_crontab)
    try:
        save_file(root_crontab)
    except:
        logit('Error while trying to backup crontab <%s>. skipping crontab update'%(root_crontab),stderr=True)
        return False
    logit("updating crontab <%s> with content <%s>"%(root_crontab,new))
    try:
        f = open(root_crontab, 'w')
        f.write(''.join(new))
        f.close()
    except:
        logit("error while trying to update crontab %s"%root_crontab,stderr=True)
        f.close()
        import traceback
        traceback.print_exc()
    """ Activate changes (actually only needed on HP-UX)
    """
    if sysname in ("HP-UX", "SunOS") and root_crontab.find('/var/spool/') != -1:
        logit("crontab activation requested")
        cmd = ['crontab', root_crontab]
        ret = os.system(' '.join(cmd))
    # Strip our legacy entries from the non-selected crontab locations.
    for loc in purge:
        try:
            f = open(loc, 'r')
            new = [ line for line in f.readlines() if line.find('opensvc.daily') == -1 and line.find('svcmon --updatedb') == -1 ]
            f.close()
            f = open(loc, 'w')
            f.write(''.join(new))
            f.close()
        except:
            f.close()
            import traceback
            traceback.print_exc()
    """ Clean up old standard file locations
    """
    for f in ['/etc/cron.daily/opensvc', '/etc/cron.daily/opensvc.daily']:
        if os.path.exists(f):
            logit("removing %s"%f)
            os.unlink(f)
def activate_chkconfig(svc):
    # Register svc with the SysV init sequence through chkconfig.
    # Returns True when chkconfig exits 0, False otherwise.
    logit("begin")
    status = os.system('chkconfig --add ' + svc)
    return status <= 0
def activate_systemd(launcher):
logit("begin")
systemdsvc = 'opensvc-agent.service'
# populate systemd tree with opensvc unit file
src = os.path.join(pathini, 'systemd.opensvc-agent.service')
dst = os.path.join('/etc/systemd/system/', systemdsvc)
logit("installing systemd unit file",stdout=True)
try:
shutil.copyfile(src, dst)
os.chmod(dst, 0644)
except:
logit("issue met while trying to install systemd unit file",stderr=True)
# add symlink to resolve systemd service call
systemd_call = os.path.join(pathini, "opensvc.init")
if not os.path.islink(systemd_call):
if os.path.exists(systemd_call):
logit("removing %s"%systemd_call)
os.unlink(systemd_call)
msg = "create link %s -> %s"%(systemd_call, launcher)
logit(msg)
try:
os.symlink(launcher, systemd_call)
except:
logit("issue met while trying to create %s symlink" % system_call,stderr=True)
# set systemd call as ExecStart and ExecStop
os.system("sed -i 's@/usr/share/opensvc/bin/init/opensvc.init@"+systemd_call+"@' "+dst)
# reload systemd configuration
logit("reloading systemd configuration",stdout=True)
cmd = ['systemctl', '-q', 'daemon-reload']
ret = os.system(' '.join(cmd))
if ret > 0:
logit("issue met during systemctl reload",stderr=True)
# enable opensvc agent startup through systemd
logit("enabling systemd configuration")
cmd = ['systemctl', '-q', 'enable', systemdsvc]
ret = os.system(' '.join(cmd))
if ret > 0:
logit("issue met during systemctl enable",stderr=True)
def systemd_mgmt():
    # True when systemd manages this host, detected by probing
    # 'systemctl --version' with all output discarded.
    logit("begin")
    probe = 'systemctl --version >>/dev/null 2>&1'
    return os.system(probe) <= 0
def activate_ovm(launcher):
    # Oracle VM server: the init script is installed as 'zopensvc' so it
    # sorts after the Oracle VM init scripts.
    logit("begin")
    activate_chkconfig('zopensvc')

def activate_redhat(launcher):
    # Red Hat-like without systemd: register with chkconfig.
    logit("begin")
    activate_chkconfig('opensvc')
def activate_debian(launcher):
    # Refresh the SysV rc links with update-rc.d: force-remove stale
    # links first, then install the defaults. Returns False on failure.
    logit("begin")
    cmd = ['update-rc.d', '-f', 'opensvc', 'remove']
    ret = os.system(' '.join(cmd))
    if ret > 0:
        logit("issue met while trying to remove opensvc rc launchers",stderr=True)
        return False
    cmd = ['update-rc.d', 'opensvc', 'defaults']
    ret = os.system(' '.join(cmd))
    if ret > 0:
        logit("issue met while trying to install opensvc rc launchers",stderr=True)
        return False
    return True
def activate_hpux(launcher):
    # Install the HP-UX rc kill/start links and enable the service in
    # /etc/rc.config.d/opensvc (RUN_OPENSVC=1).
    logit("begin")
    rc = "/sbin/init.d/opensvc"
    links = ["/sbin/rc1.d/K010opensvc", "/sbin/rc2.d/K010opensvc", "/sbin/rc3.d/S990opensvc"]
    # drop the legacy rc2 start link installed by older releases
    if os.path.exists("/sbin/rc2.d/S990opensvc"):
        logit("removing /sbin/rc2.d/S990opensvc")
        os.unlink("/sbin/rc2.d/S990opensvc")
    for l in links:
        if not os.path.islink(l):
            if os.path.exists(l):
                logit("removing %s"%l)
                os.unlink(l)
            logit("create link %s -> %s"%(l,rc))
            os.symlink(rc, l)
    try:
        f = open("/etc/rc.config.d/opensvc", "w")
        f.write("RUN_OPENSVC=1\n")
        f.close()
    except:
        logit("issue met while trying to install rc.config.d opensvc file",stderr=True)
        f.close()
        import traceback
        traceback.print_exc()
    return True
def activate_AIX(launcher):
    # Install the AIX rc start link pointing at the init script.
    logit("begin")
    rc = "/etc/rc.d/init.d/opensvc"
    links = ["/etc/rc.d/rc2.d/S990opensvc"]
    for l in links:
        if not os.path.islink(l):
            if os.path.exists(l):
                # a plain file shadows the link target: replace it
                logit("removing %s"%l)
                os.unlink(l)
            logit("create link %s -> %s"%(l,rc))
            os.symlink(rc, l)
    return True
def activate_OSF1(launcher):
    # Install the Tru64 rc kill/start links pointing at the init script.
    logit("begin")
    rc = "/sbin/init.d/opensvc"
    links = ["/sbin/rc0.d/K010opensvc", "/sbin/rc2.d/K010opensvc", "/sbin/rc3.d/S990opensvc"]
    for l in links:
        if not os.path.islink(l):
            if os.path.exists(l):
                logit("removing %s"%l)
                os.unlink(l)
            logit("symlinking %s and %s"%(rc,l))
            os.symlink(rc, l)
    return True
def activate_SunOS(launcher):
    # Install the Solaris rc kill/start links; under a relocatable
    # pkgadd -R install only the links are prefixed, the link target
    # stays absolute for the installed system.
    logit("begin")
    if SolarisRootRelocate is True:
        rc = "/etc/init.d/opensvc"
        links = [os.environ["PKG_INSTALL_ROOT"] + "/etc/rc0.d/K00opensvc", os.environ["PKG_INSTALL_ROOT"] + "/etc/rc3.d/S99opensvc"]
    else:
        rc = "/etc/init.d/opensvc"
        links = ["/etc/rc0.d/K00opensvc", "/etc/rc3.d/S99opensvc"]
    logit("rc <%s>"%rc)
    for l in links:
        logit("link <%s>"%l)
        if not os.path.islink(l):
            if os.path.exists(l):
                logit("removing %s"%l)
                os.unlink(l)
            logit("symlinking %s and %s"%(rc,l))
            os.symlink(rc, l)
    return True
def activate_FreeBSD(launcher):
    # Nothing to do: the rc script PROVIDE/REQUIRE header drives rcorder.
    logit("begin")
    return True

def activate_Darwin(launcher):
    # Nothing to do: launchd loads the plist from /Library/LaunchDaemons.
    logit("begin")
    return True
def update_file(filename, srctext, replacetext):
    logit("begin")
    """ replace into filename srctext by replacetext
    """
    import fileinput
    # inplace=1 redirects stdout into the edited file, so the print done
    # by logit(stdout=True) writes each (possibly replaced) line back.
    for line in fileinput.input(filename, inplace=1):
        if line.rstrip('\n') == srctext.rstrip('\n') :
            line = replacetext
        msg=line.rstrip('\n')
        logit(msg,stdout=True)
    fileinput.close()
def install_params(path2file):
    logit("begin")
    """ install template file with tunable variables
    """
    # never overwrite an existing admin-maintained file
    if os.path.exists(path2file):
        logit("file %s already present"%path2file)
        return
    try:
        f = open(path2file, "w")
    except:
        import traceback
        traceback.print_exc()
    else:
        logit("writing new <%s>"%path2file)
        f.write("# OpenSVC startup and wrapper configuration file\n")
        f.write("#\n")
        f.write("# You may need to adapt parameters to fit your environment\n")
        f.write("# This file is not modified during software upgrades\n")
        f.write("# If empty, default settings are used in the init script\n\n\n")
        f.write("\n")
        f.write("#\n")
        f.write("# Arguments passed to the 'svcmgr boot' command at system boot\n")
        f.write("#\n")
        f.write("#OSVC_BOOT_OPTS=\n")
        f.write("\n")
        f.write("#\n")
        f.write("# If set to true, the OpenSVC launcher will start in the\n")
        f.write("# background, avoiding timeouts in init managers. The default\n")
        f.write("# is to launch services in the foreground.\n")
        f.write("#\n")
        f.write("#OSVC_BACKGROUND=true\n")
        f.write("\n")
        f.write("#\n")
        f.write("# Wrapper configuration\n")
        f.write("#\n")
        f.write("#OSVC_ROOT_PATH=/opt/opensvc\n")
        f.write("#OSVC_PYTHON=python\n")
        f.write("#LD_LIBRARY_PATH=\n")
        f.write("#LD_PRELOAD=\n")
        f.write("\n")
        f.close()
def install_rc():
    logit("begin")
    """install startup script
    """
    # params: optional defaults file path; copyrc: False when systemd
    # symlinks the launcher itself instead of a copied rc script.
    params = None
    copyrc = True
    if os.path.exists('/etc/debian_version'):
        rc = '/etc/init.d/opensvc'
        params = '/etc/default/opensvc'
        src = os.path.join(pathini, 'opensvc.init.debian')
        if systemd_mgmt():
            logit("debian with systemd")
            copyrc = False
            activate = activate_systemd
        else:
            logit("debian with update-rc.d (rely on insserv)")
            activate = activate_debian
    elif os.path.exists('/etc/SuSE-release'):
        rc = '/etc/init.d/opensvc'
        params = '/etc/sysconfig/opensvc'
        src = os.path.join(pathini, 'opensvc.init.suse')
        if systemd_mgmt():
            logit("SuSE with systemd")
            copyrc = False
            activate = activate_systemd
        else:
            logit("SuSE with chkconfig (rely on insserv)")
            activate = activate_redhat
    elif os.path.exists('/etc/redhat-release'):
        params = '/etc/sysconfig/opensvc'
        src = os.path.join(pathini, 'opensvc.init.redhat')
        try:
            f = open('/etc/redhat-release', 'r')
            buff = f.read()
            f.close()
        except:
            buff = ""
        if buff.find('Oracle VM server') != -1:
            # OVM: install as zopensvc to sort after the OVM scripts
            rc = '/etc/init.d/zopensvc'
            activate = activate_ovm
        else:
            rc = '/etc/init.d/opensvc'
            if systemd_mgmt():
                logit("Red Hat with systemd")
                copyrc = False
                activate = activate_systemd
            else:
                logit("Red Hat with chkconfig (rely on insserv)")
                activate = activate_redhat
    elif sysname == "HP-UX":
        rc = '/sbin/init.d/opensvc'
        src = os.path.join(pathini, 'opensvc.init.hpux')
        activate = activate_hpux
    elif sysname == "SunOS":
        if SolarisRootRelocate is True:
            rc = os.environ["PKG_INSTALL_ROOT"] + '/etc/init.d/opensvc'
            params = os.environ["PKG_INSTALL_ROOT"] + '/etc/default/opensvc'
            src = os.environ["PKG_INSTALL_ROOT"] + os.path.join(pathini, 'opensvc.init.SunOS')
        else:
            rc = '/etc/init.d/opensvc'
            params = '/etc/default/opensvc'
            src = os.path.join(pathini, 'opensvc.init.SunOS')
        activate = activate_SunOS
    elif sysname == "OSF1":
        rc = '/sbin/init.d/opensvc'
        src = os.path.join(pathini, 'opensvc.init.OSF1')
        activate = activate_OSF1
    elif sysname == "FreeBSD":
        rc = '/etc/rc.d/opensvc'
        params = '/etc/defaults/opensvc'
        src = os.path.join(pathini, 'opensvc.init.FreeBSD')
        activate = activate_FreeBSD
    elif sysname == "AIX":
        rc = '/etc/rc.d/init.d/opensvc'
        src = os.path.join(pathini, 'opensvc.init.AIX')
        activate = activate_AIX
    elif sysname == "Darwin":
        rc = '/Library/LaunchDaemons/com.opensvc.svcmgr.plist'
        params = '/etc/defaults/opensvc'
        src = os.path.join(pathini, 'darwin.com.opensvc.svcmgr.plist')
        activate = activate_Darwin
    elif sysname == 'Windows':
        return False
    else:
        logit("could not select an init script: unsupported operating system",stderr=True)
        return False
    if os.path.islink(rc):
        logit("removing link %s"%rc)
        os.unlink(rc)
    if copyrc:
        logit("copying src launcher script to rc")
        shutil.copyfile(src, rc)
        os.chmod(rc, 0755)
    if params is not None and not os.path.exists(params):
        logit("installing default parameters file")
        install_params(params)
    activate(src)
def gen_keys():
    """Generate a passphrase-less 2048-bit rsa ssh keypair for root,
    unless a keypair already exists. No-op on Windows."""
    logit("begin")
    if sysname == 'Windows':
        return
    home = os.path.expanduser("~root")
    logit("home <%s>"%home)
    # relocatable Solaris package install: prefix root's home with PKG_INSTALL_ROOT
    if SolarisRootRelocate is True:
        home = os.environ['PKG_INSTALL_ROOT'] + os.path.expanduser("~root")
        logit("SunOS and relocatable install home is now <%s>"%home)
    sshhome = os.path.join(home, ".ssh")
    logit("sshhome <%s>"%sshhome)
    if not os.path.exists(sshhome):
        logit("create dir %s"%sshhome,stdout=True)
        os.makedirs(sshhome, 0700)
    priv = os.path.join(sshhome, "id_rsa")
    pub = os.path.join(sshhome, "id_rsa.pub")
    if os.path.exists(pub) or os.path.exists(priv):
        logit("either %s or %s already exist"%(pub,priv))
        return
    # -P '""' requests an empty passphrase; the command is run through a shell
    cmd = ['ssh-keygen', '-t', 'rsa', '-b', '2048', '-P', '""', '-f', priv]
    try:
        # NOTE(review): the exit code is captured but never checked -- confirm
        ret = os.system(' '.join(cmd))
    except:
        logit("Error while trying to generate ssh keys")
def missing_dir(pathd):
logit("begin")
if not os.path.exists(pathd):
logit("create dir %s"%pathd,stdout=True)
os.makedirs(pathd, 0755)
def missing_dirs():
    """Ensure every opensvc runtime directory exists."""
    logit("begin")
    for runtime_dir in (pathlog, pathtmp, pathvar, pathetc, pathlck):
        missing_dir(runtime_dir)
def convert_svclinks():
    """Repoint service symlinks in pathetc from the legacy rcService
    launcher to the svcmgr command. Returns 1 when a prerequisite path
    is missing, None otherwise."""
    logit("begin")
    missing_dir(pathetc)
    svcmgr = os.path.join(pathsbin, 'svcmgr')
    if not os.path.exists(svcmgr):
        logit("%s does not exist"%svcmgr)
        return 1
    rcService = os.path.realpath(os.path.join(pathbin, 'rcService'))
    if not os.path.exists(rcService):
        logit("%s does not exist"%rcService)
        return 1
    for entry in os.listdir(pathetc):
        lpath = os.path.join(pathetc, entry)
        # only symlinks pointing at the old rcService launcher are converted
        if not os.path.islink(lpath):
            logit("%s is not a symlink"%lpath)
            continue
        resolved = os.path.realpath(lpath)
        if resolved != rcService:
            logit("%s != %s"%(resolved,rcService))
            continue
        logit("removing %s"%lpath)
        os.unlink(lpath)
        logit("create link %s -> %s"%(lpath,svcmgr))
        os.symlink(svcmgr, lpath)
def move_env_to_conf():
    """Rename legacy <svcname>.env service configuration files in
    pathetc to <svcname>.conf."""
    for envpath in glob.glob(os.path.join(pathetc, "*.env")):
        svcname = os.path.basename(envpath)[:-4]
        shutil.move(envpath, os.path.join(pathetc, svcname + ".conf"))
def move_var_files_in_subdirs():
    """Migrate flat status files from pathvar into per-scope
    subdirectories: node-level files to var/node/, service-level files
    to var/<svcname>/."""
    # node-level "last_*" files -> var/node/
    for fpath in glob.glob(os.path.join(pathvar, "last_*")):
        dst = os.path.join(pathvar, "node")
        if not os.path.exists(dst):
            os.makedirs(dst)
        fname = os.path.basename(fpath)
        new_fpath = os.path.join(dst, fname)
        logit("move %s to %s" % (fpath, new_fpath))
        shutil.move(fpath, new_fpath)
    # "<svcname>_last_*" files -> var/<svcname>/last_*
    for fpath in glob.glob(os.path.join(pathvar, "*_last_*")):
        fname = os.path.basename(fpath)
        svcname = fname.split("_last_")[0]
        dst = os.path.join(pathvar, svcname)
        if not os.path.exists(dst):
            os.makedirs(dst)
        # strip the leading "<svcname>_" prefix from the file name
        fname = fname.replace(svcname+"_", "")
        new_fpath = os.path.join(dst, fname)
        logit("move %s to %s" % (fpath, new_fpath))
        shutil.move(fpath, new_fpath)
    # "<svcname>.push" markers -> var/<svcname>/last_pushed_env
    for fpath in glob.glob(os.path.join(pathvar, "*.push")):
        svcname = os.path.basename(fpath).split(".push")[0]
        dst = os.path.join(pathvar, svcname)
        if not os.path.exists(dst):
            os.makedirs(dst)
        fname = "last_pushed_env"
        new_fpath = os.path.join(dst, fname)
        logit("move %s to %s" % (fpath, new_fpath))
        shutil.move(fpath, new_fpath)
def move_usr_to_opt():
    """Migrate var/etc data from the legacy /usr/local/opensvc tree to
    the current install paths, then remove the old tree and its
    /service symlink. Existing destination files are preserved, except
    host_mode in var which is always refreshed."""
    logit("begin")
    linksvc = os.path.join(os.sep, 'service')
    old_pathsvc = os.path.join(os.sep, 'usr', 'local', 'opensvc')
    old_pathvar = os.path.join(old_pathsvc, 'var')
    old_pathetc = os.path.join(old_pathsvc, 'etc')
    if os.path.exists(old_pathvar):
        logit("found old var %s"%old_pathvar)
        for f in glob.glob(old_pathvar+'/*'):
            dst = os.path.join(pathvar, os.path.basename(f))
            # keep existing destinations, but always refresh host_mode
            if os.path.exists(dst) and dst.find('host_mode') == -1:
                logit("file %s already exist"%dst)
                continue
            if os.path.isdir(f):
                logit("copying dir %s to %s"%(f,dst))
                shutil.copytree(f, dst, symlinks=True)
            elif os.path.islink(f):
                linkto = os.readlink(f)
                # fix: original referenced an undefined 'linto' variable
                # here, raising NameError whenever a symlink was migrated
                logit("create link %s -> %s"%(dst,linkto))
                os.symlink(linkto, dst)
            else:
                logit("copying file %s to %s"%(f,dst))
                shutil.copy2(f, dst)
    if os.path.exists(old_pathetc):
        logit("found old etc %s"%old_pathetc)
        for f in glob.glob(old_pathetc+'/*'):
            dst = os.path.join(pathetc, os.path.basename(f))
            if os.path.exists(dst):
                logit("file %s already exist"%dst)
                continue
            if os.path.islink(f):
                linkto = os.readlink(f)
                # fix: same undefined 'linto' reference as above
                logit("create link %s -> %s"%(dst,linkto))
                os.symlink(linkto, dst)
            elif os.path.isdir(f):
                logit("copying dir %s to %s"%(f,dst))
                shutil.copytree(f, dst, symlinks=True)
            else:
                logit("copying file %s to %s"%(f,dst))
                shutil.copy2(f, dst)
    if os.path.exists(old_pathsvc):
        logit("removing old_pathsvc %s"%old_pathsvc)
        shutil.rmtree(old_pathsvc)
    # drop the compatibility /service symlink if it points at the old tree
    if os.path.islink(linksvc) and os.path.realpath(linksvc) == old_pathsvc:
        logit("removing linksvc %s"%linksvc)
        os.unlink(linksvc)
def install_etc_path():
    """Append pathbin and pathetc to the /etc/PATH file (HP-UX style
    colon-separated path list) when they are not already listed."""
    logit("begin")
    p = os.path.join(os.sep, 'etc', 'PATH')
    if not os.path.exists(p):
        logit("etc/PATH not found")
        return
    try:
        logit("loading %s"%(p))
        f = open(p, "r")
        buff = f.read()
        f.close()
    except:
        logit("issue met while trying to read %s"%(p),stderr=True)
        return
    dirs = buff.strip().split(":")
    count_before = len(dirs)
    for op in (pathbin, pathetc):
        if op in dirs:
            logit("dir %s already present in %s"%(op,p))
        else:
            logit("adding dir %s"%(op))
            dirs.append(op)
    # rewrite only when something was actually appended
    if len(dirs) == count_before:
        logit("nothing changed in %s"%(p))
        return
    try:
        logit("updating %s"%(p))
        f = open(p, "w")
        f.write(":".join(dirs)+'\n')
        f.close()
    except:
        logit("issue met while trying to write %s"%(p),stderr=True)
        return
def install_profile():
    """Install /etc/profile.d/opensvc.sh, a login snippet appending
    pathetc and pathsbin to the users PATH when missing."""
    logit("begin")
    prof_d = os.path.join(os.sep, 'etc', 'profile.d')
    prof = os.path.join(prof_d, 'opensvc.sh')
    # NOTE(review): first test uses grep -q, second uses grep -qw -- confirm
    # whether the pathetc test should also be word-anchored
    buff = "if ! echo ${PATH} | grep -q "+pathetc+"; then"+"\n"
    buff = buff+" PATH=${PATH}:"+pathetc+"\n"
    buff = buff+"fi\n\n"
    buff = buff+"if ! echo ${PATH} | grep -qw "+pathsbin+"; then"+"\n"
    buff = buff+" PATH=${PATH}:"+pathsbin+"\n"
    buff = buff+"fi\n"
    if not os.path.exists(prof_d):
        logit("no profile directory found")
        return
    try:
        logit("installing profile in file %s"%(prof))
        f = open(prof, 'w')
        f.write(buff)
        f.close()
    except:
        logit("issue met while trying to install profile in file %s"%(prof),stderr=True)
        # fix: only close if open() succeeded; the original unconditionally
        # called f.close() here, raising NameError when open() itself failed
        # and skipping the traceback dump below
        try:
            f.close()
        except Exception:
            pass
        import traceback
        traceback.print_exc()
def install_bash_completion():
    """Install the bash completion file into the first existing
    completion directory among the known candidate locations."""
    logit("begin")
    if pathsvc is None:
        return
    src = os.path.join(pathsvc, 'usr', 'share', 'bash_completion.d', 'opensvc')
    ds = [os.path.join(os.sep, 'etc', 'bash_completion.d'),
          os.path.join(os.sep, 'etc', 'bash', 'bash_completion.d')]
    for d in ds:
        # dst is computed first so it matches d when we break out
        dst = os.path.join(d, 'opensvc')
        if not os.path.exists(d):
            # d doubles as a found-flag: None after the loop means no
            # candidate directory exists
            d = None
            continue
        else:
            break
    if d is None:
        logit("no bash completion directory found")
        return
    try:
        logit("installing bash completion file src %s to tgt %s"%(src,dst))
        shutil.copyfile(src, dst)
        os.chmod(dst, 0644)
    except:
        logit("issue met while trying to install bash completion file src %s to tgt %s"%(src,dst))
def install_link(source, target):
    """Create the symlink target -> source, replacing any pre-existing
    file or link at target.

    Returns False on empty parameters, True when the link is already
    correct, None otherwise (creation attempted, best effort)."""
    logit("begin")
    if source == '' or target == '':
        logit("bad parameters")
        return False
    if os.path.realpath(source) == os.path.realpath(target):
        logit("link already ok")
        return True
    if os.path.islink(target) or os.path.exists(target):
        # fix: the original called logit("unlink %s", target), passing
        # target as a stray positional argument instead of %-formatting
        # it into the message like every other logit call in this file
        logit("unlink %s"%target)
        os.unlink(target)
    try:
        logit("create link %s -> %s"%(target,source))
        os.symlink(source,target)
    except:
        logit("issue met while trying to symlink src %s with tgt %s"%(source,target))
def install_pythonlink():
    """Dispatch to the platform-specific python link installer."""
    logit("begin")
    if sysname != 'Windows':
        return install_pythonlink_unix()
    return install_pythonlink_windows()
def install_pythonlink_windows():
    """Windows: generate osvcenv.cmd in the install folder, using the
    install path recorded in the registry under HKCU Software/OpenSVC."""
    logit("begin")
    logit("before appending pathlib to syspath")
    logit(os.environ["PATH"])
    # make the bundled rc* modules importable
    sys.path = [pathlib] + sys.path
    logit("after appending pathlib to syspath")
    logit(os.environ["PATH"])
    from rcUtilitiesWindows import get_registry_value
    logit("before reading installfolder in registry")
    try:
        installfolder = get_registry_value('HKEY_CURRENT_USER', 'Software\\OpenSVC', 'path')
    except:
        logit("issue met while trying to read path into registry HKCU/Software/OpenSVC/path",stderr=True)
        sys.exit(1)
    installfolder = installfolder.rstrip('\\')
    logit("installfolder = <"+installfolder+">")
    osvcenv = os.path.join(installfolder, 'osvcenv.cmd')
    # NOTE(review): '\p' and '\l' below are not python escape sequences, so
    # the single backslashes survive into the generated cmd file as intended
    content = '@echo off\nset OSVCROOT='+installfolder+'\nset OSVCPYTHONROOT=%OSVCROOT%\python\nset PYTHONPATH=%OSVCROOT%\lib\nset OSVCPYTHONEXEC=%OSVCPYTHONROOT%\python.exe\ncall inpath.cmd OSVCPYTHONROOT'
    logit(content)
    f = open(osvcenv, 'w')
    f.write(content)
    f.close()
def move_host_mode():
    """Migrate the legacy var/host_mode file content to the
    node.host_mode parameter of etc/node.conf, through nodemgr."""
    logit("begin")
    hm = os.path.join(pathvar, 'host_mode')
    cf = os.path.join(pathetc, 'node.conf')
    nodemgr = os.path.join(pathsbin, 'nodemgr')
    if not os.path.exists(hm):
        logit("file %s does not exist"%hm)
        return
    try:
        fp = open(hm, 'r')
        # first whitespace-separated token is the mode value
        mode = fp.read().split()[0]
        fp.close()
    except:
        logit("failed to read old host_mode. renamed to %s"%(hm+'.old'))
        shutil.move(hm, hm+'.old')
        return
    cmd = [nodemgr, 'set', '--param', 'node.host_mode', '--value', mode]
    ret = os.system(' '.join(cmd))
    if ret != 0:
        logit("failed to set host_mode in node.conf",stdout=True)
        return
    # keep the old file around for reference, renamed
    shutil.move(hm, hm+'.old')
def nodeconf_params():
    """Convert legacy etc/node.conf parameters to the current schema:
    drop DEFAULT options and the [sync] section, and rename the
    *_interval, *_days and *_period options to interval/days/period."""
    logit("begin")
    nodeconf = os.path.join(pathetc, 'node.conf')
    dotnodeconf = os.path.join(pathetc, '.node.conf')
    # reset etc/.node.conf (autogenerated)
    if os.path.exists(dotnodeconf):
        logit("unlink file %s"%dotnodeconf)
        os.unlink(dotnodeconf)
    if not os.path.exists(nodeconf):
        logit("file %s does not exist"%nodeconf)
        return
    import ConfigParser
    import copy
    try:
        config = ConfigParser.RawConfigParser()
    except AttributeError:
        logit("issue occured while trying to instantiate configparser")
        return
    config.read(nodeconf)
    changed = False
    # no DEFAULT in etc/node.conf
    for o in copy.copy(config.defaults()):
        logit("removing DEFAULT in node.conf")
        config.remove_option('DEFAULT', o)
        changed = True
    # sync section goes to etc/.node.conf
    if config.has_section('sync'):
        logit("removing sync in node.conf")
        config.remove_section('sync')
        changed = True
    for s in config.sections():
        for o in config.options(s):
            if o in ['sync_interval', 'push_interval', 'comp_check_interval']:
                logit("looping %s"%o)
                v = config.getint(s, o)
                config.remove_option(s, o)
                config.set(s, 'interval', v)
                changed = True
            if o in ['sync_days', 'push_days', 'comp_check_days']:
                logit("looping %s"%o)
                v = config.get(s, o)
                config.remove_option(s, o)
                config.set(s, 'days', v)
                changed = True
            if o in ['sync_period', 'push_period', 'comp_check_period']:
                logit("looping %s"%o)
                v = config.get(s, o)
                config.remove_option(s, o)
                config.set(s, 'period', v)
                changed = True
    # persist the converted configuration only if something changed
    if changed:
        logit("writing new node.conf")
        try:
            fp = open(nodeconf, 'w')
            config.write(fp)
            fp.close()
        except:
            logit("failed to write new %s"%nodeconf,stderr=True)
def save_exc():
    """Save the current exception stack trace to a uniquely named file
    under pathtmp. Best effort: never raises itself."""
    logit("begin")
    import traceback
    try:
        import tempfile
        try:
            import datetime
            now = str(datetime.datetime.now()).replace(' ', '-')
        except:
            now =""
        try:
            f = tempfile.NamedTemporaryFile(dir=pathtmp, prefix='exc-'+now+'-')
        except:
            return
        # NOTE(review): close() deletes the NamedTemporaryFile; its name is
        # then reused to create a regular file -- tiny race window, presumably
        # acceptable here -- confirm
        f.close()
        f = open(f.name, 'w')
        traceback.print_exc(file=f)
        logit("unexpected error. stack saved in %s"%f.name,stderr=True)
        f.close()
    except:
        logit("unexpected error",stderr=True)
        traceback.print_exc()
def purge_collector_api_cache():
    """Remove the cached collector api description file from pathvar."""
    logit("begin")
    cache_path = os.path.join(pathvar, "collector")
    # isfile() implies existence, so a single test is enough
    if os.path.isfile(cache_path):
        logit("unlink file %s"%cache_path)
        os.unlink(cache_path)
def chmod_directories():
    """Force 0755 permissions on every directory under the install
    trees (pathbin, pathlib, pathusr). Unix only."""
    logit("begin")
    if not hasattr(os, "walk"):
        # very old python without os.walk
        logit("os.walk not available")
        return
    if sysname == 'Windows':
        logit("skip : unsupported on Windows")
        return
    for d in (pathbin, pathlib, pathusr):
        if d is None:
            continue
        for dirname, dirnames, filenames in os.walk(d):
            for subdirname in dirnames:
                dirpath = os.path.join(dirname, subdirname)
                try:
                    os.chmod(dirpath, 0755)
                    msg = "setting %s permissions to 0755" % dirpath
                except:
                    msg = "issue met while trying to set %s permissions to 0755" % dirpath
                logit(msg)
def log_file_info(path):
    """Log owner uid/gid and permission bits of path. Uses lstat, so
    symlinks are reported on, not followed."""
    try:
        info = os.lstat(path)
    except:
        msg = "issue met while trying to get [%s] os.lstat information" % path
        logit(msg)
        return
    string = "uid[%d] gid[%d] perms[%s] file[%s]" % (info.st_uid, info.st_gid, oct(info.st_mode & 0777), path)
    logit(string)
def dump_install_content():
    """Log lstat information for every directory and file under the
    install trees (pathbin, pathlib, pathusr). Unix only."""
    logit("begin")
    if sysname == 'Windows':
        logit("skip : unsupported on Windows")
        return
    if not hasattr(os, "walk"):
        logit("os.walk not available")
        return
    for root in (pathbin, pathlib, pathusr):
        if root is None:
            continue
        for parent, dnames, fnames in os.walk(root):
            # directories first, then files, matching the walk order
            for name in dnames + fnames:
                log_file_info(os.path.join(parent, name))
def convert_to_lsb():
    """One-shot migration of configuration and var data from the legacy
    /opt/opensvc tree to the LSB locations (pathetc / pathvar).
    Only runs when pathetc is empty and /opt/opensvc exists."""
    logit("begin")
    if sysname == 'Windows':
        logit("skip : unsupported on Windows")
        return
    if len(glob.glob(pathetc+"/*")) > 0:
        logit("skip : skip convert to lsb because /etc/opensvc/ is not empty")
        return
    if not os.path.exists("/opt/opensvc"):
        logit("skip : skip convert to lsb because /opt/opensvc/ does not exist")
        return
    # flat configuration-ish files
    for p in glob.glob("/opt/opensvc/etc/*conf") + glob.glob("/opt/opensvc/etc/sssu") + glob.glob("/opt/opensvc/etc/*pem") + glob.glob("/opt/opensvc/etc/*pub"):
        logit("migrate " + p)
        shutil.copy(p, pathetc)
    # service env files: copy content and recreate the svcmgr launcher link
    for p in glob.glob("/opt/opensvc/etc/*.env"):
        logit("migrate " + p)
        svcname = os.path.basename(p)[:-4]
        shutil.copy(os.path.realpath(p), pathetc)
        os.symlink("/usr/bin/svcmgr", os.path.join(pathetc, svcname))
    # scripts directories (*.d, *.dir): preserve symlinks, re-rooting
    # absolute targets that pointed inside the old etc tree
    for p in glob.glob("/opt/opensvc/etc/*.d") + glob.glob("/opt/opensvc/etc/*.dir"):
        logit("migrate " + p)
        if os.path.islink(p):
            bp = os.path.basename(p)
            linkto = os.readlink(p)
            if linkto.startswith("/opt/opensvc/etc"):
                # fix: str.replace returns a new string; the original
                # discarded the result, so the link target was never
                # re-rooted
                linkto = linkto.replace("/opt/opensvc/etc/", "")
            dst = os.path.join(pathetc, bp)
            os.symlink(linkto, dst)
        elif os.path.isdir(p):
            bp = os.path.basename(p)
            dst = os.path.join(pathetc, bp)
            shutil.copytree(p, dst, symlinks=True)
        else:
            shutil.copy(p, pathetc)
    # var data, migrated best effort; btrfs snapshots are skipped
    for p in glob.glob("/opt/opensvc/var/*"):
        if os.path.basename(p) == "btrfs":
            continue
        logit("migrate " + p)
        bp = os.path.basename(p)
        dst = os.path.join(pathvar, bp)
        if os.path.exists(dst):
            continue
        if os.path.isdir(p):
            try:
                shutil.copytree(p, dst, symlinks=True)
            except:
                # best effort for var
                pass
        else:
            shutil.copy(p, pathvar)
# postinstall main sequence: run every migration/installation step in
# order; on any error, dump the stack to a temp file and exit non-zero
try:
    move_var_files_in_subdirs()
    move_usr_to_opt()
    missing_dirs()
    convert_svclinks()
    install_cron()
    install_rc()
    gen_keys()
    install_profile()
    install_etc_path()
    install_bash_completion()
    move_host_mode()
    nodeconf_params()
    purge_collector_api_cache()
    chmod_directories()
    convert_to_lsb()
    move_env_to_conf()
    dump_install_content()
    logit("\nOpenSVC postinstall terminated\n",stdout=True)
except:
    save_exc()
    sys.exit(1)
opensvc-1.8~20170412/bin/nodemgr 0000777 0001750 0001750 00000000000 13073467726 020006 2opensvc ustar jkelbert jkelbert opensvc-1.8~20170412/bin/postinstall.cmd 0000644 0001750 0001750 00000000661 13073467726 020077 0 ustar jkelbert jkelbert @echo off
rem postinstall.cmd - run by the Windows installer with the OpenSVC
rem install folder as first argument.
set OSVCROOT=%~1
rem strip a trailing backslash, if any
if %OSVCROOT:~-1%==\ set OSVCROOT=%OSVCROOT:~0,-1%
set OSVCPYTHONROOT=%OSVCROOT%\python
set OSVCPYTHONEXEC=%OSVCPYTHONROOT%\python.exe
set PYTHONPATH=%OSVCROOT%\lib
rem add the install root and the bundled python to PATH (inpath.cmd
rem only appends when the directory is not already present)
call "%OSVCROOT%\inpath.cmd" OSVCROOT
call "%OSVCROOT%\inpath.cmd" OSVCPYTHONROOT
rem run the python postinstall script with the bundled interpreter
"%OSVCPYTHONEXEC%" "%OSVCROOT%\bin\postinstall"
if errorlevel 1 (
  echo Failure Reason Given is %errorlevel%
  pause
  exit /b %errorlevel%
)
exit /b 0
opensvc-1.8~20170412/bin/svcmon 0000777 0001750 0001750 00000000000 13073467726 017660 2opensvc ustar jkelbert jkelbert opensvc-1.8~20170412/var/ 0000755 0001750 0001750 00000000000 13073467726 015053 5 ustar jkelbert jkelbert opensvc-1.8~20170412/var/compliance/ 0000755 0001750 0001750 00000000000 13073467726 017165 5 ustar jkelbert jkelbert opensvc-1.8~20170412/var/compliance/com.opensvc/ 0000755 0001750 0001750 00000000000 13073467726 021417 5 ustar jkelbert jkelbert opensvc-1.8~20170412/var/compliance/com.opensvc/comp.py 0000644 0001750 0001750 00000031710 13073467726 022731 0 ustar jkelbert jkelbert #!/usr/bin/env python
from __future__ import print_function
import sys
import os
import re
import json
import base64
if sys.version_info[0] >= 3:
from urllib.request import Request, urlopen
from urllib.error import HTTPError
from urllib.parse import urlencode
else:
from urllib2 import Request, urlopen
from urllib2 import HTTPError
from urllib import urlencode
# compliance check return codes
RET_OK = 0    # compliant / success
RET_ERR = 1   # not compliant / error
RET_NA = 2    # not applicable on this host
RET = RET_OK  # module-level default overall return value
class NotApplicable(Exception):
    """Raised when a module has no rules or does not apply to this
    host; makes main() exit with RET_NA."""
    pass
class Unfixable(Exception):
    """Raised by compliance objects when a detected issue can not be
    fixed automatically."""
    pass
class ComplianceError(Exception):
    """Raised on compliance check/fix errors; makes main() exit with
    RET_ERR."""
    pass
class InitError(Exception):
    """Raised when a compliance object fails to initialize."""
    pass
class EndRecursion(Exception):
    """Internal control-flow exception used by CompObject.subst() to
    stop the substitution recursion."""
    pass
def pinfo(*args, **kwargs):
    """print() wrapper forcing output to stdout.

    NOTE(review): both guard tests below operate on the ``args`` tuple
    itself, which is never a str nor a list, so they never trigger;
    presumably the intent was to skip empty messages -- confirm before
    relying on that behavior.
    """
    if is_string(args) and len(args):
        return
    if isinstance(args, list) and (len(args) == 0 or len(args[0]) == 0):
        return
    kwargs["file"] = sys.stdout
    print(*args, **kwargs)
def perror(*args, **kwargs):
    """print() wrapper forcing output to stderr.

    NOTE(review): same ineffective guards as pinfo() -- the tests on the
    ``args`` tuple never trigger; confirm intent.
    """
    if is_string(args) and len(args):
        return
    if isinstance(args, list) and (len(args) == 0 or len(args[0]) == 0):
        return
    kwargs["file"] = sys.stderr
    print(*args, **kwargs)
def is_string(s):
    """Return True when s is a text type: str, plus unicode on python2."""
    if sys.version_info[0] == 2:
        text_types = (str, unicode)
    else:
        text_types = str
    return isinstance(s, text_types)
def bdecode(buff):
    """Decode bytes to text on python3 (utf-8, ascii fallback);
    passthrough on python2."""
    if sys.version_info[0] < 3:
        return buff
    try:
        return str(buff, "utf-8")
    except:
        return str(buff, "ascii")
def bencode(buff):
    """Encode text to bytes on python3 (utf-8, ascii fallback);
    passthrough on python2."""
    if sys.version_info[0] < 3:
        return buff
    try:
        return bytes(buff, "utf-8")
    except:
        return bytes(buff, "ascii")
class CompObject(object):
    """Base class for compliance modules.

    Rules are fed through environment variables whose names start with
    self.prefix; values are JSON, possibly containing %%ENV:VAR%%
    references to other variables. Subclasses implement check(), fix()
    and fixable(), and may defer setup to init().
    """
    def __init__(self,
                 prefix=None,
                 data={}):
        # NOTE(review): data={} is a mutable default argument; it is only
        # read here so it is harmless, but do not mutate it
        if prefix:
            self.prefix = prefix.upper()
        elif "default_prefix" in data:
            self.prefix = data["default_prefix"].upper()
        else:
            self.prefix = "MAGIX12345"
        self.extra_syntax_parms = data.get("extra_syntax_parms")
        self.example_value = data.get("example_value", "")
        self.example_kwargs = data.get("example_kwargs", {})
        self.example_env = data.get("example_env", {})
        self.description = data.get("description", "(no description)")
        self.form_definition = data.get("form_definition", "(no form definition)")
        self.init_done = False

    def __getattribute__(self, s):
        # lazily run init() the first time check/fix/fixable is accessed
        if not object.__getattribute__(self, "init_done") and s in ("check", "fix", "fixable"):
            object.__setattr__(self, "init_done", True)
            object.__getattribute__(self, "init")()
        return object.__getattribute__(self, s)

    def init(self):
        # hook for subclasses needing deferred initialization
        pass

    def test(self):
        """Re-init with the example kwargs/env, then run check()."""
        self.__init__(**self.example_kwargs)
        self.prefix = "OSVC_COMP_CO_TEST"
        for k, v in self.example_env.items():
            self.set_env(k, v)
        self.set_env(self.prefix, self.example_value)
        return self.check()

    def info(self):
        """Print description, example rule and form definition as rst."""
        def indent(text):
            lines = text.split("\n")
            return "\n".join([" "+line for line in lines])
        s = ""
        s += "Description\n"
        s += "===========\n"
        s += "\n"
        s += indent(self.description)+"\n"
        s += "\n"
        s += "Example rule\n"
        s += "============\n"
        s += "\n::\n\n"
        s += indent(json.dumps(json.loads(self.example_value), indent=4, separators=(',', ': ')))+"\n"
        s += "\n"
        s += "Form definition\n"
        s += "===============\n"
        s += "\n::\n\n"
        s += indent(self.form_definition)+"\n"
        s += "\n"
        pinfo(s)

    def set_prefix(self, prefix):
        self.prefix = prefix.upper()

    def set_env(self, k, v):
        # python2: values come in as utf-8 bytes, decode before os.environ
        if sys.version_info[0] < 3:
            v = v.decode("utf-8")
        os.environ[k] = v

    def get_env(self, k):
        # python2: hand rules to callers as utf-8 bytes
        s = os.environ[k]
        if sys.version_info[0] < 3:
            s = s.encode("utf-8")
        return s

    def get_rules_raw(self):
        """Return the substituted but unparsed rule strings."""
        rules = []
        for k in [key for key in os.environ if key.startswith(self.prefix)]:
            s = self.subst(self.get_env(k))
            rules += [s]
        if len(rules) == 0:
            raise NotApplicable("no rules (%s)" % self.prefix)
        return rules

    def encode_data(self, data):
        """python2 only: recursively encode unicode strings in a parsed
        rule structure to utf-8 bytes. No-op on python3."""
        if sys.version_info[0] > 2:
            return data
        if type(data) == dict:
            for k in data:
                if isinstance(data[k], (str, unicode)):
                    data[k] = data[k].encode("utf-8")
                elif isinstance(data[k], (list, dict)):
                    data[k] = self.encode_data(data[k])
        elif type(data) == list:
            for i, v in enumerate(data):
                if isinstance(v, (str, unicode)):
                    data[i] = v.encode("utf-8")
                elif isinstance(data[i], (list, dict)):
                    data[i] = self.encode_data(data[i])
        return data

    def get_rules(self):
        """Return the parsed (and py2-encoded) rule values only."""
        return [self.encode_data(v[1]) for v in self.get_rule_items()]

    def get_rule_items(self):
        """Return (envvar_name, parsed_rule) tuples for every rule
        found under self.prefix; list-valued rules are flattened."""
        rules = []
        for k in [key for key in os.environ if key.startswith(self.prefix)]:
            try:
                s = self.subst(self.get_env(k))
            except Exception as e:
                perror(k, e)
                continue
            try:
                data = json.loads(s)
            except ValueError:
                perror('failed to concatenate', self.get_env(k), 'to rules list')
            # NOTE(review): on json failure 'data' keeps its previous value
            # (or is undefined on the first iteration) -- confirm intended
            if type(data) == list:
                for d in data:
                    rules += [(k, d)]
            else:
                rules += [(k, data)]
        if len(rules) == 0:
            raise NotApplicable("no rules (%s)" % self.prefix)
        return rules

    def subst(self, v):
        """
        A rule value can contain references to other rules as %%ENV:OTHER%%.
        This function substitutes these markers with the referenced rules values,
        which may themselves contain references. Hence the recursion.
        """
        max_recursion = 10
        if type(v) == list:
            l = []
            for _v in v:
                l.append(self.subst(_v))
            return l
        # NOTE(review): 'unicode' is undefined on python3, so this line
        # raises NameError there when v is neither str nor list --
        # confirm this module is python2-only in that code path
        if type(v) != str and type(v) != unicode:
            return v
        p = re.compile('%%ENV:\w+%%', re.IGNORECASE)
        def _subst(v):
            # one substitution pass; raises EndRecursion when no marker is left
            matches = p.findall(v)
            if len(matches) == 0:
                raise EndRecursion
            for m in matches:
                s = m.strip("%").upper().replace('ENV:', '')
                if s in os.environ:
                    _v = self.get_env(s)
                elif 'OSVC_COMP_'+s in os.environ:
                    _v = self.get_env('OSVC_COMP_'+s)
                else:
                    # dead assignment: the raise below aborts before _v is used
                    _v = ""
                    raise NotApplicable("undefined substitution variable: %s" % s)
                v = v.replace(m, _v)
            return v
        for i in range(max_recursion):
            try:
                v = _subst(v)
            except EndRecursion:
                break
        return v

    def collector_api(self):
        """Build (and cache) the collector connection info dict
        (username/password/url) from node.conf."""
        if hasattr(self, "collector_api_cache"):
            return self.collector_api_cache
        import platform
        sysname, nodename, x, x, machine, x = platform.uname()
        try:
            import ConfigParser
        except ImportError:
            import configparser as ConfigParser
        config = ConfigParser.RawConfigParser({})
        if os.path.realpath(__file__).startswith("/opt/opensvc"):
            config.read("/opt/opensvc/etc/node.conf")
        else:
            config.read("/etc/opensvc/node.conf")
        data = {}
        data["username"] = nodename
        data["password"] = config.get("node", "uuid")
        data["url"] = config.get("node", "dbopensvc").replace("/feed/default/call/xmlrpc", "/init/rest/api")
        self.collector_api_cache = data
        return self.collector_api_cache

    def collector_url(self):
        """Return the api url with user:password@ credentials embedded."""
        api = self.collector_api()
        s = "%s:%s@" % (api["username"], api["password"])
        url = api["url"].replace("https://", "https://"+s)
        url = url.replace("http://", "http://"+s)
        return url

    def collector_request(self, path):
        """Build a Request for the collector api with basic auth header."""
        api = self.collector_api()
        url = api["url"]
        request = Request(url+path)
        # NOTE(review): base64.encodestring was removed in python 3.9
        # (use encodebytes) and expects bytes on python3 -- confirm the
        # target interpreter for this code path
        base64string = base64.encodestring('%s:%s' % (api["username"], api["password"])).replace('\n', '')
        request.add_header("Authorization", "Basic %s" % base64string)
        return request

    def collector_rest_get(self, path):
        """GET a collector api path and return the decoded json body.
        Refuses to send credentials over plain http."""
        api = self.collector_api()
        request = self.collector_request(path)
        if api["url"].startswith("https"):
            try:
                import ssl
                # certificate verification deliberately disabled
                kwargs = {"context": ssl._create_unverified_context()}
            except:
                kwargs = {}
        else:
            raise ComplianceError("refuse to submit auth tokens through a non-encrypted transport")
        try:
            f = urlopen(request, **kwargs)
        except HTTPError as e:
            # surface the collector-provided error message when possible
            try:
                err = json.loads(e.read())["error"]
                e = ComplianceError(err)
            except:
                pass
            raise e
        # redundant local import: json is already imported at module level
        import json
        data = json.loads(f.read())
        f.close()
        return data

    def collector_rest_get_to_file(self, path, fpath):
        """GET a collector api path and stream the response body to
        the local file fpath. Refuses plain http."""
        api = self.collector_api()
        request = self.collector_request(path)
        if api["url"].startswith("https"):
            try:
                import ssl
                kwargs = {"context": ssl._create_unverified_context()}
            except:
                kwargs = {}
        else:
            raise ComplianceError("refuse to submit auth tokens through a non-encrypted transport")
        try:
            f = urlopen(request, **kwargs)
        except HTTPError as e:
            try:
                err = json.loads(e.read())["error"]
                e = ComplianceError(err)
            except:
                pass
            raise e
        with open(fpath, 'wb') as df:
            for chunk in iter(lambda: f.read(4096), b""):
                df.write(chunk)
        f.close()

    def collector_safe_uri_to_uuid(self, uuid):
        """Strip the safe:// scheme and validate the safe file uuid."""
        if uuid.startswith("safe://"):
            uuid = uuid.replace("safe://", "")
        if not uuid.startswith("safe"):
            raise ComplianceError("malformed safe file uri: %s" % uuid)
        return uuid

    def collector_safe_file_download(self, uuid, fpath):
        """Download a collector safe file to fpath."""
        uuid = self.collector_safe_uri_to_uuid(uuid)
        self.collector_rest_get_to_file("/safe/" + uuid + "/download", fpath)

    def collector_safe_file_get_meta(self, uuid):
        """Return the metadata dict of a collector safe file."""
        uuid = self.collector_safe_uri_to_uuid(uuid)
        data = self.collector_rest_get("/safe/" + uuid)
        if len(data["data"]) == 0:
            raise ComplianceError(uuid + ": metadata not found")
        return data["data"][0]

    def urlretrieve(self, url, fpath):
        """Download url to fpath. Certificate verification is disabled
        on interpreters that support ssl contexts (2.7.9+)."""
        request = Request(url)
        kwargs = {}
        if sys.hexversion >= 0x02070900:
            import ssl
            kwargs["context"] = ssl._create_unverified_context()
        f = urlopen(request, **kwargs)
        with open(fpath, 'wb') as df:
            for chunk in iter(lambda: f.read(4096), b""):
                df.write(chunk)

    def md5(self, fpath):
        """Return the hex md5 digest of the file at fpath, streamed in
        4 KiB chunks."""
        import hashlib
        hash = hashlib.md5()
        with open(fpath, 'rb') as f:
            for chunk in iter(lambda: f.read(4096), b""):
                hash.update(chunk)
        return hash.hexdigest()
def main(co):
    """Command line driver for compliance objects.

    co is a CompObject subclass. Supported invocations:
      <prog> test | info
      <prog> <env_prefix> check|fix|fixable [extra args]
    Exits with RET_OK / RET_ERR / RET_NA.
    """
    syntax = "syntax:\n"
    syntax += """ %s check|fix|fixable\n"""%sys.argv[0]
    syntax += """ %s test|info"""%sys.argv[0]
    try:
        o = co()
    except NotApplicable as e:
        pinfo(e)
        sys.exit(RET_NA)
    if o.extra_syntax_parms:
        syntax += " "+o.extra_syntax_parms
    # single-argument forms: test / info
    if len(sys.argv) == 2:
        if sys.argv[1] == 'test':
            try:
                RET = o.test()
                sys.exit(RET)
            except ComplianceError as e:
                perror(e)
                sys.exit(RET_ERR)
            except NotApplicable:
                sys.exit(RET_NA)
        elif sys.argv[1] == 'info':
            o.info()
            sys.exit(0)
    if len(sys.argv) < 3:
        perror(syntax)
        sys.exit(RET_ERR)
    # re-init the object with the rule prefix (argv[1]) plus extra args
    argv = [sys.argv[1]]
    if len(sys.argv) > 3:
        argv += sys.argv[3:]
    o.__init__(*argv)
    try:
        if sys.argv[2] == 'check':
            RET = o.check()
        elif sys.argv[2] == 'fix':
            RET = o.fix()
        elif sys.argv[2] == 'fixable':
            RET = o.fixable()
        else:
            perror("unsupported argument '%s'"%sys.argv[2])
            perror(syntax)
            RET = RET_ERR
    except ComplianceError as e:
        perror(e)
        sys.exit(RET_ERR)
    except NotApplicable as e:
        pinfo(e)
        sys.exit(RET_NA)
    except:
        import traceback
        traceback.print_exc()
        sys.exit(RET_ERR)
    sys.exit(RET)
if __name__ == "__main__":
    # this module is a library, not a standalone program
    perror("this file is for import into compliance objects")
opensvc-1.8~20170412/var/compliance/com.opensvc/timedatectl.py 0000755 0001750 0001750 00000013143 13073467726 024275 0 ustar jkelbert jkelbert #!/usr/bin/env python
data = {
"default_prefix": "OSVC_COMP_TIMEDATECTL_",
"example_value": """
{
"timezone": "Europe/Paris",
"ntpenabled": "no"
}
""",
"description": """* Checks timedatectl settings
* Module need to be called with the exposed target settings as variable (timedatectl.py OSVC_COMP_TIMEDATECTL_1 check)
""",
"form_definition": """
Desc: |
A timedatectl rule, fed to the 'timedatectl' compliance object to setup rhel/centos7+ timezone/ntp.
Css: comp48
Outputs:
-
Dest: compliance variable
Class: timedatectl
Type: json
Format: dict
Inputs:
-
Id: timezone
Label: Timezone
DisplayModeLabel: timezone
LabelCss: action16
Mandatory: No
Help: 'The timezone name, as listed by "timedatectl list-timezones" command. Example: Europe/Paris'
Type: string
-
Id: ntpenabled
Label: NTP Enabled
DisplayModeLabel: ntpenabled
LabelCss: time16
Mandatory: No
Default: "yes"
Candidates:
- "yes"
- "no"
Help: "Specify yes or no, to request enabling or disabling the chronyd time service, driven through timedatectl command."
Type: string
"""
}
import os
import sys
from subprocess import *
sys.path.append(os.path.dirname(__file__))
from comp import *
from utilities import *
class CompTimeDateCtl(CompObject):
    """Check and fix timezone / ntp enablement through the timedatectl
    command (systemd hosts, rhel/centos 7+). Linux only."""

    def __init__(self, prefix=None):
        CompObject.__init__(self, prefix=prefix, data=data)

    def init(self):
        self.sysname, self.nodename, x, x, self.machine = os.uname()
        # a single rule dict is expected: keys among timezone/ntpenabled
        self.inputs = self.get_rules()[0]
        if self.sysname not in ['Linux']:
            perror('module not supported on', self.sysname)
            raise NotApplicable()
        if which('timedatectl') is None:
            perror('timedatectl command not found', self.sysname)
            raise NotApplicable()
        self.tz = self.get_valid_tz()
        self.live = self.get_current_tdctl()

    def get_current_tdctl(self):
        """Parse 'timedatectl status' into a dict with the keys
        'timezone' and 'ntpenabled'. Sample output:

        [root@rhel71 averon]# timedatectl
              Local time: mar. 2016-03-29 17:13:43 CEST
          Universal time: mar. 2016-03-29 15:13:43 UTC
                RTC time: mar. 2016-03-29 15:13:42
               Time zone: Europe/Paris (CEST, +0200)
             NTP enabled: yes
        NTP synchronized: yes
         RTC in local TZ: no
              DST active: yes
         Last DST change: DST began at
                          dim. 2016-03-27 01:59:59 CET
                          dim. 2016-03-27 03:00:00 CEST
         Next DST change: DST ends (the clock jumps one hour backwards) at
                          dim. 2016-10-30 02:59:59 CEST
                          dim. 2016-10-30 02:00:00 CET
        """
        current = {}
        try:
            cmd = ['timedatectl', 'status']
            p = Popen(cmd, stdout=PIPE)
            out, err = p.communicate()
            if p.returncode != 0:
                # bare raise with no active exception still aborts into
                # the except clause below
                raise
            out = bdecode(out)
            for line in out.splitlines():
                if 'Time zone:' in line:
                    # "Time zone: Europe/Paris (CEST, +0200)" -> "Europe/Paris"
                    s = line.split(':')[-1].strip()
                    t = s.split(' ')[0]
                    current['timezone'] = t
                if 'NTP enabled:' in line:
                    current['ntpenabled'] = line.split(':')[-1].strip()
        except:
            perror('can not fetch timedatectl infos')
            return None
        return current

    def get_valid_tz(self):
        """Return the list of valid timezone names, or None on error."""
        tz = []
        try:
            cmd = ['timedatectl', '--no-pager', 'list-timezones']
            p = Popen(cmd, stdout=PIPE)
            out, err = p.communicate()
            if p.returncode != 0:
                raise
            out = bdecode(out)
            for line in out.splitlines():
                curtz = line.strip()
                # NOTE(review): "is not ''" is an identity test, not an
                # equality test; it only works through string interning
                if curtz is not '':
                    tz.append(curtz)
        except:
            perror('can not build valid timezone list')
            return None
        return tz

    def fixable(self):
        return RET_NA

    def check(self):
        """Check every rule key against the live settings."""
        if self.live is None:
            return RET_NA
        r = RET_OK
        for input in self.inputs:
            r |= self._check(input)
        return r

    def _check(self, input):
        # NOTE(review): raises KeyError when 'input' is not a key of
        # self.live (e.g. an unexpected rule key) -- confirm acceptable
        if self.inputs[input] == self.live[input]:
            pinfo("timedatectl %s is %s, on target" % (input, self.live[input] ))
            return RET_OK
        perror("timedatectl %s is %s, target %s" % (input, self.live[input], self.inputs[input]))
        return RET_ERR

    def set_tz(self, timezone):
        """Run 'timedatectl set-timezone'; RET_OK on success, None on error."""
        try:
            cmd = ['timedatectl', 'set-timezone', timezone]
            p = Popen(cmd, stdout=PIPE)
            out, err = p.communicate()
            if p.returncode != 0:
                raise
        except:
            perror('could not set timezone')
            return None
        return RET_OK

    def set_ntp(self, value):
        """Run 'timedatectl set-ntp'; RET_OK on success, None on error."""
        try:
            cmd = ['timedatectl', 'set-ntp', value]
            p = Popen(cmd, stdout=PIPE)
            out, err = p.communicate()
            if p.returncode != 0:
                raise
        except:
            perror('could not set ntp')
            return None
        return RET_OK

    def _fix(self, input):
        r = RET_OK
        # NOTE(review): "input in 'timezone'" is a substring test (also
        # true for e.g. 'zone'), presumably meant as equality; also note
        # set_tz/set_ntp may return None, making "r |= None" raise -- confirm
        if input in 'timezone':
            r |= self.set_tz(self.inputs[input])
            return r
        if input in 'ntpenabled':
            r |= self.set_ntp(self.inputs[input])
            return r
        return RET_NA

    def fix(self):
        """Apply every rule key that check() found off-target."""
        r = RET_OK
        if self.check() == RET_ERR:
            for input in self.inputs:
                r |= self._fix(input)
        return r

    def test(self):
        print("Not Implemented")
if __name__ == "__main__":
    # argument parsing and dispatch are delegated to comp.main()
    main(CompTimeDateCtl)
opensvc-1.8~20170412/var/compliance/com.opensvc/chkconfig.py 0000755 0001750 0001750 00000007123 13073467726 023732 0 ustar jkelbert jkelbert #!/usr/bin/env python
from subprocess import *
import sys
import os
sys.path.append(os.path.dirname(__file__))
from comp import *
# force the C locale so chkconfig output parsing is locale-independent
os.environ['LANG'] = 'C'
class InitError(Exception):
    """Raised when a chkconfig invocation fails unexpectedly."""
    pass
class UnknownService(Exception):
    """Raised when a service is not known to chkconfig."""
    pass
class SetError(Exception):
    """Raised when changing a service runlevel state fails."""
    pass
class Chkconfig(object):
def __init__(self):
self.load()
def __str__(self):
s = ""
for svc in self.services:
s += "%-20s %s\n"%(svc, ' '.join(map(lambda x: '%-4s'%x, self.services[svc])))
return s
def load(self):
self.services = {}
p = Popen(['/sbin/chkconfig', '--list'], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if p.returncode != 0:
raise InitError()
out = bdecode(out)
for line in out.splitlines():
words = line.split()
if len(words) != 8:
continue
self.services[words[0]] = []
for w in words[1:]:
level, state = w.split(':')
self.services[words[0]].append(state)
def load_one(self, service):
p = Popen(['/sbin/chkconfig', '--list', service], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if p.returncode != 0:
out = bdecode(out)
if 'not referenced' in out:
self.services[service] = ['off', 'off', 'off', 'off', 'off', 'off']
return
raise InitError()
def activate(self, service):
p = Popen(['chkconfig', service, 'on'], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if p.returncode != 0:
raise SetError()
def set_state(self, service, level, state):
curstate = self.get_state(service, level)
if curstate == state:
return
p = Popen(['chkconfig', '--level', level, service, state], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if p.returncode != 0:
raise SetError()
def get_state(self, service, level):
if service not in self.services:
try:
self.load_one(service)
except InitError:
pass
if service not in self.services:
raise UnknownService()
return self.services[service][level]
def check_state(self, service, levels, state, seq=None, verbose=False):
r = 0
for level in levels:
try:
level = int(level)
except:
continue
try:
curstate = self.get_state(service, level)
except UnknownService:
if verbose:
perror("can not get service", service, "runlevels")
return 1
if curstate != state:
if verbose:
perror("service", service, "at runlevel", level, "is in state", curstate, "! target state is", state)
r |= 1
else:
if verbose:
pinfo("service", service, "at runlevel", level, "is in state", curstate)
return r
def fix_state(self, service, levels, state, seq=None):
    """Set `service` to `state` at runlevels `levels` in one chkconfig call.

    `levels` is passed verbatim to chkconfig --level (e.g. "235").
    `seq` is accepted for interface compatibility but unused.
    Returns 0 on success, 1 on chkconfig failure.
    """
    cmd = ['chkconfig', '--level', levels, service, state]
    pinfo("exec:", ' '.join(cmd))
    p = Popen(cmd, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    if p.returncode != 0:
        perror("failed to set", service, "runlevels")
        pinfo(out)
        perror(err)
        return 1
    return 0
if __name__ == "__main__":
    # manual smoke test: dump the whole chkconfig table, then one lookup
    o = Chkconfig()
    pinfo(o)
    pinfo('xfs@rc3 =', o.get_state('xfs', 3))
opensvc-1.8~20170412/var/compliance/com.opensvc/linux.mpath.py 0000755 0001750 0001750 00000037344 13073467726 024256 0 ustar jkelbert jkelbert #!/usr/bin/env python
data = {
"default_prefix": "OSVC_COMP_MPATH_",
"example_value": """
[
{
"key": "defaults.polling_interval",
"op": ">=",
"value": 20
},
{
"key": "device.{HP}.{HSV210.*}.prio",
"op": "=",
"value": "alua"
},
{
"key": "blacklist.wwid",
"value": 600600000001,
"op": "="
}
]
""",
"description": """* Setup and verify the Linux native multipath configuration
""",
"form_definition": """
Desc: |
A rule to set a list of Linux multipath.conf parameters. Current values can be checked as strictly equal, or superior/inferior to their target value.
Outputs:
-
Dest: compliance variable
Type: json
Format: list of dict
Class: linux_mpath
Inputs:
-
Id: key
Label: Key
DisplayModeTrim: 64
DisplayModeLabel: key
LabelCss: action16
Mandatory: Yes
Type: string
Help: >
The multipath.conf parameter to check.
ex: defaults.polling_interval or
device.device.{HP}.{HSV210.*} or
multipaths.multipath.6006000000000000 or
blacklist.wwid or
blacklist.device.{HP}.{HSV210.*}
-
Id: op
Label: Comparison operator
DisplayModeLabel: op
LabelCss: action16
Mandatory: Yes
Type: string
Default: "="
Candidates:
- "="
- ">"
- ">="
- "<"
- "<="
Help: The comparison operator to use to check the parameter current value.
-
Id: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string or integer
Help: The multipath.conf parameter target value.
""",
}
import os
import sys
import json
import re
from subprocess import *
sys.path.append(os.path.dirname(__file__))
from comp import *
comment_chars = "#;"
sections_tree = {
'defaults': {},
'blacklist': {
'device': {},
},
'blacklist_exceptions': {
'device': {},
},
'devices': {
'device': {},
},
'multipaths': {
'multipath': {},
},
}
class Blacklist(object):
    """In-memory model of a multipath.conf blacklist or
    blacklist_exceptions section: wwid patterns, devnode patterns and
    device subsections, serializable back to multipath.conf syntax."""

    def __init__(self, name=""):
        self.name = name      # section name, e.g. "blacklist"
        self.wwid = []        # list of wwid patterns (strings)
        self.devnode = []     # list of devnode patterns (strings)
        self.devices = []     # list of device Section objects

    def __str__(self):
        """Serialize to multipath.conf syntax; an empty section renders
        as the empty string so it is omitted from the file."""
        if not (self.wwid or self.devnode or self.devices):
            return ""
        parts = [self.name + " {\n"]
        for pattern in self.wwid:
            parts.append("\twwid " + str(pattern) + "\n")
        for pattern in self.devnode:
            parts.append("\tdevnode " + str(pattern) + "\n")
        for device in self.devices:
            parts.append(str(device))
        parts.append("}\n")
        return "".join(parts)
class Section(object):
    """In-memory model of a named multipath.conf subsection (device,
    multipath, defaults): a name plus a keyword/value mapping,
    serializable back to multipath.conf syntax."""

    def __init__(self, name="", indent=1):
        self.name = name
        self.attr = {}                 # keyword -> value mapping
        self.indent = "\t" * indent    # leading tabs for nested rendering

    def __str__(self):
        out = self.indent + self.name + " {\n"
        for key, val in self.attr.items():
            val = str(val)
            if ' ' in val:
                # quote values containing spaces so multipathd parses them
                val = '"' + val + '"'
            out += self.indent + "\t" + key + " " + val + "\n"
        out += self.indent + "}\n"
        return out
class Conf(object):
    """In-memory model of a complete multipath.conf file.

    Aggregates the defaults section, the blacklist and
    blacklist_exceptions sections, and the devices/multipaths section
    lists. Parameters are addressed with dotted keys, optionally using
    {index} components, e.g.:

        defaults.polling_interval
        devices.device.{HP}.{HSV210.*}.prio
        multipaths.multipath.{600600000001}.alias
        blacklist.wwid

    set() flips self.changed so callers know the file must be rewritten.
    """
    def __init__(self):
        self.blacklist = Blacklist("blacklist")
        self.blacklist_exceptions = Blacklist("blacklist_exceptions")
        self.defaults = Section("defaults", indent=0)
        self.devices = []      # list of Section("device")
        self.multipaths = []   # list of Section("multipath")
        self.changed = False   # True once set() modified anything

    def __str__(self):
        """Serialize the whole configuration to multipath.conf syntax."""
        s = ""
        s += str(self.defaults)
        s += str(self.blacklist)
        s += str(self.blacklist_exceptions)
        if len(self.devices) > 0:
            s += "devices {\n"
            for device in self.devices:
                s += str(device)
            s += "}\n"
        if len(self.multipaths) > 0:
            s += "multipaths {\n"
            for multipath in self.multipaths:
                s += str(multipath)
            s += "}\n"
        return s

    def set(self, key, value):
        """Set the parameter designated by dotted `key` to `value`,
        creating the hosting section if needed."""
        index = self.parse_key(key)
        # strip the '{index}.' components: they were captured in `index`
        key = re.sub(r'\{([^\}]+)\}\.', '', key)
        l = key.split('.')
        if key.endswith('}'):
            # key designates a section only, no trailing attribute
            a = None
        else:
            a = l[-1]
        if l[1] == "device":
            o = self.find_device(l[0], index)
            if o is None:
                o = Section("device")
                o.attr['vendor'] = index[0]
                o.attr['product'] = index[1]
                _l = self.get_device_list(l[0])
                _l.append(o)
            if a is not None:
                o.attr[a] = value
            self.changed = True
        elif l[1] == "multipath":
            o = self.find_multipath(index)
            if o is None:
                o = Section("multipath")
                o.attr['wwid'] = index
                self.multipaths.append(o)
            # bugfix: guard like the device branch. For section-only keys
            # (ending with '}'), a is None and the previous code stored
            # the value under the literal None dict key.
            if a is not None:
                o.attr[a] = value
            self.changed = True
        elif l[-1] == "wwid":
            o = getattr(self, l[0])
            o.wwid.append(str(value))
            self.changed = True
        elif l[-1] == "devnode":
            o = getattr(self, l[0])
            o.devnode.append(str(value))
            self.changed = True
        elif l[0] == "defaults":
            self.defaults.attr[a] = value
            self.changed = True

    def get(self, key):
        """Return the current value for dotted `key`, or None if unset.

        Section-only keys return "" when the section exists; blacklist
        wwid/devnode keys return the whole list of patterns.
        """
        index = self.parse_key(key)
        key = re.sub(r'\{([^\}]+)\}\.', '', key)
        l = key.split('.')
        if key.endswith('}'):
            a = None
        else:
            a = l[-1]
        if len(l) < 2:
            perror("malformed key", key)
            return
        if l[1] == "device":
            o = self.find_device(l[0], index)
            if o:
                if a is None:
                    return ""
                elif a in o.attr:
                    return o.attr[a]
        elif l[1] == "multipath":
            o = self.find_multipath(index)
            if o and a in o.attr:
                return o.attr[a]
        elif l[-1] == "wwid":
            return getattr(self, l[0]).wwid
        elif l[-1] == "devnode":
            return getattr(self, l[0]).devnode
        elif l[0] == "defaults":
            if a in self.defaults.attr:
                return self.defaults.attr[a]

    def find_multipath(self, index):
        """Return the multipath Section whose wwid equals `index`, or None."""
        wwid = index
        for multipath in self.multipaths:
            if multipath.attr['wwid'] == wwid:
                return multipath

    def get_device_list(self, section):
        """Return the device Section list backing `section` ('devices',
        'blacklist' or 'blacklist_exceptions'), or None."""
        l = getattr(self, section)
        if type(l) != list and hasattr(l, "devices"):
            # Blacklist objects carry their devices in a .devices attribute
            l = getattr(l, "devices")
        if type(l) != list:
            return
        return l

    def find_device(self, section, index):
        """Return the device Section matching the (vendor, product)
        `index` tuple in `section`, or None."""
        vendor, product = index
        l = self.get_device_list(section)
        if not l:
            return
        for device in l:
            if 'vendor' not in device.attr or \
               'product' not in device.attr:
                continue
            if device.attr['vendor'] == vendor and \
               device.attr['product'] == product:
                return device

    def parse_key(self, key):
        """Extract the '{...}' index components from `key`: a
        (vendor, product) tuple for device keys, a wwid string for
        multipath keys, None otherwise."""
        key = key.strip()
        m = re.search(r'device\.\{([^\}]+)\}\.\{([^\}]+)\}', key)
        if m:
            return m.group(1), m.group(2)
        m = re.search(r'multipath\.\{([^\}]+)\}', key)
        if m:
            return m.group(1)
class LinuxMpath(CompObject):
    """Compliance object checking and fixing Linux multipath.conf settings.

    Rules are dicts with 'key' (dotted multipath.conf path), 'op'
    ('=', '>=' or '<=') and 'value'. fix() rewrites /etc/multipath.conf
    after a timestamped backup, then restarts the multipath daemon.
    """
    def __init__(self, prefix=None):
        CompObject.__init__(self, prefix=prefix, data=data)

    def init(self):
        self.need_restart = False
        self.cf = os.path.join(os.sep, 'etc', 'multipath.conf')
        self.nocf = False          # True when the config file is absent
        self.conf = Conf()
        self.keys = self.get_rules()
        self.load_file(self.cf)

    def fixable(self):
        return RET_OK

    def load_file(self, p):
        """Parse the multipath.conf at path `p` into self.conf."""
        if not os.path.exists(p):
            perror(p, "does not exist")
            self.nocf = True
            return
        with open(p, 'r') as f:
            buff = f.read()
        buff = self.strip_comments(buff)
        self._load_file(buff, sections_tree)

    def strip_comments(self, buff):
        """Drop blank lines, comment lines and trailing comments."""
        lines = buff.split('\n')
        l = []
        for line in lines:
            line = line.strip()
            if len(line) == 0:
                continue
            discard = False
            for c in comment_chars:
                if line[0] == c:
                    # whole-line comment
                    discard = True
                    break
                try:
                    # trailing comment: keep only the part before it
                    i = line.index(c)
                    line = line[:i]
                except ValueError:
                    pass
            if not discard and len(line) > 0:
                l.append(line)
        return "\n".join(l)

    def _load_file(self, buff, sections, chain=[]):
        """Recursively extract every known (sub)section of `buff` per the
        `sections` tree, loading keywords at each level. `chain` tracks
        the path of section names from the root (never mutated)."""
        for section, subsections in sections.items():
            _chain = chain + [section]
            _buff = buff
            while True:
                data = self.load_section(_buff, section)
                if data is None:
                    break
                _buff = data[1]
                self.load_keywords(data[0], subsections, _chain)
                self._load_file(data[0], subsections, _chain)

    def load_keywords(self, buff, subsections, chain):
        """Parse the keyword/value pairs of one section body and store
        them in the matching self.conf container, chosen from `chain`."""
        keywords = {}
        keyword = None
        for line in buff.split('\n'):
            if len(line) == 0:
                continue
            keyword = line.split()[0]
            if keyword in subsections:
                # subsection header, handled by the recursive pass
                continue
            value = line[len(keyword):].strip().strip('"')
            if len(value) == 0:
                continue
            if keyword in ('wwid', 'devnode') and chain[-1].startswith('blacklist'):
                # blacklists accept multiple wwid/devnode entries
                if keyword not in keywords:
                    keywords[keyword] = [value]
                else:
                    keywords[keyword] += [value]
            else:
                keywords[keyword] = value
        if chain[-1] == 'device' and chain[0] == 'devices':
            s = Section("device")
            s.attr = keywords
            self.conf.devices.append(s)
        elif chain[-1] == 'multipath':
            s = Section("multipath")
            s.attr = keywords
            self.conf.multipaths.append(s)
        elif chain[-1] == 'device' and chain[0] == 'blacklist':
            s = Section("device")
            s.attr = keywords
            self.conf.blacklist.devices.append(s)
        elif chain[-1] == 'device' and chain[0] == 'blacklist_exceptions':
            # bugfix: the section name in sections_tree is
            # 'blacklist_exceptions' (with underscore); the previous
            # comparison with 'blacklist exceptions' never matched, so
            # blacklist_exceptions device subsections were dropped.
            s = Section("device")
            s.attr = keywords
            self.conf.blacklist_exceptions.devices.append(s)
        elif chain[-1] == 'blacklist':
            if 'wwid' in keywords:
                self.conf.blacklist.wwid = keywords['wwid']
            if 'devnode' in keywords:
                self.conf.blacklist.devnode = keywords['devnode']
        elif chain[-1] == 'blacklist_exceptions':
            if 'wwid' in keywords:
                self.conf.blacklist_exceptions.wwid = keywords['wwid']
            if 'devnode' in keywords:
                self.conf.blacklist_exceptions.devnode = keywords['devnode']
        elif chain[-1] == 'defaults':
            self.conf.defaults.attr = keywords

    def load_section(self, buff, section):
        """Return (section body, remainder after the section) for the
        first occurrence of `section` in `buff`, or None if the section
        is absent or its braces are unbalanced. Braces are matched by
        depth so nested subsections stay inside the body."""
        l = []
        try:
            start = buff.index(section)
        except ValueError:
            return
        buff = buff[start:]
        try:
            buff = buff[buff.index('{')+1:]
        except ValueError:
            return
        depth = 1
        for i, c in enumerate(buff):
            if c == '{':
                depth += 1
            elif c == '}':
                depth -= 1
            if depth == 0:
                return buff[:i], buff[i+1:]
        return

    def _check_key(self, keyname, target, op, value, verbose=True):
        """Compare a current `value` to `target` per `op`. List values
        (blacklist entries) are checked by membership. Returns RET_OK
        or RET_ERR."""
        r = RET_OK
        if value is None:
            if verbose:
                perror("%s is not set"%keyname)
            return RET_ERR
        if type(value) == list:
            if str(target) in value:
                if verbose:
                    pinfo("%s=%s on target"%(keyname, str(value)))
                return RET_OK
            else:
                if verbose:
                    perror("%s=%s is not set"%(keyname, str(target)))
                return RET_ERR
        if op == '=':
            target = str(target).strip()
            if str(value) != target:
                if verbose:
                    perror("%s=%s, target: %s"%(keyname, str(value), target))
                r |= RET_ERR
            elif verbose:
                pinfo("%s=%s on target"%(keyname, str(value)))
        else:
            # relational operators only make sense on integers
            if type(value) != int:
                if verbose:
                    perror("%s=%s value must be integer"%(keyname, str(value)))
                r |= RET_ERR
            elif op == '<=' and value > target:
                if verbose:
                    perror("%s=%s target: <= %s"%(keyname, str(value), str(target)))
                r |= RET_ERR
            elif op == '>=' and value < target:
                if verbose:
                    perror("%s=%s target: >= %s"%(keyname, str(value), str(target)))
                r |= RET_ERR
            elif verbose:
                pinfo("%s=%s on target"%(keyname, str(value)))
        return r

    def check_key(self, key, verbose=True):
        """Validate one rule dict and check it against the loaded conf.
        Returns RET_NA for malformed rules, RET_OK/RET_ERR otherwise."""
        if 'key' not in key:
            if verbose:
                perror("'key' not set in rule %s"%str(key))
            return RET_NA
        if 'value' not in key:
            if verbose:
                perror("'value' not set in rule %s"%str(key))
            return RET_NA
        if 'op' not in key:
            op = "="
        else:
            op = key['op']
        target = key['value']
        if op not in ('>=', '<=', '='):
            if verbose:
                perror("'op' must be either '=', '>=' or '<=': %s"%str(key))
            return RET_NA
        keyname = key['key']
        value = self.conf.get(keyname)
        if value is None:
            if verbose:
                perror("%s key is not set"%keyname)
            return RET_ERR
        r = self._check_key(keyname, target, op, value, verbose=verbose)
        return r

    def fix_key(self, key):
        """Set one rule's target value in the in-memory configuration."""
        pinfo("%s=%s set"%(key['key'], key['value']))
        self.conf.set(key['key'], key['value'])

    def check(self):
        r = 0
        for key in self.keys:
            r |= self.check_key(key, verbose=True)
        return r

    def fix(self):
        """Apply out-of-compliance keys, back up and rewrite the config
        file, then restart the multipath daemon. Returns RET_OK/RET_ERR."""
        for key in self.keys:
            if self.check_key(key, verbose=False) == RET_ERR:
                self.fix_key(key)
        if not self.conf.changed:
            # nothing to rewrite; report success explicitly (the
            # previous code returned a bare None here)
            return RET_OK
        if not self.nocf:
            import datetime
            backup = self.cf+'.'+str(datetime.datetime.now())
            try:
                import shutil
                shutil.copy(self.cf, backup)
            except:
                perror("failed to backup %s"%self.cf)
                return RET_ERR
            pinfo(self.cf, "backed up as %s"%backup)
        try:
            with open(self.cf, 'w') as f:
                f.write(str(self.conf))
            pinfo(self.cf, "rewritten")
            self.need_restart = True
        except:
            perror("failed to write %s"%self.cf)
            if not self.nocf:
                # shutil was imported above on this same code path
                shutil.copy(backup, self.cf)
                pinfo("backup restored")
            return RET_ERR
        self.restart_daemon()
        return RET_OK

    def restart_daemon(self):
        """Restart multipathd via its init script, if a rewrite occurred."""
        if not self.need_restart:
            return
        candidates = [
            "/etc/init.d/multipathd",
            "/etc/init.d/multipath-tools",
        ]
        fpath = None
        for i in candidates:
            if os.path.exists(i):
                fpath = i
                break
        if fpath is None:
            perror("multipath tools startup script not found")
            return RET_ERR
        pinfo("restarting multipath daemon")
        cmd = [fpath, "restart"]
        p = Popen(cmd, stdin=None, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        err = bdecode(err)
        if len(err) > 0:
            perror(err)
if __name__ == "__main__":
    # hand control to the opensvc compliance framework entry point
    main(LinuxMpath)
opensvc-1.8~20170412/var/compliance/com.opensvc/user.py 0000755 0001750 0001750 00000040240 13073467726 022752 0 ustar jkelbert jkelbert #!/usr/bin/env python
data = {
"default_prefix": "OSVC_COMP_USER_",
"example_value": """
{
"tibco": {
"shell": "/bin/ksh",
"gecos":"agecos"
},
"tibco1": {
"shell": "/bin/tcsh",
"gecos": "another gecos"
}
}
""",
"description": """* Verify a local system user configuration
* A minus (-) prefix to the user name indicates the user should not exist
Environment variable modifying the object behaviour:
* OSVC_COMP_USERS_INITIAL_PASSWD=true|false
""",
"form_definition": """
Desc: |
A rule defining a list of Unix users and their properties. Used by the users and group_membership compliance objects.
Css: comp48
Outputs:
-
Dest: compliance variable
Type: json
Format: dict of dict
Key: user
EmbedKey: No
Class: user
Inputs:
-
Id: user
Label: User name
DisplayModeLabel: user
LabelCss: guy16
Mandatory: Yes
Type: string
Help: The Unix user name.
-
Id: uid
Label: User id
DisplayModeLabel: uid
LabelCss: guy16
Mandatory: Yes
Type: string or integer
Help: The Unix uid of this user.
-
Id: gid
Label: Group id
DisplayModeLabel: gid
LabelCss: guys16
Mandatory: Yes
Type: string or integer
Help: The Unix principal gid of this user.
-
Id: shell
Label: Login shell
DisplayModeLabel: shell
LabelCss: action16
Type: string
Help: The Unix login shell for this user.
-
Id: home
Label: Home directory
DisplayModeLabel: home
LabelCss: action16
Type: string
Help: The Unix home directory full path for this user.
-
Id: password
Label: Password hash
DisplayModeLabel: pwd
LabelCss: action16
Type: string
Help: The password hash for this user. It is recommanded to set it to '!!' or to set initial password to change upon first login. Leave empty to not check nor set the password.
-
Id: gecos
Label: Gecos
DisplayModeLabel: gecos
LabelCss: action16
Type: string
Help: A one-line comment field describing the user.
-
Id: check_home
Label: Enforce homedir ownership
DisplayModeLabel: home ownership
LabelCss: action16
Type: string
Default: yes
Candidates:
- "yes"
- "no"
Help: Toggles the user home directory ownership checking.
""",
}
import os
import sys
import json
import pwd
import re
from utilities import which
try:
import spwd
cap_shadow = True
except:
cap_shadow = False
from subprocess import Popen, list2cmdline, PIPE
sys.path.append(os.path.dirname(__file__))
from comp import *
blacklist = [
"root",
"bin",
"daemon",
"adm",
"lp",
"sync",
"shutdown",
"halt",
"mail",
"news",
"uucp",
"operator",
"nobody",
"nscd",
"vcsa",
"pcap",
"mailnull",
"smmsp",
"sshd",
"rpc",
"avahi",
"rpcuser",
"nfsnobody",
"haldaemon",
"avahi-autoipd",
"ntp"
]
class CompUser(CompObject):
    """Compliance object checking and fixing local Unix user accounts.

    Rules map user names to property dicts (uid, gid, shell, home,
    gecos, password/spassword, check_home). A leading '-' on a user
    name means the account must NOT exist. With
    OSVC_COMP_USERS_INITIAL_PASSWD=true, password hashes are neither
    checked nor fixed.
    """
    def __init__(self, prefix=None):
        CompObject.__init__(self, prefix=prefix, data=data)

    def init(self):
        # passwd struct attribute per rule property
        self.pwt = {
            'shell': 'pw_shell',
            'home': 'pw_dir',
            'uid': 'pw_uid',
            'gid': 'pw_gid',
            'gecos': 'pw_gecos',
            'password': 'pw_passwd',
        }
        # shadow struct attribute per rule property
        self.spwt = {
            'spassword': 'sp_pwd',
        }
        # useradd/usermod option per rule property
        self.usermod_p = {
            'shell': '-s',
            'home': '-d',
            'uid': '-u',
            'gid': '-g',
            'gecos': '-c',
            'password': '-p',
            'spassword': '-p',
        }
        self.sysname, self.nodename, x, x, self.machine = os.uname()
        if "OSVC_COMP_USERS_INITIAL_PASSWD" in os.environ and \
           os.environ["OSVC_COMP_USERS_INITIAL_PASSWD"] == "true":
            self.initial_passwd = True
        else:
            self.initial_passwd = False
        if self.sysname not in ['SunOS', 'Linux', 'HP-UX', 'AIX', 'OSF1', 'FreeBSD']:
            perror('module not supported on', self.sysname)
            raise NotApplicable()
        if self.sysname == "FreeBSD":
            # FreeBSD wraps user management in the pw(8) frontend
            self.useradd = ["pw", "useradd"]
            self.usermod = ["pw", "usermod"]
            self.userdel = ["pw", "userdel"]
        else:
            self.useradd = ["useradd"]
            self.usermod = ["usermod"]
            self.userdel = ["userdel"]

        # merge all rules; the first rule defining a property wins
        self.users = {}
        for d in self.get_rules():
            for user in d:
                if user not in self.users:
                    self.users[user] = d[user]
                else:
                    for key in self.usermod_p.keys():
                        if key in d[user] and key not in self.users[user]:
                            self.users[user][key] = d[user][key]

        for user, d in self.users.items():
            for k in ('uid', 'gid'):
                if k in self.users[user]:
                    self.users[user][k] = int(d[k])
            if "password" in d and len(d["password"]) == 0:
                del(self.users[user]["password"])
            if cap_shadow:
                # shadow-capable system: hashes belong in spassword
                if "password" in d and len(d["password"]) > 0 and \
                   ("spassword" not in d or len(d["spassword"]) == 0):
                    self.users[user]["spassword"] = self.users[user]["password"]
                    del self.users[user]["password"]
                if "spassword" not in d:
                    self.users[user]["spassword"] = "x"
            else:
                # no shadow db: hashes belong in password
                if "spassword" in d and len(d["spassword"]) > 0 and \
                   ("password" not in d or len(d["password"]) == 0):
                    self.users[user]["password"] = self.users[user]["spassword"]
                    del self.users[user]["spassword"]
                if "password" not in d:
                    self.users[user]["password"] = "x"

    def fixable(self):
        """Fixing requires the usermod command to be available."""
        if not which(self.usermod[0]):
            perror(self.usermod[0], "program not found")
            return RET_ERR
        return RET_OK

    def grpconv(self):
        """Resync /etc/gshadow from /etc/group, skipping when duplicate
        group names are present (grpconv bug workaround)."""
        if not cap_shadow or not os.path.exists('/etc/gshadow'):
            return
        if not which('grpconv'):
            return
        with open('/etc/group', 'r') as f:
            buff = f.read()
        l = []
        for line in buff.split('\n'):
            u = line.split(':')[0]
            if u in l:
                perror("duplicate group %s in /etc/group. skip grpconv (grpconv bug workaround)"%u)
                return
            l.append(u)
        p = Popen(['grpconv'])
        p.communicate()

    def pwconv(self):
        """Resync /etc/shadow from /etc/passwd."""
        if not cap_shadow or not os.path.exists('/etc/shadow'):
            return
        if not which('pwconv'):
            return
        p = Popen(['pwconv'])
        p.communicate()

    def fix_item(self, user, item, target):
        """Fix one user property via usermod. Returns RET_OK/RET_ERR."""
        if item in ["password", "spassword"]:
            if self.initial_passwd:
                pinfo("skip", user, "password modification in initial_passwd mode")
                return RET_OK
            if target == "x":
                # 'x' is the shadow placeholder, not a hash to enforce
                return RET_OK
            # bugfix: ("AIX") is just the string "AIX", making the
            # previous membership test a substring check; use a 1-tuple.
            if self.sysname in ("AIX",):
                return RET_OK
        cmd = [] + self.usermod
        if self.sysname == "FreeBSD":
            # pw(8) wants the user name right after the subcommand
            cmd.append(user)
        cmd += [self.usermod_p[item], str(target)]
        if item == 'home':
            # also move the home directory content
            cmd.append('-m')
        if self.sysname != "FreeBSD":
            cmd.append(user)
        pinfo(list2cmdline(cmd))
        p = Popen(cmd)
        out, err = p.communicate()
        r = p.returncode
        self.pwconv()
        self.grpconv()
        if r == 0:
            return RET_OK
        else:
            return RET_ERR

    def check_item(self, user, item, target, current, verbose=False):
        """Compare one property's current value to its target.
        Returns RET_OK on match, RET_ERR otherwise."""
        if type(current) == int and current < 0:
            # normalize negative 32-bit uids/gids to their unsigned value
            current += 4294967296
        if sys.version_info[0] < 3 and type(current) == str and type(target) == unicode:
            current = unicode(current, errors="ignore")
        if target == current:
            if verbose:
                pinfo('user', user, item+':', current)
            return RET_OK
        elif "passw" in item and target == "!!" and current == "":
            # an empty password field is equivalent to the '!!' locked marker
            if verbose:
                pinfo('user', user, item+':', current)
            return RET_OK
        else:
            if verbose:
                perror('user', user, item+':', current, 'target:', target)
            return RET_ERR

    def check_user_del(self, user, verbose=True):
        """Check that `user` does not exist. Returns RET_OK when absent."""
        r = 0
        try:
            userinfo = pwd.getpwnam(user)
        except KeyError:
            if verbose:
                pinfo('user', user, 'does not exist, on target')
            return RET_OK
        if verbose:
            perror('user', user, "exists, shouldn't")
        return RET_ERR

    def check_user(self, user, props, verbose=True):
        """Check one user against its target properties.
        Returns an or-ed RET_OK/RET_ERR."""
        if user.startswith('-'):
            return self.check_user_del(user.lstrip('-'), verbose=verbose)
        r = 0
        try:
            userinfo = pwd.getpwnam(user)
        except KeyError:
            if self.try_create_user(props):
                if verbose:
                    perror('user', user, 'does not exist')
                return RET_ERR
            else:
                if verbose:
                    perror('user', user, 'does not exist and not enough info to create it')
                return RET_ERR
        for prop in self.pwt:
            if prop in props:
                if prop == "password":
                    if self.initial_passwd:
                        if verbose:
                            pinfo("skip", user, "passwd checking in initial_passwd mode")
                        continue
                    if props[prop] == "x":
                        # hash lives in the shadow db; checked below
                        continue
                r |= self.check_item(user, prop, props[prop], getattr(userinfo, self.pwt[prop]), verbose=verbose)
        if 'check_home' not in props or props['check_home'] == "yes":
            r |= self.check_home_ownership(user, verbose=verbose)
        if not cap_shadow:
            return r
        try:
            usersinfo = spwd.getspnam(user)
        except KeyError:
            if "spassword" in props:
                if verbose:
                    perror(user, "not declared in /etc/shadow")
                r |= RET_ERR
            usersinfo = None
        if usersinfo is not None:
            for prop in self.spwt:
                if prop in props:
                    if prop == "spassword":
                        if self.initial_passwd:
                            if verbose:
                                pinfo("skip", user, "spasswd checking in initial_passwd mode")
                            continue
                        if props[prop] == "x":
                            continue
                    r |= self.check_item(user, prop, props[prop], getattr(usersinfo, self.spwt[prop]), verbose=verbose)
        return r

    def try_create_user(self, props):
        """Only create users in the local 'files' passwd db.
        Beware: 'files' is the implicit default when 'db' is unset."""
        if 'db' in props and props['db'] != 'files':
            return False
        return True

    def get_uid(self, user):
        """Return the uid of `user`, raising ComplianceError if absent."""
        # note: the redundant function-level 'import pwd' was dropped;
        # pwd is already imported at module level
        try:
            info = pwd.getpwnam(user)
            uid = info[2]
        except:
            perror("user %s does not exist"%user)
            raise ComplianceError()
        return uid

    def check_home_ownership(self, user, verbose=True):
        """Check that the user's home directory exists and is owned by
        the user's uid."""
        path = os.path.expanduser("~"+user)
        if not os.path.exists(path):
            if verbose:
                perror(path, "homedir does not exist")
            return RET_ERR
        tuid = self.get_uid(user)
        uid = os.stat(path).st_uid
        if uid != tuid:
            if verbose: perror(path, 'uid should be %s but is %s'%(str(tuid), str(uid)))
            return RET_ERR
        if verbose: pinfo(path, 'owner is', user)
        return RET_OK

    def fix_home_ownership(self, user):
        """Create the home directory if needed (from /etc/skel when
        available) and chown it to the user."""
        if self.check_home_ownership(user, verbose=False) == RET_OK:
            return RET_OK
        uid = self.get_uid(user)
        path = os.path.expanduser("~"+user)
        if not os.path.exists(path):
            if os.path.exists("/etc/skel"):
                cmd = ['cp', '-R', '/etc/skel/', path]
                pinfo(list2cmdline(cmd))
                p = Popen(cmd)
                out, err = p.communicate()
                r = p.returncode
                if r != 0:
                    return RET_ERR
                cmd = ['chown', '-R', str(uid), path]
                pinfo(list2cmdline(cmd))
                p = Popen(cmd)
                out, err = p.communicate()
                r = p.returncode
                if r != 0:
                    return RET_ERR
            else:
                os.makedirs(path)
                os.chown(path, uid, -1)
        return RET_OK

    def unlock_user(self, user):
        """On SunOS, unlock the account after creation: passwd -d on
        release 5.8, passwd -u on newer releases."""
        if self.sysname != "SunOS":
            return
        cmd = ["uname", "-r"]
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        if p.returncode != 0:
            return
        # bugfix: decode before comparing. communicate() returns bytes
        # on python3, so the previous bytes == str comparison was always
        # False and '-d' was never selected on SunOS 5.8.
        if bdecode(out).strip() == '5.8':
            unlock_opt = '-d'
        else:
            unlock_opt = '-u'
        cmd = ["passwd", unlock_opt, user]
        pinfo(list2cmdline(cmd))
        p = Popen(cmd)
        out, err = p.communicate()
        r = p.returncode
        if r == 0:
            return RET_OK
        else:
            return RET_ERR

    def create_user(self, user, props):
        """Create `user` with useradd, applying every declared property.
        Returns RET_OK/RET_ERR."""
        cmd = [] + self.useradd
        if self.sysname == "FreeBSD":
            cmd += [user]
        for item in props:
            if item == "check_home":
                # rule directive, not a useradd option
                continue
            prop = str(props[item])
            if len(prop) == 0:
                continue
            if item.endswith("password") and self.sysname in ("AIX", "SunOS", "OSF1"):
                # these platforms do not accept -p at creation time
                continue
            cmd = cmd + self.usermod_p[item].split() + [prop]
            if item == "home":
                cmd.append("-m")
        if self.sysname != "FreeBSD":
            cmd += [user]
        pinfo(list2cmdline(cmd))
        p = Popen(cmd)
        out, err = p.communicate()
        r = p.returncode
        if r == 0:
            if self.unlock_user(user) == RET_ERR:
                return RET_ERR
            return RET_OK
        else:
            return RET_ERR

    def fix_user_del(self, user):
        """Delete `user`, refusing to touch well-known system accounts."""
        if user in blacklist:
            perror("delete", user, "... cowardly refusing")
            return RET_ERR
        cmd = self.userdel + [user]
        pinfo(list2cmdline(cmd))
        p = Popen(cmd)
        out, err = p.communicate()
        r = p.returncode
        if r == 0:
            return RET_OK
        else:
            return RET_ERR

    def fix_user(self, user, props):
        """Create or fix one user so it matches `props`.
        Returns an or-ed RET_OK/RET_ERR."""
        if user.startswith('-'):
            return self.fix_user_del(user.lstrip('-'))
        r = 0
        try:
            userinfo = pwd.getpwnam(user)
        except KeyError:
            if self.try_create_user(props):
                return self.create_user(user, props)
            else:
                pinfo('user', user, 'does not exist and not enough info to create it')
                return RET_OK
        for prop in self.pwt:
            if prop in props and \
               self.check_item(user, prop, props[prop], getattr(userinfo, self.pwt[prop])) != RET_OK:
                r |= self.fix_item(user, prop, props[prop])
        if 'check_home' not in props or props['check_home'] == "yes":
            r |= self.fix_home_ownership(user)
        if not cap_shadow:
            return r
        try:
            usersinfo = spwd.getspnam(user)
        except KeyError:
            if "spassword" in props:
                self.fix_item(user, "spassword", props["spassword"])
                usersinfo = spwd.getspnam(user)
            else:
                usersinfo = None
        if usersinfo is not None:
            for prop in self.spwt:
                if prop in props and \
                   self.check_item(user, prop, props[prop], getattr(usersinfo, self.spwt[prop])) != RET_OK:
                    r |= self.fix_item(user, prop, props[prop])
        return r

    def check(self):
        r = 0
        for user, props in self.users.items():
            r |= self.check_user(user, props)
        return r

    def fix(self):
        r = 0
        for user, props in self.users.items():
            if self.check_user(user, props, verbose=False) == RET_ERR:
                r |= self.fix_user(user, props)
        return r
if __name__ == "__main__":
    # hand control to the opensvc compliance framework entry point
    main(CompUser)
opensvc-1.8~20170412/var/compliance/com.opensvc/authkey.py 0000755 0001750 0001750 00000041652 13073467726 023456 0 ustar jkelbert jkelbert #!/usr/bin/env python
data = {
"default_prefix": "OSVC_COMP_AUTHKEY_",
"example_value": """
{
"action": "add",
"authfile": "authorized_keys",
"user": "testuser",
"key": "ssh-dss AAAAB3NzaC1kc3MAAACBAPiO1jlT+5yrdPLfQ7sYF52NkfCEzT0AUUNIl+14Sbkubqe+TcU7U3taUtiDJ5YOGOzIVFIDGGtwD0AqNHQbvsiS1ywtC5BJ9362FlrpVH4o1nVZPvMxRzz5hgh3HjxqIWqwZDx29qO8Rg1/g1Gm3QYCxqPFn2a5f2AUiYqc1wtxAAAAFQC49iboZGNqssicwUrX6TUrT9H0HQAAAIBo5dNRmTF+Vd/+PI0JUOIzPJiHNKK9rnySlaxSDml9hH2LuDSjYz7BWuNP8UnPOa2pcFA4meDp5u8d5dGOWxkuYO0bLnXwDZuHtDW/ySytjwEaBLPxoqRBAyfyQNlusGsuiqDYRA7j7bS0RxINBxvDw79KdyQhuOn8/lKVG+sjrQAAAIEAoShly/JlGLQxQzPyWADV5RFlaRSPaPvFzcYT3hS+glkVd6yrCbzc30Yc8Ndu4cflQiXSZzRoUMgsy5PzuiH1M8JjwHTGNl8r9OfJpnN/OaAhMpIyA06y1ZZD9iEME3UmthFQoZnfRuE3yxi7bqyXJU4rOq04iyCTpU1UKInPdXQ= testuser"
}
""",
"description": """* Installs or removes ssh public keys from authorized_key files
* Looks up the authorized_key and authorized_key2 file location in the running sshd daemon configuration.
* Add user to sshd_config AllowUser and AllowGroup if used
* Reload sshd if sshd_config has been changed
""",
"form_definition": """
Desc: |
Describe a list of ssh public keys to authorize login as the specified Unix user.
Css: comp48
Outputs:
-
Dest: compliance variable
Type: json
Format: dict
Class: authkey
Inputs:
-
Id: action
Label: Action
DisplayModeLabel: action
LabelCss: action16
Mandatory: Yes
Type: string
Candidates:
- add
- del
Help: Defines wether the public key must be installed or uninstalled.
-
Id: user
Label: User
DisplayModeLabel: user
LabelCss: guy16
Mandatory: Yes
Type: string
Help: Defines the Unix user name who will accept those ssh public keys.
-
Id: key
Label: Public key
DisplayModeLabel: key
LabelCss: guy16
Mandatory: Yes
Type: text
DisplayModeTrim: 60
Help: The ssh public key as seen in authorized_keys files.
-
Id: authfile
Label: Authorized keys file name
DisplayModeLabel: authfile
LabelCss: hd16
Mandatory: Yes
Candidates:
- authorized_keys
- authorized_keys2
Default: authorized_keys2
Type: string
Help: The authorized_keys file to write the keys into.
"""
}
import os
import sys
import pwd, grp
import datetime
import shutil
from subprocess import *
sys.path.append(os.path.dirname(__file__))
from comp import *
class CompAuthKeys(CompObject):
def __init__(self, prefix=None):
CompObject.__init__(self, prefix=prefix, data=data)
def init(self):
self.authkeys = self.get_rules()
for ak in self.authkeys:
ak['key'] = ak['key'].replace('\n', '')
self.installed_keys_d = {}
self.default_authfile = "authorized_keys2"
self.allowusers_check_done = []
self.allowusers_fix_todo = []
self.allowgroups_check_done = []
self.allowgroups_fix_todo = []
def sanitize(self, ak):
if 'user' not in ak:
perror("no user set in rule")
return False
if 'key' not in ak:
perror("no key set in rule")
return False
if 'action' not in ak:
ak['action'] = 'add'
if 'authfile' not in ak:
ak['authfile'] = self.default_authfile
if ak['authfile'] not in ("authorized_keys", "authorized_keys2"):
perror("unsupported authfile:", ak['authfile'], "(default to", self.default_authfile+")")
ak['authfile'] = self.default_authfile
for key in ('user', 'key', 'action', 'authfile'):
ak[key] = ak[key].strip()
return ak
def fixable(self):
return RET_NA
def truncate_key(self, key):
if len(key) < 50:
s = key
else:
s = "'%s ... %s'" % (key[0:17], key[-30:])
return s
def reload_sshd(self):
cmd = ['ps', '-ef']
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if p.returncode != 0:
perror("can not find sshd process")
return RET_ERR
out = bdecode(out)
for line in out.splitlines():
if not line.endswith('sbin/sshd'):
continue
l = line.split()
pid = int(l[1])
name = l[-1]
pinfo("send sighup to pid %d (%s)" % (pid, name))
os.kill(pid, 1)
return RET_OK
perror("can not find sshd process to signal")
return RET_ERR
def get_sshd_config(self):
cfs = []
if hasattr(self, "cache_sshd_config_f"):
return self.cache_sshd_config_f
cmd = ['ps', '-eo', 'comm']
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if p.returncode == 0:
out = bdecode(out)
l = out.splitlines()
if '/usr/local/sbin/sshd' in l:
cfs.append(os.path.join(os.sep, 'usr', 'local', 'etc', 'sshd_config'))
if '/usr/sfw/sbin/sshd' in l:
cfs.append(os.path.join(os.sep, 'etc', 'sshd_config'))
cfs += [os.path.join(os.sep, 'etc', 'ssh', 'sshd_config'),
os.path.join(os.sep, 'opt', 'etc', 'sshd_config'),
os.path.join(os.sep, 'etc', 'opt', 'ssh', 'sshd_config'),
os.path.join(os.sep, 'usr', 'local', 'etc', 'sshd_config')]
cf = None
for _cf in cfs:
if os.path.exists(_cf):
cf = _cf
break
self.cache_sshd_config_f = cf
if cf is None:
perror("sshd_config not found")
return None
return cf
def _get_authkey_file(self, key):
if key == "authorized_keys":
# default
return ".ssh/authorized_keys"
elif key == "authorized_keys2":
key = "AuthorizedKeysFile"
else:
perror("unknown key", key)
return None
cf = self.get_sshd_config()
if cf is None:
perror("sshd_config not found")
return None
with open(cf, 'r') as f:
buff = f.read()
for line in buff.split('\n'):
l = line.split()
if len(l) != 2:
continue
if l[0].strip() == key:
return l[1]
# not found, return default
return ".ssh/authorized_keys2"
def get_allowusers(self):
if hasattr(self, "cache_allowusers"):
return self.cache_allowusers
cf = self.get_sshd_config()
if cf is None:
perror("sshd_config not found")
return None
with open(cf, 'r') as f:
buff = f.read()
for line in buff.split('\n'):
l = line.split()
if len(l) < 2:
continue
if l[0].strip() == "AllowUsers":
self.cache_allowusers = l[1:]
return l[1:]
self.cache_allowusers = None
return None
def get_allowgroups(self):
if hasattr(self, "cache_allowgroups"):
return self.cache_allowgroups
cf = self.get_sshd_config()
if cf is None:
perror("sshd_config not found")
return None
with open(cf, 'r') as f:
buff = f.read()
for line in buff.split('\n'):
l = line.split()
if len(l) < 2:
continue
if l[0].strip() == "AllowGroups":
self.cache_allowgroups = l[1:]
return l[1:]
self.cache_allowgroups = None
return None
def get_authkey_file(self, key, user):
p = self._get_authkey_file(key)
if p is None:
return None
p = p.replace('%u', user)
p = p.replace('%h', os.path.expanduser('~'+user))
p = p.replace('~', os.path.expanduser('~'+user))
if not p.startswith('/'):
p = os.path.join(os.path.expanduser('~'+user), p)
return p
def get_authkey_files(self, user):
l = []
p = self.get_authkey_file('authorized_keys', user)
if p is not None:
l.append(p)
p = self.get_authkey_file('authorized_keys2', user)
if p is not None:
l.append(p)
return l
def get_installed_keys(self, user):
if user in self.installed_keys_d:
return self.installed_keys_d[user]
else:
self.installed_keys_d[user] = []
ps = self.get_authkey_files(user)
for p in ps:
if not os.path.exists(p):
continue
with open(p, 'r') as f:
self.installed_keys_d[user] += f.read().splitlines()
return self.installed_keys_d[user]
def get_user_group(self, user):
gid = pwd.getpwnam(user).pw_gid
try:
gname = grp.getgrgid(gid).gr_name
except KeyError:
gname = None
return gname
def fix_allowusers(self, ak, verbose=True):
self.check_allowuser(ak, verbose=False)
if not ak['user'] in self.allowusers_fix_todo:
return RET_OK
self.allowusers_fix_todo.remove(ak['user'])
au = self.get_allowusers()
if au is None:
return RET_OK
l = ["AllowUsers"] + au + [ak['user']]
s = " ".join(l)
pinfo("adding", ak['user'], "to currently allowed users")
cf = self.get_sshd_config()
if cf is None:
perror("sshd_config not found")
return None
with open(cf, 'r') as f:
buff = f.read()
lines = buff.split('\n')
for i, line in enumerate(lines):
l = line.split()
if len(l) < 2:
continue
if l[0].strip() == "AllowUsers":
lines[i] = s
buff = "\n".join(lines)
backup = cf+'.'+str(datetime.datetime.now())
shutil.copy(cf, backup)
with open(cf, 'w') as f:
f.write(buff)
self.reload_sshd()
return RET_OK
def fix_allowgroups(self, ak, verbose=True):
    """Ensure the primary group of ak['user'] is listed in the sshd_config
    AllowGroups directive.

    Rewrites sshd_config in place (after saving a timestamped backup) and
    reloads sshd. Returns RET_OK, or RET_ERR on failure.
    """
    # check_allowgroup(verbose=False) populates self.allowgroups_fix_todo
    # as a side effect
    self.check_allowgroup(ak, verbose=False)
    if not ak['user'] in self.allowgroups_fix_todo:
        return RET_OK
    self.allowgroups_fix_todo.remove(ak['user'])
    ag = self.get_allowgroups()
    if ag is None:
        # no AllowGroups directive: sshd does not restrict by group
        return RET_OK
    ak['group'] = self.get_user_group(ak['user'])
    if ak['group'] is None:
        perror("can not set AllowGroups in sshd_config: primary group of user %s not found" % ak['user'])
        return RET_ERR
    l = ["AllowGroups"] + ag + [ak['group']]
    s = " ".join(l)
    pinfo("adding", ak['group'], "to currently allowed groups")
    cf = self.get_sshd_config()
    if cf is None:
        perror("sshd_config not found")
        return RET_ERR
    with open(cf, 'r') as f:
        buff = f.read()
    lines = buff.split('\n')
    for i, line in enumerate(lines):
        l = line.split()
        if len(l) < 2:
            continue
        # replace every AllowGroups line with the augmented directive
        if l[0].strip() == "AllowGroups":
            lines[i] = s
    buff = "\n".join(lines)
    # keep a timestamped copy of the previous configuration
    backup = cf+'.'+str(datetime.datetime.now())
    shutil.copy(cf, backup)
    with open(cf, 'w') as f:
        f.write(buff)
    self.reload_sshd()
    return RET_OK
def check_allowuser(self, ak, verbose=True):
    """Verify ak['user'] is allowed by the sshd AllowUsers directive.

    Records users needing a fix in self.allowusers_fix_todo. Each user
    is checked at most once per run. Returns RET_OK or RET_ERR.
    """
    user = ak['user']
    if user in self.allowusers_check_done:
        return RET_OK
    self.allowusers_check_done.append(user)
    allowed = self.get_allowusers()
    if allowed is None:
        # no AllowUsers directive: nothing to verify
        return RET_OK
    if user in allowed:
        if verbose:
            pinfo(user, "is correctly set in sshd AllowUsers")
        return RET_OK
    if verbose:
        perror(user, "is not set in sshd AllowUsers")
    self.allowusers_fix_todo.append(user)
    return RET_ERR
def check_allowgroup(self, ak, verbose=True):
    """Verify the primary group of ak['user'] is allowed by the sshd
    AllowGroups directive.

    Stores the resolved group in ak['group'] and records users needing a
    fix in self.allowgroups_fix_todo. Returns RET_OK or RET_ERR.
    """
    user = ak['user']
    if user in self.allowgroups_check_done:
        return RET_OK
    self.allowgroups_check_done.append(user)
    allowed = self.get_allowgroups()
    if allowed is None:
        # no AllowGroups directive: nothing to verify
        return RET_OK
    ak['group'] = self.get_user_group(user)
    if ak['group'] is None:
        if verbose:
            perror("can not determine primary group of user %s to add to AllowGroups" % user)
        return RET_ERR
    if ak['group'] in allowed:
        if verbose:
            pinfo(ak['group'], "is correctly set in sshd AllowGroups")
        return RET_OK
    if verbose:
        perror(ak['group'], "is not set in sshd AllowGroups")
    self.allowgroups_fix_todo.append(user)
    return RET_ERR
def check_authkey(self, ak, verbose=True):
    """Check whether one authkey rule is satisfied.

    action 'add': the key must be installed for the user.
    action 'del': the key must be absent.
    Returns RET_OK / RET_ERR.
    """
    ak = self.sanitize(ak)
    installed = ak['key'] in self.get_installed_keys(ak['user'])
    if ak['action'] == 'add':
        if installed:
            if verbose:
                pinfo('key', self.truncate_key(ak['key']), 'is correctly installed for user', ak['user'])
            return RET_OK
        if verbose:
            perror('key', self.truncate_key(ak['key']), 'must be installed for user', ak['user'])
        return RET_ERR
    if ak['action'] == 'del':
        if installed:
            if verbose:
                perror('key', self.truncate_key(ak['key']), 'must be uninstalled for user', ak['user'])
            return RET_ERR
        if verbose:
            pinfo('key', self.truncate_key(ak['key']), 'is correctly not installed for user', ak['user'])
        return RET_OK
    perror("unsupported action:", ak['action'])
    return RET_ERR
def fix_authkey(self, ak):
    """Apply one authkey rule: install ('add') or remove ('del') the key.
    Returns RET_OK / RET_ERR."""
    ak = self.sanitize(ak)
    action = ak['action']
    if action == 'add':
        return self.add_authkey(ak)
    if action == 'del':
        return self.del_authkey(ak)
    perror("unsupported action:", action)
    return RET_ERR
def add_authkey(self, ak):
    """Install ak['key'] in the user's authorized keys file, creating the
    file and its directory (mode 0700/0600, user-owned when under the
    user's home) as needed. Returns RET_OK / RET_ERR.

    Fix: user-facing message typo "ownetship" -> "ownership".
    """
    if self.check_authkey(ak, verbose=False) == RET_OK:
        # already installed
        return RET_OK
    try:
        userinfo = pwd.getpwnam(ak['user'])
    except KeyError:
        perror('user', ak['user'], 'does not exist')
        return RET_ERR
    p = self.get_authkey_file(ak['authfile'], ak['user'])
    if p is None:
        perror("could not determine", ak['authfile'], "location")
        return RET_ERR
    base = os.path.dirname(p)
    if not os.path.exists(base):
        os.makedirs(base, 0o0700)
        pinfo(base, "created")
        # only chown directories living under the user's home
        if p.startswith(os.path.expanduser('~'+ak['user'])):
            os.chown(base, userinfo.pw_uid, userinfo.pw_gid)
            pinfo(base, "ownership set to %d:%d"%(userinfo.pw_uid, userinfo.pw_gid))
    if not os.path.exists(p):
        with open(p, 'w') as f:
            f.write("")
        pinfo(p, "created")
        os.chmod(p, 0o0600)
        pinfo(p, "mode set to 0600")
        os.chown(p, userinfo.pw_uid, userinfo.pw_gid)
        pinfo(p, "ownership set to %d:%d"%(userinfo.pw_uid, userinfo.pw_gid))
    with open(p, 'a') as f:
        f.write(ak['key'])
        # keep the file newline-terminated
        if not ak['key'].endswith('\n'):
            f.write('\n')
    pinfo('key', self.truncate_key(ak['key']), 'installed for user', ak['user'])
    return RET_OK
def del_authkey(self, ak):
    """Remove every occurrence of ak['key'] from each authorized keys
    file of ak['user']. Returns RET_OK.

    Fix: dropped the unused local 'base = os.path.basename(p)'.
    """
    if self.check_authkey(ak, verbose=False) == RET_OK:
        pinfo('key', self.truncate_key(ak['key']), 'is already not installed for user', ak['user'])
        return RET_OK
    for p in self.get_authkey_files(ak['user']):
        if not os.path.exists(p):
            continue
        with open(p, 'r') as f:
            l = f.read().split('\n')
        n = len(l)
        # remove every line matching the key
        while True:
            try:
                l.remove(ak['key'].replace('\n', ''))
            except ValueError:
                break
        if len(l) == n:
            # nothing changed
            continue
        with open(p, 'w') as f:
            f.write('\n'.join(l))
        pinfo('key', self.truncate_key(ak['key']), 'uninstalled for user', ak['user'])
    return RET_OK
def check(self):
    """Check every authkey rule; 'add' rules also verify the sshd
    AllowUsers/AllowGroups directives. Returns the OR-ed statuses."""
    status = 0
    for ak in self.authkeys:
        status |= self.check_authkey(ak)
        if ak['action'] == 'add':
            status |= self.check_allowgroup(ak)
            status |= self.check_allowuser(ak)
    return status
def fix(self):
    """Fix every authkey rule; 'add' rules also fix the sshd
    AllowUsers/AllowGroups directives. Returns the OR-ed statuses."""
    status = 0
    for ak in self.authkeys:
        status |= self.fix_authkey(ak)
        if ak['action'] == 'add':
            status |= self.fix_allowgroups(ak)
            status |= self.fix_allowusers(ak)
    return status
# CLI entry point: comp.main() dispatches check/fix/fixable to this class.
if __name__ == "__main__":
    main(CompAuthKeys)
opensvc-1.8~20170412/var/compliance/com.opensvc/group_membership.py 0000755 0001750 0001750 00000017324 13073467726 025352 0 ustar jkelbert jkelbert #!/usr/bin/env python
data = {
"default_prefix": "OSVC_COMP_GROUP_",
"example_value": """
{
"tibco": {
"members": ["tibco", "tibco1"]
},
"tibco1": {
"members": ["tibco1"]
}
}
""",
"description": """* Verify a local system group configuration
* A minus (-) prefix to the group name indicates the user should not exist
""",
"form_definition": """
Desc: |
A rule defining a list of Unix groups and their user membership. The referenced users and groups must exist.
Css: comp48
Outputs:
-
Dest: compliance variable
Type: json
Format: dict of dict
Key: group
EmbedKey: No
Class: group_membership
Inputs:
-
Id: group
Label: Group name
DisplayModeLabel: group
LabelCss: guys16
Mandatory: Yes
Type: string
Help: The Unix group name.
-
Id: members
Label: Group members
DisplayModeLabel: members
LabelCss: guy16
Type: list of string
Help: A comma-separed list of Unix user names members of this group.
""",
}
import os
import sys
import json
import grp
from subprocess import *
from utilities import which
sys.path.append(os.path.dirname(__file__))
from comp import *
class CompGroupMembership(CompObject):
    """Check and fix secondary group membership of Unix users, driven by
    OSVC_COMP_GROUP_* rules of the form {group: {"members": [users]}}.
    Fixes are applied with 'usermod -G'."""

    def __init__(self, prefix=None):
        CompObject.__init__(self, prefix=prefix, data=data)

    def init(self):
        # {user: secondary groups} cache filled by member_of()
        self.member_of_h = {}
        # rule property -> grp struct attribute
        self.grt = {
            'members': 'gr_mem',
        }
        self.sysname, self.nodename, x, x, self.machine = os.uname()
        if self.sysname not in ['SunOS', 'Linux', 'HP-UX', 'AIX', 'OSF1']:
            perror('group_membership: compliance object not supported on', self.sysname)
            raise NotApplicable
        self.groups = {}
        for d in self.get_rules():
            if type(d) != dict:
                continue
            # strip whitespace around member names in place
            for k, v in d.items():
                if "members" not in v:
                    continue
                for i, m in enumerate(v["members"]):
                    d[k]["members"][i] = m.strip()
            self.groups.update(d)
        # prefer the XPG4 id on Solaris (supports -gn/-Gn)
        if os.path.exists('/usr/xpg4/bin/id'):
            self.id_bin = '/usr/xpg4/bin/id'
        else:
            self.id_bin = 'id'

    def get_primary_group(self, user):
        """Return the primary group name of <user> per 'id -gn', or None.
        NOTE(review): output is not passed through bdecode, so under
        Python 3 this returns bytes and comparisons with str group names
        would always fail — verify."""
        cmd = [self.id_bin, "-gn", user]
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        if p.returncode != 0:
            return
        return out.strip()

    def member_of(self, user, refresh=False):
        """Return <user>'s secondary groups (primary group excluded),
        cached in self.member_of_h unless refresh is set."""
        if not refresh and user in self.member_of_h:
            # cache hit
            return self.member_of_h[user]
        eg = self.get_primary_group(user)
        if eg is None:
            self.member_of_h[user] = []
            return []
        cmd = [self.id_bin, "-Gn", user]
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        if p.returncode != 0:
            self.member_of_h[user] = []
            return self.member_of_h[user]
        ag = set(out.strip().split())
        # exclude the primary group
        ag -= set([eg])
        self.member_of_h[user] = ag
        return self.member_of_h[user]

    def fixable(self):
        return RET_NA

    def del_member(self, group, user):
        """Remove <group> from <user>'s secondary groups."""
        ag = self.member_of(user)
        if len(ag) == 0:
            return 0
        g = ag - set([group])
        g = ','.join(g)
        return self.fix_member(g, user)

    def add_member(self, group, user):
        """Add <group> to <user>'s secondary groups, refusing unknown users
        and skipping users whose primary group is already <group>."""
        if 0 != self._check_member_accnt(user):
            perror('group', group+':', 'cannot add inexistant user "%s"'%user)
            return RET_ERR
        if self.get_primary_group(user) == group:
            pinfo("group %s is already the primary group of user %s: skip declaration as a secondary group (you may want to change your rule)" % (group, user))
            return RET_OK
        ag = self.member_of(user)
        g = ag | set([group])
        g = ','.join(g)
        return self.fix_member(g, user)

    def fix_member(self, g, user):
        """Reset <user>'s full secondary group list to <g> via usermod -G."""
        cmd = ['usermod', '-G', g, user]
        pinfo("group_membership:", ' '.join(cmd))
        p = Popen(cmd)
        out, err = p.communicate()
        r = p.returncode
        # refresh the membership cache after the change
        ag = self.member_of(user, refresh=True)
        if r == 0:
            return RET_OK
        else:
            return RET_ERR

    def fix_members(self, group, target):
        """Add each missing target user to <group>."""
        r = 0
        for user in target:
            if group in self.member_of(user):
                continue
            r += self.add_member(group, user)
        return r

    def fix_item(self, group, item, target):
        if item == 'members':
            return self.fix_members(group, target)
        else:
            perror("group_membership:", 'no fix implemented for', item)
            return RET_ERR

    def _check_member_accnt(self, user):
        """Return 0 when the user account exists (getent/pwget), non-zero
        otherwise; 0 when neither lookup tool is available."""
        if which('getent'):
            xcmd = ['getent', 'passwd', user]
        elif which('pwget'):
            xcmd = ['pwget', '-n', user]
        else:
            return 0
        xp = Popen(xcmd, stdout=PIPE, stderr=PIPE, close_fds=True)
        xout, xerr = xp.communicate()
        return xp.returncode

    def _check_members_accnts(self, group, user_list, which, verbose):
        # NOTE: the 'which' parameter shadows utilities.which in this scope;
        # here it is a label ('existing'/'target') used in messages
        r = RET_OK
        for user in user_list:
            rc = self._check_member_accnt(user)
            if rc != 0:
                r |= RET_ERR
                if verbose:
                    perror('group', group, '%s member "%s" does not exist'%(which, user))
        return r

    def filter_target(self, group, target):
        """Drop target users whose primary group is already <group>."""
        new_target = []
        for user in target:
            pg = self.get_primary_group(user)
            if pg == group:
                continue
            new_target.append(user)
        discarded = set(target)-set(new_target)
        if len(discarded) > 0:
            pinfo("group %s members discarded: %s, as they already use this group as primary (you may want to change your rule)" % (group, ', '.join(discarded)))
        return new_target

    def check_item(self, group, item, target, current, verbose=False):
        """Verify the target member list is a subset of the current one,
        and that referenced accounts exist."""
        r = RET_OK
        if item == 'members':
            r |= self._check_members_accnts(group, current, 'existing', verbose)
            r |= self._check_members_accnts(group, target, 'target', verbose)
        if not isinstance(current, list):
            current = [current]
        target = self.filter_target(group, target)
        if set(target) <= set(current):
            if verbose:
                pinfo('group', group, item+':', ', '.join(current))
            return r
        else:
            if verbose:
                perror('group', group, item+':', ', '.join(current), '| target:', ', '.join(target))
            return r|RET_ERR

    def check_group(self, group, props):
        """Check one group rule; a missing group is considered compliant."""
        r = 0
        try:
            groupinfo = grp.getgrnam(group)
        except KeyError:
            pinfo('group', group, 'does not exist')
            return RET_OK
        for prop in self.grt:
            if prop in props:
                r |= self.check_item(group, prop, props[prop], getattr(groupinfo, self.grt[prop]), verbose=True)
        return r

    def fix_group(self, group, props):
        """Fix one group rule; a missing group is left alone."""
        r = 0
        try:
            groupinfo = grp.getgrnam(group)
        except KeyError:
            pinfo('group', group, 'does not exist')
            return RET_OK
        for prop in self.grt:
            if prop in props and \
               self.check_item(group, prop, props[prop], getattr(groupinfo, self.grt[prop])) != RET_OK:
                r |= self.fix_item(group, prop, props[prop])
        return r

    def check(self):
        r = 0
        for group, props in self.groups.items():
            r |= self.check_group(group, props)
        return r

    def fix(self):
        r = 0
        for group, props in self.groups.items():
            r |= self.fix_group(group, props)
        return r
# CLI entry point: comp.main() dispatches check/fix/fixable to this class.
if __name__ == "__main__":
    main(CompGroupMembership)
opensvc-1.8~20170412/var/compliance/com.opensvc/sysctl.py 0000755 0001750 0001750 00000020242 13073467726 023315 0 ustar jkelbert jkelbert #!/usr/bin/env python
data = {
"default_prefix": "OSVC_COMP_SYSCTL_",
"example_value": """
{
"key": "vm.lowmem_reserve_ratio",
"index": 1,
"op": ">",
"value": 256
}
""",
"description": """* Verify a linux kernel parameter value is on target
* Live parameter value (sysctl executable)
* Persistent parameter value (/etc/sysctl.conf)
""",
"form_definition": """
Desc: |
A rule to set a list of Linux kernel parameters to be set in /etc/sysctl.conf. Current values can be checked as strictly equal, or superior/inferior to their target value. Each field in a vectored value can be tuned independantly using the index key.
Css: comp48
Outputs:
-
Dest: compliance variable
Type: json
Format: list of dict
Class: sysctl
Inputs:
-
Id: key
Label: Key
DisplayModeLabel: key
LabelCss: action16
Mandatory: Yes
Type: string
Help: The /etc/sysctl.conf parameter to check.
-
Id: index
Label: Index
DisplayModeLabel: idx
LabelCss: action16
Mandatory: Yes
Default: 0
Type: integer
Help: The /etc/sysctl.conf parameter to check.
-
Id: op
Label: Comparison operator
DisplayModeLabel: op
LabelCss: action16
Mandatory: Yes
Type: string
Default: "="
Candidates:
- "="
- ">"
- ">="
- "<"
- "<="
Help: The comparison operator to use to check the parameter current value.
-
Id: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string or integer
Help: The /etc/sysctl.conf parameter target value.
""",
}
import os
import sys
import json
import pwd
from subprocess import *
sys.path.append(os.path.dirname(__file__))
from comp import *
class Sysctl(CompObject):
    """Check and fix Linux kernel parameters, both persistently in
    /etc/sysctl.conf and live via the sysctl executable.

    Fix: check_key() used to silently report ops '>' and '<' as on-target
    (they fell into the final else) although the rule form offers them;
    they are now evaluated explicitly.
    """

    def __init__(self, prefix=None):
        CompObject.__init__(self, prefix=prefix, data=data)

    def init(self):
        if os.uname()[0] != "Linux":
            raise NotApplicable()
        self.need_reload = False
        self.cf = os.path.join(os.sep, "etc", "sysctl.conf")
        if not os.path.exists(self.cf):
            perror(self.cf, 'does not exist')
            raise NotApplicable()
        self.keys = []
        # lazy {key: [fields]} view of sysctl.conf, filled by get_keys()
        self.cache = None
        self.keys = self.get_rules()
        if len(self.keys) == 0:
            raise NotApplicable()
        self.convert_keys()

    def fixable(self):
        return RET_OK

    def parse_val(self, val):
        """Split a sysctl value into fields, converting numeric fields to int."""
        val = list(map(lambda x: x.strip(), val.strip().split()))
        for i, e in enumerate(val):
            try:
                val[i] = int(e)
            except:
                pass
        return val

    def get_keys(self):
        """Load and cache /etc/sysctl.conf as {key: [fields]}."""
        with open(self.cf, 'r') as f:
            buff = f.read()
        if self.cache is None:
            self.cache = {}
        for line in buff.splitlines():
            line = line.strip()
            if line.startswith('#'):
                continue
            l = line.split('=')
            if len(l) != 2:
                continue
            key = l[0].strip()
            val = self.parse_val(l[1])
            self.cache[key] = val

    def get_live_key(self, key):
        """Return the live kernel value of <key>, or None when unknown."""
        p = Popen(['sysctl', key], stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        if p.returncode != 0:
            return None
        l = bdecode(out).split('=')
        if len(l) != 2:
            return None
        val = self.parse_val(l[1])
        return val

    def get_key(self, key):
        """Return the sysctl.conf value of <key>, or None when absent."""
        if self.cache is None:
            self.get_keys()
        if key not in self.cache:
            return None
        return self.cache[key]

    def fix_key(self, key):
        """Write the rule's target value into sysctl.conf, removing
        redundant declarations of the same key. Falls back to the live
        kernel value when the key is not declared in sysctl.conf."""
        done = False
        target = key['value']
        index = key['index']
        with open(self.cf, 'r') as f:
            buff = f.read()
        lines = buff.split('\n')
        for i, line in enumerate(lines):
            line = line.strip()
            if line.startswith('#'):
                continue
            l = line.split('=')
            if len(l) != 2:
                continue
            keyname = l[0].strip()
            if key['key'] != keyname:
                continue
            if done:
                pinfo("sysctl: remove redundant key %s"%keyname)
                # NOTE(review): deleting while enumerating shifts indexes;
                # fine for one redundant line, suspicious for several — verify
                del lines[i]
                continue
            val = self.parse_val(l[1])
            if target == val[index]:
                done = True
                continue
            pinfo("sysctl: set %s[%d] = %s"%(keyname, index, str(target)))
            val[index] = target
            lines[i] = "%s = %s"%(keyname, " ".join(map(str, val)))
            done = True
        if not done:
            # if key is not in sysctl.conf, get the value from kernel
            val = self.get_live_key(key['key'])
            if val is None:
                perror("key '%s' not found in live kernel parameters" % key['key'])
                return RET_ERR
            if target != val[index]:
                val[index] = target
            pinfo("sysctl: set %s = %s"%(key['key'], " ".join(map(str, val))))
            lines += ["%s = %s"%(key['key'], " ".join(map(str, val)))]
        try:
            with open(self.cf, 'w') as f:
                f.write('\n'.join(lines))
        except:
            perror("failed to write sysctl.conf")
            return RET_ERR
        return RET_OK

    def convert_keys(self):
        """Expand vectored rule values into one flat rule per index."""
        keys = []
        for key in self.keys:
            keyname = key['key']
            value = key['value']
            if type(value) == list:
                if len(value) > 0 and type(value[0]) != list:
                    value = [value]
                for i, v in enumerate(value):
                    keys.append({
                        "key": keyname,
                        "index": i,
                        "op": v[0],
                        "value": v[1],
                    })
            elif 'key' in key and 'index' in key and 'op' in key and 'value' in key:
                keys.append(key)
        self.keys = keys

    def check_key(self, key, verbose=False):
        """Compare the sysctl.conf value (and the live value) of one rule.
        Flags need_reload when the file is on target but the kernel is not."""
        r = RET_OK
        keyname = key['key']
        target = key['value']
        op = key['op']
        i = key['index']
        current_value = self.get_key(keyname)
        current_live_value = self.get_live_key(keyname)
        if current_value is None:
            if verbose:
                perror("key '%s' not found in sysctl.conf"%keyname)
            return RET_ERR
        if op == "=" and str(current_value[i]) != str(target):
            if verbose:
                perror("sysctl err: %s[%d] = %s, target: %s"%(keyname, i, str(current_value[i]), str(target)))
            r |= RET_ERR
        elif op == ">=" and type(target) == int and current_value[i] < target:
            if verbose:
                perror("sysctl err: %s[%d] = %s, target: >= %s"%(keyname, i, str(current_value[i]), str(target)))
            r |= RET_ERR
        elif op == "<=" and type(target) == int and current_value[i] > target:
            if verbose:
                perror("sysctl err: %s[%d] = %s, target: <= %s"%(keyname, i, str(current_value[i]), str(target)))
            r |= RET_ERR
        elif op == ">" and type(target) == int and current_value[i] <= target:
            if verbose:
                perror("sysctl err: %s[%d] = %s, target: > %s"%(keyname, i, str(current_value[i]), str(target)))
            r |= RET_ERR
        elif op == "<" and type(target) == int and current_value[i] >= target:
            if verbose:
                perror("sysctl err: %s[%d] = %s, target: < %s"%(keyname, i, str(current_value[i]), str(target)))
            r |= RET_ERR
        else:
            if verbose:
                pinfo("sysctl ok: %s[%d] = %s, on target"%(keyname, i, str(current_value[i])))
        if r == RET_OK and current_live_value is not None and current_value != current_live_value:
            if verbose:
                perror("sysctl err: %s on target in sysctl.conf but kernel value is different"%(keyname))
            self.need_reload = True
            r |= RET_ERR
        return r

    def check(self):
        r = 0
        for key in self.keys:
            r |= self.check_key(key, verbose=True)
        return r

    def reload_sysctl(self):
        """Apply sysctl.conf to the live kernel (sysctl -e -p)."""
        cmd = ['sysctl', '-e', '-p']
        pinfo("sysctl:", " ".join(cmd))
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        p.communicate()
        if p.returncode != 0:
            perror("reload failed")
            return RET_ERR
        return RET_OK

    def fix(self):
        r = 0
        for key in self.keys:
            if self.check_key(key, verbose=False) == RET_ERR:
                self.need_reload = True
                r |= self.fix_key(key)
        if self.need_reload:
            r |= self.reload_sysctl()
        return r
# CLI entry point: comp.main() dispatches check/fix/fixable to this class.
if __name__ == "__main__":
    main(Sysctl)
opensvc-1.8~20170412/var/compliance/com.opensvc/smfcfgs.py 0000755 0001750 0001750 00000031321 13073467726 023424 0 ustar jkelbert jkelbert #!/usr/bin/env python
"""
The ENV variable format is json-serialized [list of dict]:
[
{
"fmri": "svc:/network/ntp"
"prop": "config/slew_always"
"type": "boolean"
"value": "true"
"inorder": 0
"create": 1
"reload": 0
"sleep": 0
}
{
"fmri": "svc:/network/dns/client"
"prop": "config/nameserver"
"type": "net_address"
"value": "172.30.65.165 172.30.65.164"
"inorder": 0
"create": 1
"reload": 0
"sleep": 6
}
{
"fmri": "svc:/network/dns/client"
"prop": "config/search"
"type": "astring"
"value": "cpdev.local cpprod.root.local cpgrp.root.local"
"inorder": 1
"create": 1
"reload": 0
"sleep": 9
}
]
"""
import os
import sys
import json
import re
from subprocess import *
sys.path.append(os.path.dirname(__file__))
from comp import *
class AutoInst(dict):
    """Autovivifying dict: reading a missing key creates, stores and
    returns a nested AutoInst, so deep assignments work without setup."""
    def __missing__(self, key):
        # dict.__getitem__ calls this hook on a missing key
        node = self[key] = type(self)()
        return node
class SmfCfgS(object):
    """Check and fix Solaris >= 11 SMF service properties via svccfg,
    driven by OSVC_COMP_SMF_CFGS_* environment variables.

    Fixes: get_fmri() referenced the undefined attribute self.service in
    its service-absent path (AttributeError) — it now reports the fmri
    argument; message typo "behond" -> "beyond".
    """

    def __init__(self, prefix='OSVC_COMP_SMF_CFGS_'):
        self.prefix = prefix.upper()
        self.sysname, self.nodename, self.osn, self.solv, self.machine = os.uname()
        self.data = []
        # smfs[fmri][prop] = {val, rval, typ, rtyp, ino, cre, rel, slp}
        self.smfs = AutoInst()
        self.osver = float(self.osn)
        if self.osver < 5.11:
            pinfo('Only used on Solaris 11 and beyond')
            return
        for k in [ key for key in os.environ if key.startswith(self.prefix)]:
            try:
                self.data += self.add_fmri(os.environ[k])
            except ValueError:
                perror('failed to parse variable', os.environ[k])
        for f in self.data:
            s,p,t,v = self.get_fmri(f['fmri'], f['prop'])
            if s is None:
                # service absent: rule ignored
                continue
            cre = False
            if p is None:
                if f['create'] == 0:
                    perror('FMRI:%s, PROP:%s is absent and create is False' %(s,f['prop']))
                    continue
                else:
                    p = f['prop']
                    cre = True
            if f['inorder'] == 0:
                ino = False
            else:
                ino = True
            if f['reload'] == 0:
                rel = False
            else:
                rel = True
            self.smfs[f['fmri']][p] = { 'val': f['value'], 'rval': v,
                                        'typ': f['type'] , 'rtyp': t,
                                        'ino': ino,
                                        'cre': cre,
                                        'rel': rel,
                                        'slp': f['sleep']
                                      }

    def subst(self, v):
        """Recursively substitute %%ENV:VAR%% markers with environment
        values (bare or OSVC_COMP_-prefixed). Raises NotApplicable on an
        unknown variable."""
        if type(v) == list:
            l = []
            for _v in v:
                l.append(self.subst(_v))
            return l
        # NOTE(review): 'unicode' exists only under Python 2 — verify
        if type(v) != str and type(v) != unicode:
            return v
        p = re.compile('%%ENV:\w+%%')
        for m in p.findall(v):
            s = m.strip("%").replace('ENV:', '')
            if s in os.environ:
                _v = os.environ[s]
            elif 'OSVC_COMP_'+s in os.environ:
                _v = os.environ['OSVC_COMP_'+s]
            else:
                perror(s, 'is not an env variable')
                raise NotApplicable()
            v = v.replace(m, _v)
        return v

    def add_fmri(self, v):
        """Parse one rule (json string, dict or list thereof) into a list
        of validated rule dicts with env substitution applied."""
        if type(v) == str or type(v) == unicode:
            d = json.loads(v)
        else:
            d = v
        l = []
        # recurse if multiple FMRI are specified in a list of dict
        if type(d) == list:
            for _d in d:
                l += self.add_fmri(_d)
            return l
        if type(d) != dict:
            perror("not a dict:", d)
            return l
        # NOTE(review): the RET assignments below create an unused local
        # and do not propagate an error status — verify intent
        if 'fmri' not in d:
            perror('FMRI should be in the dict:', d)
            RET = RET_ERR
            return l
        if 'prop' not in d:
            perror('prop should be in the dict:', d)
            RET = RET_ERR
            return l
        if 'value' not in d:
            perror('value should be in the dict:', d)
            RET = RET_ERR
            return l
        if 'create' in d:
            if d['create'] == 1:
                if not 'type' in d:
                    perror('create True[1] needs a type:', d)
                    RET = RET_ERR
                    return l
        for k in ('fmri', 'prop', 'value', 'inorder', 'type', 'create', 'sleep'):
            if k in d:
                d[k] = self.subst(d[k])
        return [d]

    def fixable(self):
        return RET_NA

    def get_fmri(self, s, p):
        """Return (service, prop, type, values) for prop <p> of service <s>:
        (None, None, None, None) when the service is absent,
        (s, None, None, None) when the property is absent."""
        cmd = ['/usr/sbin/svccfg','-s', s, 'listprop', p]
        po = Popen(cmd, stdout=PIPE, stderr=PIPE)
        out, err = po.communicate()
        out = bdecode(out)
        err = bdecode(err)
        if po.returncode != 0:
            if "doesn't match" in err:
                # fixed: was self.service, an attribute never set
                pinfo('%s is absent => IGNORED' % s)
                return None,None,None,None
            else:
                perror(' '.join(cmd))
                raise ComplianceError()
        if len(out) < 2:
            return s,None,None,None
        x = out.strip('\n').split()
        if x[0] != p:
            perror(' '.join([s, 'wanted:%s'%p, 'got:%s'%x[0]]))
            raise ComplianceError()
        return s,p,x[1],x[2:]

    def check_smf_prop_cre(self, s, p, verbose=True):
        """Return (status, create_flag): RET_ERR when the prop must be
        created; also flags a missing type, which creation requires."""
        r = RET_OK
        if self.smfs[s][p]['cre']:
            if verbose:
                perror('NOK: %s Prop %s shall be created' %(s,p))
            r |= RET_ERR
            if self.smfs[s][p]['typ'] == '' or self.smfs[s][p]['typ'] == None:
                if verbose:
                    perror('NOK: %s type must be specified to create %s' %(s,p))
        return r,self.smfs[s][p]['cre']

    def check_smf_prop_typ(self, s, p, verbose=True):
        """Compare the declared property type with the live one; an empty
        declared type skips the comparison."""
        r = RET_OK
        if self.smfs[s][p]['typ'] == '' or self.smfs[s][p]['typ'] == None:
            if verbose:
                pinfo('%s Prop %s type is not checked' %(s,p))
        elif self.smfs[s][p]['typ'] != self.smfs[s][p]['rtyp']:
            if verbose:
                perror('NOK: %s Prop %s type Do Not match, got:%s, expected:%s' %(s,p,self.smfs[s][p]['rtyp'],self.smfs[s][p]['typ']))
            r |= RET_ERR
        else:
            if verbose:
                pinfo('%s Prop %s type %s is OK' %(s,p,self.smfs[s][p]['typ']))
        # NOTE(review): this re-check contradicts the 'not checked' branch
        # above (looks copy-pasted from check_smf_prop_cre) — verify intent
        if self.smfs[s][p]['typ'] == '' or self.smfs[s][p]['typ'] == None:
            if verbose:
                perror('NOK: %s type must be specified to create %s' %(s,p))
        return r

    def check_smf_prop_val(self, s, p, verbose=True):
        """Compare declared and live property values, ordered when the
        rule sets inorder, as an unordered inclusion check otherwise."""
        r = RET_OK
        rvs = ' '.join(self.smfs[s][p]['rval'])
        if self.smfs[s][p]['ino']:
            if self.smfs[s][p]['val'] == rvs:
                if verbose:
                    pinfo('%s Prop %s values match in right order [%s]' %(s,p,rvs))
            else:
                if verbose:
                    perror('NOK: %s Prop %s values Do Not match, got:[%s], expected:[%s]' %(s,p,rvs,self.smfs[s][p]['val']))
                r |= RET_ERR
        else:
            vv = self.smfs[s][p]['val'].split()
            m = True
            for v in vv:
                if not v in self.smfs[s][p]['rval']:
                    if verbose and len(self.smfs[s][p]['rval']) > 1 :
                        perror('%s Prop %s notfound %s' %(s,p,v))
                    m = False
                else:
                    if verbose and len(self.smfs[s][p]['rval']) > 1 :
                        pinfo('%s Prop %s found %s' %(s,p,v))
            if m:
                if verbose:
                    pinfo('%s Prop %s values match [%s]' %(s,p,rvs))
            else:
                if verbose:
                    perror('NOK: %s Prop %s values Do Not match, got:[%s], expected:[%s]' %(s,p,rvs,self.smfs[s][p]['val']))
                r |= RET_ERR
        return r

    def check_smfs(self, verbose=True):
        """Check creation, type and value of every collected property."""
        r = RET_OK
        for s in self.smfs:
            for p in self.smfs[s]:
                """
                pinfo('FMRI: ', s, 'PROP: ', p, 'TYP: ', self.smfs[s][p]['typ'], 'RTYP: ', self.smfs[s][p]['rtyp'], type(self.smfs[s][p]['val']), type(self.smfs[s][p]['rval']))
                pinfo('      ', 'VALS: ', self.smfs[s][p]['val'])
                pinfo('      ', 'RVALS: ', self.smfs[s][p]['rval'])
                """
                rx,c = self.check_smf_prop_cre(s, p, verbose=verbose)
                r |= rx
                if not c:
                    r |= self.check_smf_prop_typ(s, p, verbose=verbose)
                    r |= self.check_smf_prop_val(s, p, verbose=verbose)
        return r

    def fix_smfs(self, verbose=False):
        """Build then run the svccfg setprop (and optional svcadm refresh /
        sleep) command list fixing every non-compliant property."""
        r = RET_OK
        cmds = []
        for s in self.smfs:
            for p in self.smfs[s]:
                added = False
                rx,c = self.check_smf_prop_cre(s, p, verbose=verbose)
                vx = self.smfs[s][p]['val'].split()
                if c:
                    if rx == 0 :
                        pinfo('%s try to add %s %s: = %s' %(s,p,self.smfs[s][p]['typ'],self.smfs[s][p]['val']))
                        if len(vx) > 1:
                            sxok = True
                            for v in vx:
                                if not (v.startswith('"') and v.endswith('"')):
                                    # NOTE(review): the quote check is
                                    # disabled — the block below is a no-op
                                    # string, so sxok always stays True
                                    """
                                    sxok = False
                                    break
                                    """
                            if sxok:
                                cmds.append(['/usr/sbin/svccfg', '-s', s, 'setprop', p, '=', self.smfs[s][p]['typ']+':', '(%s)'%self.smfs[s][p]['val']])
                                added = True
                            else:
                                perror('NOK: %s prop %s values must be within double quotes [%s]' %(s,p,self.smfs[s][p]['val']))
                                r |= RET_ERR
                        else:
                            cmds.append(['/usr/sbin/svccfg', '-s', s, 'setprop', p, '=', self.smfs[s][p]['typ']+':', self.smfs[s][p]['val']])
                            added = True
                    else:
                        perror('NOK: %s cannot add prop %s without a valid type' %(s,p))
                        r |= RET_ERR
                else:
                    ry = self.check_smf_prop_val(s, p, verbose=verbose)
                    if ry != 0:
                        pinfo('%s try to fix %s = %s' %(s,p,self.smfs[s][p]['val']))
                        if len(vx) > 1:
                            sxok = True
                            for v in vx:
                                if not (v.startswith('"') and v.endswith('"')):
                                    # NOTE(review): same disabled quote check
                                    """
                                    sxok = False
                                    break
                                    """
                            if sxok:
                                cmds.append(['/usr/sbin/svccfg', '-s', s, 'setprop', p, '=', '(%s)'%self.smfs[s][p]['val']])
                                added = True
                            else:
                                perror('NOK: %s prop %s values must be within double quotes [%s]' %(s,p,self.smfs[s][p]['val']))
                                r |= RET_ERR
                        else:
                            cmds.append(['/usr/sbin/svccfg', '-s', s, 'setprop', p, '=', self.smfs[s][p]['val']])
                            added = True
                if added:
                    if self.smfs[s][p]['rel']:
                        cmds.append(['/usr/sbin/svcadm', 'refresh' ,s])
                    if self.smfs[s][p]['slp'] != 0:
                        cmds.append(['/usr/bin/sleep' , '%d'%self.smfs[s][p]['slp']])
        for cmd in cmds:
            pinfo('EXEC:', ' '.join(cmd))
            p = Popen(cmd, stdout=PIPE, stderr=PIPE)
            out, err = p.communicate()
            err = bdecode(err)
            if p.returncode != 0:
                perror('Code=%s %s' %(p.returncode, err))
                r |= RET_ERR
        return r

    def check(self):
        if self.osver < 5.11:
            return RET_NA
        r = self.check_smfs()
        return r

    def fix(self):
        if self.osver < 5.11:
            return RET_NA
        r = self.fix_smfs()
        return r
# CLI entry point: this module predates the comp.main() helper and parses
# argv itself.
# Fixes: the unsupported-argument message used sys.argv[2], which raises
# IndexError when a single bad action is given — it now reports
# sys.argv[1]; removed the stray ']' from the usage string.
if __name__ == "__main__":
    syntax = """syntax:
%s check|fixable|fix"""%sys.argv[0]
    try:
        action = sys.argv[1]
        o = SmfCfgS()
        if action == 'check':
            RET = o.check()
        elif action == 'fix':
            RET = o.fix()
        elif action == 'fixable':
            RET = o.fixable()
        else:
            perror("unsupported argument '%s'"%sys.argv[1])
            perror(syntax)
            RET = RET_ERR
    except NotApplicable:
        sys.exit(RET_NA)
    except:
        import traceback
        traceback.print_exc()
        sys.exit(RET_ERR)
    sys.exit(RET)
opensvc-1.8~20170412/var/compliance/com.opensvc/group.py 0000755 0001750 0001750 00000017072 13073467726 023137 0 ustar jkelbert jkelbert #!/usr/bin/env python
data = {
"default_prefix": "OSVC_COMP_GROUP_",
"example_value": """
{
"tibco": {
"gid": 1000,
},
"tibco1": {
"gid": 1001,
}
}
""",
"description": """* Verify a local system group configuration
* A minus (-) prefix to the group name indicates the user should not exist
""",
"form_definition": """
Desc: |
A rule defining a list of Unix groups and their properties. Used by the groups compliance objects.
Css: comp48
Outputs:
-
Dest: compliance variable
Type: json
Format: dict of dict
Key: group
EmbedKey: No
Class: group
Inputs:
-
Id: group
Label: Group name
DisplayModeLabel: group
LabelCss: guys16
Mandatory: Yes
Type: string
Help: The Unix group name.
-
Id: gid
Label: Group id
DisplayModeLabel: gid
LabelCss: guys16
Type: string or integer
Help: The Unix gid of this group.
""",
}
import os
import sys
import json
import grp
import re
from subprocess import Popen
sys.path.append(os.path.dirname(__file__))
from comp import *
# System groups this module refuses to delete, even when a rule asks for it.
blacklist = [
    "root",
    "bin",
    "daemon",
    "sys",
    "adm",
    "tty",
    "disk",
    "lp",
    "mem",
    "kmem",
    "wheel",
    "mail",
    "uucp",
    "man",
    "games",
    "gopher",
    "video",
    "dip",
    "ftp",
    "lock",
    "audio",
    "nobody",
    "users",
    "utmp",
    "utempter",
    "floppy",
    "vcsa",
    "cdrom",
    "tape",
    "dialout",
    "saslauth",
    "postdrop",
    "postfix",
    "sshd",
    "opensvc",
    "mailnull",
    "smmsp",
    "slocate",
    "rpc",
    "rpcuser",
    "nfsnobody",
    "tcpdump",
    "ntp"
]
class CompGroup(CompObject):
def __init__(self, prefix=None):
CompObject.__init__(self, prefix=prefix, data=data)
def init(self):
self.grt = {
'gid': 'gr_gid',
}
self.groupmod_p = {
'gid': '-g',
}
self.sysname, self.nodename, x, x, self.machine = os.uname()
if self.sysname == "FreeBSD":
self.groupadd = ["pw", "groupadd"]
self.groupmod = ["pw", "groupmod"]
self.groupdel = ["pw", "groupdel"]
elif self.sysname == 'AIX':
self.groupmod = ['chgroup']
self.groupadd = ['mkgroup']
self.groupdel = ['rmgroup']
self.groupmod_p = {
'gid': 'id',
}
else:
self.groupadd = ["groupadd"]
self.groupmod = ["groupmod"]
self.groupdel = ["groupdel"]
if self.sysname not in ['SunOS', 'Linux', 'HP-UX', 'AIX', 'OSF1', 'FreeBSD']:
perror('group: module not supported on', self.sysname)
raise NotApplicable
self.groups = {}
for d in self.get_rules():
self.groups.update(d)
for group, d in self.groups.items():
for k in ('uid', 'gid'):
if k in d:
self.groups[group][k] = int(d[k])
def fixable(self):
return RET_NA
def fmt_opt_gen(self, item, target):
return [item, target]
def fmt_opt_aix(self, item, target):
return ['='.join((item, target))]
def fmt_opt(self, item, target):
if self.sysname == 'AIX':
return self.fmt_opt_aix(item, target)
else:
return self.fmt_opt_gen(item, target)
def fix_item(self, group, item, target):
if item in self.groupmod_p:
cmd = [] + self.groupmod
if self.sysname == "FreeBSD":
cmd += [group]
cmd += self.fmt_opt(self.groupmod_p[item], str(target))
if self.sysname != "FreeBSD":
cmd += [group]
pinfo("group:", ' '.join(cmd))
p = Popen(cmd)
out, err = p.communicate()
r = p.returncode
if r == 0:
return RET_OK
else:
return RET_ERR
else:
perror('group: no fix implemented for', item)
return RET_ERR
def check_item(self, group, item, target, current, verbose=False):
if type(current) == int and current < 0:
current += 4294967296
if target == current:
if verbose:
pinfo('group', group, item+':', current)
return RET_OK
else:
if verbose:
perror('group', group, item+':', current, 'target:', target)
return RET_ERR
def try_create_group(self, props):
#
# don't try to create group if passwd db is not 'files'
# beware: 'files' db is the implicit default
#
if 'db' in props and props['db'] != 'files':
return False
if set(self.grt.keys()) <= set(props.keys()):
return True
return False
def check_group_del(self, group):
try:
groupinfo = grp.getgrnam(group)
except KeyError:
pinfo('group', group, 'does not exist, on target')
return RET_OK
perror('group', group, "exists, shouldn't")
return RET_ERR
def check_group(self, group, props):
if group.startswith('-'):
return self.check_group_del(group.lstrip('-'))
r = 0
try:
groupinfo = grp.getgrnam(group)
except KeyError:
if self.try_create_group(props):
perror('group', group, 'does not exist')
return RET_ERR
else:
pinfo('group', group, 'does not exist and not enough info to create it')
return RET_OK
for prop in self.grt:
if prop in props:
r |= self.check_item(group, prop, props[prop], getattr(groupinfo, self.grt[prop]), verbose=True)
return r
def create_group(self, group, props):
cmd = [] + self.groupadd
if self.sysname == "FreeBSD":
cmd += [group]
for item in self.grt:
cmd += self.fmt_opt(self.groupmod_p[item], str(props[item]))
if self.sysname != "FreeBSD":
cmd += [group]
pinfo("group:", ' '.join(cmd))
p = Popen(cmd)
out, err = p.communicate()
r = p.returncode
if r == 0:
return RET_OK
else:
return RET_ERR
def fix_group_del(self, group):
if group in blacklist:
perror("group", group, "... cowardly refusing to delete")
return RET_ERR
try:
groupinfo = grp.getgrnam(group)
except KeyError:
return RET_OK
cmd = self.groupdel + [group]
pinfo("group:", ' '.join(cmd))
p = Popen(cmd)
out, err = p.communicate()
r = p.returncode
if r == 0:
return RET_OK
else:
return RET_ERR
def fix_group(self, group, props):
if group.startswith('-'):
return self.fix_group_del(group.lstrip('-'))
r = 0
try:
groupinfo = grp.getgrnam(group)
except KeyError:
if self.try_create_group(props):
return self.create_group(group, props)
else:
perror('group', group, 'does not exist')
return RET_OK
for prop in self.grt:
if prop in props and \
self.check_item(group, prop, props[prop], getattr(groupinfo, self.grt[prop])) != RET_OK:
r |= self.fix_item(group, prop, props[prop])
return r
def check(self):
    """Check all group rules, OR-ing their return codes."""
    rc = 0
    for group, props in self.groups.items():
        rc |= self.check_group(group, props)
    return rc

def fix(self):
    """Fix all group rules, OR-ing their return codes."""
    rc = 0
    for group, props in self.groups.items():
        rc |= self.fix_group(group, props)
    return rc
if __name__ == "__main__":
main(CompGroup)
opensvc-1.8~20170412/var/compliance/com.opensvc/zprop.py 0000755 0001750 0001750 00000010475 13073467726 023155 0 ustar jkelbert jkelbert #!/usr/bin/env python
import os
import sys
sys.path.append(os.path.dirname(__file__))
from utilities import which
from comp import *
from subprocess import *
class CompZprop(CompObject):
    """Check/fix dataset or pool properties via a `zbin` get/set tool.

    Subclasses are expected to set self.zbin (e.g. "zfs" or "zpool").
    Rules are dicts with keys: name, prop, op, value.
    """

    def __init__(self, prefix='OSVC_COMP_ZPROP_'):
        CompObject.__init__(self, prefix=prefix, data=data)

    def init(self):
        # load and validate rules; malformed entries are reported and skipped
        self.sysname, self.nodename, x, x, self.machine = os.uname()
        self.data = []
        for rule in self.get_rules():
            try:
                self.data += self.add_rule(rule)
            except InitError:
                continue
            except ValueError:
                perror('failed to parse variable', rule)
def add_rule(self, d):
    """Validate a rule dict; return [d] when complete, else []."""
    missing = [k for k in ("name", "prop", "op", "value") if k not in d]
    for k in missing:
        perror('the', k, 'key should be in the dict:', d)
    if missing:
        return []
    return [d]
def get_prop(self, d):
    """Run `<zbin> get <prop> <name>` and zip the header line with
    the value line. Returns a dict like {"NAME": ..., "VALUE": ...}
    or None on command failure / unexpected output shape."""
    proc = Popen([self.zbin, "get", d.get("prop"), d.get("name")],
                 stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0:
        return
    lines = [line for line in bdecode(out).splitlines() if line != ""]
    if len(lines) != 2:
        return
    header = lines[0].split()
    values = lines[1].split()
    if len(header) != len(values):
        return
    result = {}
    for key, val in zip(header, values):
        result[key] = val
    return result
def check_le(self, current, target):
    """current <= target, numerically.

    Fix (all four numeric comparators): `target` comes from a JSON
    rule and may be a string; under python2 an int/str comparison
    orders by type name (int < str is always True), silently
    mis-evaluating the check. Coerce both sides to int.
    """
    if int(current) <= int(target):
        return RET_OK
    return RET_ERR

def check_ge(self, current, target):
    """current >= target, numerically."""
    if int(current) >= int(target):
        return RET_OK
    return RET_ERR

def check_lt(self, current, target):
    """current < target, numerically."""
    if int(current) < int(target):
        return RET_OK
    return RET_ERR

def check_gt(self, current, target):
    """current > target, numerically."""
    if int(current) > int(target):
        return RET_OK
    return RET_ERR

def check_eq(self, current, target):
    """current == target, as strings."""
    if current == str(target):
        return RET_OK
    return RET_ERR
def fixable(self):
    # fixability is not assessed for this module
    return RET_NA

def fix_zprop(self, d):
    """Set the property to the rule value if the check fails."""
    if self.check_zprop(d) == RET_OK:
        return RET_OK
    prop = d.get("prop")
    target = d.get("value")
    name = d.get("name")
    cmd = [self.zbin, "set", prop+"="+target, name]
    pinfo(" ".join(cmd))
    p = Popen(cmd, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    if p.returncode != 0:
        err = bdecode(err)
        if len(err) > 0:
            perror(err)
        return RET_ERR
    return RET_OK
def check_zprop(self, d, verbose=False):
    """Check one property rule against the live value.

    Dispatches on the rule operator (=, <=, <, >=, >) and returns
    RET_OK or RET_ERR.

    Fix: the non-compliance message was printed with pinfo (stdout),
    inconsistent with perror used for failures everywhere else in
    these modules; report it with perror.
    """
    v = self.get_prop(d)
    prop = d.get("prop")
    if v is None:
        if verbose:
            perror("property", prop, "does not exist")
        return RET_ERR
    current = v["VALUE"]
    op = d.get("op")
    target = d.get("value")
    if op == "=":
        r = self.check_eq(current, target)
    elif op == "<=":
        r = self.check_le(current, target)
    elif op == "<":
        r = self.check_lt(current, target)
    elif op == ">=":
        r = self.check_ge(current, target)
    elif op == ">":
        r = self.check_gt(current, target)
    else:
        perror("unsupported operator", op)
        return RET_ERR
    if verbose:
        if r == RET_OK:
            pinfo("property %s current value %s is %s %s. on target." % (prop, current, op, target))
        else:
            perror("property %s current value %s is not %s %s." % (prop, current, op, target))
    return r
def check_zbin(self):
    """Return the path of the property tool, or None if not in PATH."""
    return which(self.zbin)

def check(self):
    """Check every rule; N/A when the tool is not installed."""
    if not self.check_zbin():
        pinfo(self.zbin, "not found")
        return RET_NA
    rc = 0
    for rule in self.data:
        rc |= self.check_zprop(rule, verbose=True)
    return rc

def fix(self):
    """Fix every rule; N/A when the tool is not installed."""
    if not self.check_zbin():
        pinfo(self.zbin, "not found")
        return RET_NA
    rc = 0
    for rule in self.data:
        rc |= self.fix_zprop(rule)
    return rc
if __name__ == "__main__":
main(CompZprop)
opensvc-1.8~20170412/var/compliance/com.opensvc/rc.py 0000755 0001750 0001750 00000010641 13073467726 022402 0 ustar jkelbert jkelbert #!/usr/bin/env python
"""
[{"service": "foo", "level": "2345", "state": "on"},
{"service": "foo", "level": "016", "state": "off"},
{"service": "bar", "state": "on"},
...]
"""
import os
import sys
import json
import pwd
import re
from subprocess import *
sys.path.append(os.path.dirname(__file__))
from comp import *
class CompRc(object):
    """Check/fix init-script activation states (chkconfig or sysvinit)."""

    def __init__(self, prefix='OSVC_COMP_RC_'):
        self.prefix = prefix.upper()
        self.sysname, self.nodename, x, x, self.machine = os.uname()
        self.services = []
        # concatenate all JSON rule variables matching the prefix,
        # expanding %%ENV:...%% wildcards in every value
        for k in [key for key in os.environ if key.startswith(self.prefix)]:
            try:
                l = json.loads(os.environ[k])
                for i, d in enumerate(l):
                    for key, val in d.items():
                        d[key] = self.subst(val)
                    l[i] = d
                self.services += l
            except ValueError:
                perror('failed to concatenate', os.environ[k], 'to service list')
        self.validate_svcs()
        if len(self.services) == 0:
            raise NotApplicable()
        if self.sysname not in ['Linux', 'HP-UX']:
            perror(__file__, 'module not supported on', self.sysname)
            raise NotApplicable()
        # pick the backend from the collector-provided vendor facts
        vendor = os.environ.get('OSVC_COMP_NODES_OS_VENDOR', 'unknown')
        release = os.environ.get('OSVC_COMP_NODES_OS_RELEASE', 'unknown')
        if vendor in ['CentOS', 'Redhat', 'Red Hat', 'SuSE'] or \
           (vendor == 'Oracle' and self.sysname == 'Linux'):
            import chkconfig
            self.o = chkconfig.Chkconfig()
        elif vendor in ['Ubuntu', 'Debian', 'HP']:
            import sysvinit
            self.o = sysvinit.SysVInit()
        else:
            perror(vendor, "not supported")
            raise NotApplicable()
def subst(self, v):
    """Expand %%ENV:VAR%% markers in strings; recurse into lists.

    A marker resolves to os.environ[VAR], falling back to
    os.environ['OSVC_COMP_' + VAR]; an unresolvable marker raises
    NotApplicable. Non-string scalars are returned unchanged.
    """
    if type(v) == list:
        expanded = []
        for item in v:
            expanded.append(self.subst(item))
        return expanded
    if type(v) != str and type(v) != unicode:
        return v
    pattern = re.compile('%%ENV:\w+%%')
    for marker in pattern.findall(v):
        name = marker.strip("%").replace('ENV:', '')
        if name in os.environ:
            val = os.environ[name]
        elif 'OSVC_COMP_'+name in os.environ:
            val = os.environ['OSVC_COMP_'+name]
        else:
            perror(name, 'is not an env variable')
            raise NotApplicable()
        v = v.replace(marker, val)
    return v
def validate_svcs(self):
    """Drop malformed rules so later check()/fix() don't crash.

    Fix: the filtered list was stored only in self.svcs while
    check() and fix() iterate self.services, so invalid rules were
    reported here but still processed afterwards. Store the
    filtered list in self.services as well (self.svcs is kept for
    backward compatibility).
    """
    l = []
    for i, svc in enumerate(self.services):
        if self.validate_svc(svc) == RET_OK:
            l.append(svc)
    self.svcs = l
    self.services = l

def validate_svc(self, svc):
    """Return RET_OK when the rule carries the mandatory keys."""
    if 'service' not in svc:
        perror(svc, ' rule is malformed ... service key not present')
        return RET_ERR
    if 'state' not in svc:
        perror(svc, ' rule is malformed ... state key not present')
        return RET_ERR
    return RET_OK
def check_svc(self, svc, verbose=True):
    """Check one service activation rule via the vendor backend.

    Fix: 'level' is optional in rules (the module docstring example
    includes {"service": "bar", "state": "on"}), but svc['level']
    raised KeyError when absent. Use .get() so level-less rules
    pass None to the backend, like 'seq' already did.
    """
    seq = svc.get('seq')
    return self.o.check_state(svc['service'], svc.get('level'), svc['state'], seq=seq, verbose=verbose)

def fix_svc(self, svc, verbose=True):
    """Fix one service activation rule when non-compliant."""
    seq = svc.get('seq')
    if self.check_svc(svc, verbose=False) == RET_OK:
        return RET_OK
    return self.o.fix_state(svc['service'], svc.get('level'), svc['state'], seq=seq)
def check(self):
    """Check all service rules, OR-ing their return codes."""
    rc = 0
    for svc in self.services:
        rc |= self.check_svc(svc)
    return rc

def fix(self):
    """Fix all service rules, OR-ing their return codes."""
    rc = 0
    for svc in self.services:
        rc |= self.fix_svc(svc)
    return rc
if __name__ == "__main__":
    # command line: <prog> PREFIX check|fixable|fix
    syntax = """syntax:
%s PREFIX check|fixable|fix"""%sys.argv[0]
    if len(sys.argv) != 3:
        perror("wrong number of arguments")
        perror(syntax)
        sys.exit(RET_ERR)
    try:
        o = CompRc(sys.argv[1])
        if sys.argv[2] == 'check':
            RET = o.check()
        elif sys.argv[2] == 'fix':
            RET = o.fix()
        elif sys.argv[2] == 'fixable':
            # NOTE(review): CompRc defines no fixable() method, so this
            # path falls into the bare except below — confirm intent
            RET = o.fixable()
        else:
            perror("unsupported argument '%s'"%sys.argv[2])
            perror(syntax)
            RET = RET_ERR
    except NotApplicable:
        sys.exit(RET_NA)
    except:
        import traceback
        traceback.print_exc()
        sys.exit(RET_ERR)
    sys.exit(RET)
opensvc-1.8~20170412/var/compliance/com.opensvc/package.py 0000755 0001750 0001750 00000063226 13073467726 023400 0 ustar jkelbert jkelbert #!/usr/bin/env python
data = {
"default_prefix": "OSVC_COMP_PACKAGES_",
"example_value": """
[
"bzip2",
"-zip",
"zip"
]
""",
"description": """* Verify a list of packages is installed or removed
* A '-' prefix before the package name means the package should be removed
* No prefix before the package name means the package should be installed
* The package version is not checked
""",
"form_definition": """
Desc: |
A rule defining a set of packages, fed to the 'packages' compliance object for it to check each package installed or not-installed status.
Css: comp48
Outputs:
-
Dest: compliance variable
Class: package
Type: json
Format: list
Inputs:
-
Id: pkgname
Label: Package name
DisplayModeLabel: ""
LabelCss: pkg16
Mandatory: Yes
Help: Use '-' as a prefix to set 'not installed' as the target state. Use '*' as a wildcard for package name expansion for operating systems able to list packages available for installation.
Type: string
""",
}
import os
import re
import sys
import json
import pwd
import tempfile
from subprocess import *
from utilities import which
sys.path.append(os.path.dirname(__file__))
from comp import *
class CompPackages(CompObject):
    """Check/fix installed-package state across Linux/Unix flavors."""

    def __init__(self, prefix='OSVC_COMP_PACKAGES_', uri=None):
        CompObject.__init__(self, prefix=prefix, data=data)
        # uri: package repository / nim lpp_source / sw depot location,
        # required by the AIX and HP-UX backends
        self.uri = uri
def init(self):
    """Build the package list and bind the vendor-specific backend
    callables (get_installed_packages / pkg_add / pkg_del)."""
    self.combo_fix = False
    self.sysname, self.nodename, x, x, self.machine = os.uname()
    self.known_archs = ['i386', 'i586', 'i686', 'x86_64', 'noarch', '*']
    if self.sysname not in ['Linux', 'AIX', 'HP-UX', 'SunOS', 'FreeBSD']:
        perror(__file__, 'module not supported on', self.sysname)
        raise NotApplicable()
    # HP-UX: check bundles instead of products when requested
    if 'OSVC_COMP_PACKAGES_PKG_TYPE' in os.environ and \
       os.environ['OSVC_COMP_PACKAGES_PKG_TYPE'] == "bundle":
        self.pkg_type = 'bundle'
    else:
        self.pkg_type = 'product'
    self.packages = self.get_rules()
    if len(self.packages) == 0:
        raise NotApplicable()
    # rules may be plain names or dicts keyed by 'pkgname'
    self.data = {}
    l = []
    for pkg in self.packages:
        if type(pkg) == dict:
            l.append(pkg['pkgname'])
            self.data[pkg['pkgname']] = pkg
    if len(l) > 0:
        self.packages = l
    vendor = os.environ.get('OSVC_COMP_NODES_OS_VENDOR', 'unknown')
    release = os.environ.get('OSVC_COMP_NODES_OS_RELEASE', 'unknown')
    if vendor in ['Debian', 'Ubuntu']:
        self.get_installed_packages = self.deb_get_installed_packages
        self.pkg_add = self.apt_fix_pkg
        self.pkg_del = self.apt_del_pkg
    elif vendor in ['CentOS', 'Redhat', 'Red Hat'] or \
         (vendor == 'Oracle' and self.sysname == 'Linux'):
        if which("yum") is None:
            perror("package manager not found (yum)")
            raise ComplianceError()
        # yum fixes adds and removals in two batched transactions
        self.combo_fix = True
        self.get_installed_packages = self.rpm_get_installed_packages
        self.pkg_add = self.yum_fix_pkg
        self.pkg_del = self.yum_del_pkg
    elif vendor == "SuSE":
        if which("zypper") is None:
            perror("package manager not found (zypper)")
            raise ComplianceError()
        self.get_installed_packages = self.rpm_get_installed_packages
        self.pkg_add = self.zyp_fix_pkg
        self.pkg_del = self.zyp_del_pkg
    elif vendor == "FreeBSD":
        if which("pkg") is None:
            perror("package manager not found (pkg)")
            raise ComplianceError()
        self.get_installed_packages = self.freebsd_pkg_get_installed_packages
        self.pkg_add = self.freebsd_pkg_fix_pkg
        self.pkg_del = self.freebsd_pkg_del_pkg
    elif vendor in ['IBM']:
        self.get_installed_packages = self.aix_get_installed_packages
        self.pkg_add = self.aix_fix_pkg
        self.pkg_del = self.aix_del_pkg
        if self.uri is None:
            # AIX installs come from a nim lpp_source resource
            perror("resource must be set")
            raise NotApplicable()
    elif vendor in ['HP']:
        self.get_installed_packages = self.hp_get_installed_packages
        self.pkg_add = self.hp_fix_pkg
        self.pkg_del = self.hp_del_pkg
    elif vendor in ['Oracle']:
        self.get_installed_packages = self.sol_get_installed_packages
        self.pkg_add = self.sol_fix_pkg
        self.pkg_del = self.sol_del_pkg
    else:
        perror(vendor, "not supported")
        raise NotApplicable()
    self.load_reloc()
    # NOTE(review): under python3 map() returns a one-shot iterator;
    # it is consumed once in expand_pkgnames(), but confirm py3 support
    self.packages = map(lambda x: x.strip(), self.packages)
    self.expand_pkgnames()
    self.installed_packages = self.get_installed_packages()
def load_reloc(self):
    """Split 'pkg:relocation_path' entries; remember relocations."""
    self.reloc = {}
    for idx, name in enumerate(self.packages):
        parts = name.split(':')
        if len(parts) != 2:
            continue
        self.packages[idx] = parts[0]
        self.reloc[parts[0]] = parts[1]
def expand_pkgnames(self):
    """Expand wildcards and implicit archs, keeping +/- prefixes."""
    expanded = []
    for name in self.packages:
        if (name.startswith('-') or name.startswith('+')) and len(name) > 1:
            prefix = name[0]
            name = name[1:]
        else:
            prefix = ''
        expanded += [prefix + n for n in self.expand_pkgname(name, prefix)]
    self.packages = expanded

def expand_pkgname(self, pkgname, prefix):
    """Delegate expansion to the vendor backend; default passthrough."""
    vendor = os.environ.get('OSVC_COMP_NODES_OS_VENDOR', 'unknown')
    release = os.environ.get('OSVC_COMP_NODES_OS_RELEASE', 'unknown')
    if vendor in ['CentOS', 'Redhat', 'Red Hat'] or (vendor == 'Oracle' and release.startswith('VM ')):
        return self.yum_expand_pkgname(pkgname, prefix)
    elif vendor == 'SuSE':
        return self.zyp_expand_pkgname(pkgname, prefix)
    elif vendor in ['IBM']:
        return self.aix_expand_pkgname(pkgname, prefix)
    return [pkgname]
def aix_expand_pkgname(self, pkgname, prefix=''):
    """Expand a package name against the nim lpp_source inventory.

    The lpp_source listing is fetched once and cached in
    self.nimcache. It is either a native installp listing, or — when
    nimclient reports error 0042-175 — an rpm listing.

    Fix: self.lpp_type is only ever set to "installp" or "rpm", but
    the dispatch below tested for "native", so native installp
    expansion could never run. Test "installp" instead.
    """
    if not hasattr(self, "nimcache"):
        cmd = ['nimclient', '-o', 'showres', '-a', 'resource=%s'%self.uri, '-a', 'installp_flags=L']
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        err = bdecode(err)
        self.lpp_type = "installp"
        if "0042-175" in err:
            # not a native installp lpp_source: list it as rpm
            cmd = ['nimclient', '-o', 'showres', '-a', 'resource=%s'%self.uri]
            p = Popen(cmd, stdout=PIPE, stderr=PIPE)
            out, err = p.communicate()
            self.lpp_type = "rpm"
        out = bdecode(out)
        self.nimcache = out.splitlines()
    l = []
    if self.lpp_type == "rpm":
        l = self.aix_expand_pkgname_rpm(pkgname, prefix=prefix)
    elif self.lpp_type == "installp":
        l = self.aix_expand_pkgname_native(pkgname, prefix=prefix)
    if len(l) == 0:
        l = [pkgname]
    return l
def aix_expand_pkgname_rpm(self, pkgname, prefix=''):
    """Expand `pkgname` (glob) against an rpm-style lpp_source listing.

    Listing format (see aix_expand_pkgname docstring):
        zlib ALL @@R:zlib _all_filesets
          @@R:zlib-1.2.7-2 1.2.7-2
    A name line sets the current package; the '@@' lines that follow
    are its versions.

    Fix: _pkgname was referenced before assignment when the listing
    unexpectedly starts with a '@@' line; initialize it to None and
    skip version lines seen before any name line.
    """
    import fnmatch
    matches = []
    _pkgname = None
    for line in self.nimcache:
        line = line.strip()
        if len(line) == 0:
            continue
        words = line.split()
        if line.startswith("@@") and len(words) > 1:
            _pkgvers = words[1]
            if _pkgname is not None and fnmatch.fnmatch(_pkgname, pkgname) and _pkgname not in matches:
                matches.append(_pkgname)
        else:
            _pkgname = words[0]
    return matches
def aix_expand_pkgname_native(self, pkgname, prefix=''):
    """Expand `pkgname` (glob) against a native installp listing
    (colon-separated lslpp-like records; field 1 is name-version,
    field 2 the version)."""
    import fnmatch
    found = []
    for record in self.nimcache:
        fields = record.split(':')
        if len(fields) < 5:
            continue
        vers = fields[2]
        name = fields[1].replace('-' + vers, '')
        if fnmatch.fnmatch(name, pkgname) and name not in found:
            found.append(name)
    return found
def zyp_expand_pkgname(self, pkgname, prefix=''):
    """Expand a package name through 'zypper packages' output.

    Returns name.arch candidates; when no arch was specified in the
    rule and several candidates remain, they are filtered down to
    the node arch plus noarch. On lookup failure, returns [pkgname]
    for '-' (must-be-absent) rules and [] otherwise.
    """
    arch_specified = False
    for arch in self.known_archs:
        if pkgname.endswith(arch):
            arch_specified = True
    cmd = ['zypper', '--non-interactive', 'packages']
    p = Popen(cmd, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    if p.returncode != 0:
        if prefix != '-':
            perror('can not expand (cmd error)', pkgname, err)
            return []
        else:
            return [pkgname]
    out = bdecode(out)
    lines = out.splitlines()
    if len(lines) < 2:
        if prefix != '-':
            perror('can not expand', pkgname)
            return []
        else:
            return [pkgname]
    # skip the table header; NOTE(review): if the '--+--' separator
    # is absent, only the lines after the last line are kept — confirm
    for i, line in enumerate(lines):
        if "--+--" in line:
            break
    lines = lines[i+1:]
    l = []
    for line in lines:
        # columns: status | repo | name | version | arch
        words = map(lambda x: x.strip(), line.split(" | "))
        if len(words) != 5:
            continue
        _status, _repo, _name, _version, _arch = words
        if arch_specified:
            if _name != pkgname or (arch != '*' and arch != _arch):
                continue
        else:
            if _name != pkgname:
                continue
        _pkgname = '.'.join((_name, _arch))
        if _pkgname in l:
            continue
        l.append(_pkgname)
    if arch_specified or len(l) == 1:
        return l
    # no arch in the rule and several matches: keep native + noarch
    if os.environ['OSVC_COMP_NODES_OS_ARCH'] in ('i386', 'i586', 'i686', 'ia32'):
        archs = ('i386', 'i586', 'i686', 'ia32', 'noarch')
    else:
        archs = (os.environ['OSVC_COMP_NODES_OS_ARCH'], 'noarch')
    ll = []
    for pkgname in l:
        if pkgname.split('.')[-1] in archs:
            # keep only packages matching the arch
            ll.append(pkgname)
    return ll
def yum_expand_pkgname(self, pkgname, prefix=''):
    """Expand a package name through 'yum list'.

    Sorts candidates newest-version first, keeps at most one ix86
    flavor, and when no arch was specified in the rule filters down
    to the node arch plus noarch. On lookup failure, returns
    [pkgname] for '-' (must-be-absent) rules and [] otherwise.
    """
    arch_specified = False
    for arch in self.known_archs:
        if pkgname.endswith(arch):
            arch_specified = True
    cmd = ['yum', 'list', pkgname]
    p = Popen(cmd, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    if p.returncode != 0:
        if prefix != '-':
            perror('can not expand (cmd error)', pkgname, err)
            return []
        else:
            return [pkgname]
    out = bdecode(out)
    lines = out.splitlines()
    if len(lines) < 2:
        if prefix != '-':
            perror('can not expand', pkgname)
            return []
        else:
            return [pkgname]
    lines = lines[1:]
    l = []
    for line in lines:
        words = line.split()
        if len(words) != 3:
            continue
        # skip yum section headers and banner lines
        if words[0] in ("Installed", "Available", "Loaded", "Updating"):
            continue
        if words[0] in l:
            continue
        l.append((words[0], words[1]))
    ll = []
    ix86_added = False
    from distutils.version import LooseVersion as V
    # newest version first; collapse the ix86 flavors to one entry
    for _pkgname, _version in sorted(l, key=lambda x: V(x[1]), reverse=True):
        pkgarch = _pkgname.split('.')[-1]
        if pkgarch not in ('i386', 'i586', 'i686', 'ia32'):
            #pinfo("add", _pkgname, "because", pkgarch, "not in ('i386', 'i586', 'i686', 'ia32')")
            ll.append(_pkgname)
        elif not ix86_added:
            #pinfo("add", _pkgname, "because", pkgarch, "not ix86_added")
            ll.append(_pkgname)
            ix86_added = True
    l = ll
    if arch_specified or len(l) == 1:
        return l
    if os.environ['OSVC_COMP_NODES_OS_ARCH'] in ('i386', 'i586', 'i686', 'ia32'):
        archs = ('i386', 'i586', 'i686', 'ia32', 'noarch')
    else:
        archs = (os.environ['OSVC_COMP_NODES_OS_ARCH'], 'noarch')
    ll = []
    for pkgname in l:
        pkgarch = pkgname.split('.')[-1]
        if pkgarch not in archs:
            # keep only packages matching the arch
            continue
        ll.append(pkgname)
    return ll
def hp_parse_swlist(self, out):
    """Parse `swlist` output into {name: [(version, ""), ...]}."""
    products = {}
    for line in out.split('\n'):
        # skip comments and blank lines
        if line.startswith('#') or len(line) == 0:
            continue
        fields = line.split()
        if len(fields) < 2:
            continue
        products.setdefault(fields[0], []).append((fields[1], ""))
    return products
def hp_del_pkg(self, pkg):
    """Package removal is not implemented for HP-UX.

    Fix: the original referenced the undefined name __fname__ and
    raised NameError instead of printing the TODO message.
    """
    perror("TODO:", "hp_del_pkg")
    return RET_ERR

def hp_fix_pkg(self, pkg):
    """Install `pkg` from the depot at self.uri with swinstall."""
    if pkg in self.reloc:
        # apply the product:directory relocation recorded by load_reloc
        pkg = ':'.join((pkg, self.reloc[pkg]))
    cmd = ['swinstall',
           '-x', 'allow_downdate=true',
           '-x', 'mount_all_filesystems=false',
           '-s', self.uri, pkg]
    pinfo(" ".join(cmd))
    p = Popen(cmd, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    if len(out) > 0:
        pinfo(out)
    if len(err) > 0:
        perror(err)
    if p.returncode != 0:
        return RET_ERR
    return RET_OK
def hp_get_installed_packages(self):
    """Return the installed HP-UX product/bundle names."""
    proc = Popen(['swlist', '-l', self.pkg_type], stdout=PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0:
        perror('can not fetch installed packages list')
        return []
    return self.hp_parse_swlist(bdecode(out)).keys()
def get_free(self, c):
    """Return free kilobytes on the filesystem holding path `c`.

    Fix: the original iterated over `out.split()` — whitespace
    tokens, not lines — so the first '%' token found was the "Use%"
    column header, and int("Available") (the preceding word) raised,
    making the function return 0 for every existing path. Parse df
    output line by line and skip lines that don't yield an integer.
    Returns 0 when the path is missing or output is unusable.
    """
    if not os.path.exists(c):
        return 0
    cmd = ["df", "-k", c]
    p = Popen(cmd, stdout=PIPE, stderr=None)
    out, err = p.communicate()
    out = bdecode(out)
    for line in out.splitlines():
        if "%" not in line:
            continue
        words = line.split()
        for i, w in enumerate(words):
            if '%' in w:
                break
        try:
            # the field before the Use% column is the available kB
            return int(words[i-1])
        except (ValueError, IndexError):
            # header line or otherwise unparsable: try the next one
            continue
    return 0
def get_temp_dir(self):
    """Pick (once) the candidate temp dir with the most free space."""
    if hasattr(self, "tmpd"):
        return self.tmpd
    by_free = {}
    for candidate in ["/tmp", "/var/tmp", "/root"]:
        by_free[self.get_free(candidate)] = candidate
    best = sorted(by_free.keys())[-1]
    self.tmpd = by_free[best]
    pinfo("selected %s as temp dir (%d KB free)" % (self.tmpd, best))
    return self.tmpd
def download(self, pkg_name):
    """Download `pkg_name` into a scratch dir under the temp dir.

    If the download is a tarball it is extracted and the extraction
    directory is returned; otherwise the downloaded file path is
    returned.

    Fixes: urlretrieve was called as self.urllib.urlretrieve
    (AttributeError — it lives in the urllib module), and the error
    path formatted `e` without binding it in the except clause
    (NameError masking the real download error).
    """
    import urllib
    import tempfile
    f = tempfile.NamedTemporaryFile(dir=self.get_temp_dir())
    dname = f.name
    f.close()
    try:
        os.makedirs(dname)
    except:
        pass
    fname = os.path.join(dname, "file")
    try:
        urllib.urlretrieve(pkg_name, fname)
    except IOError as e:
        try:
            os.unlink(fname)
            os.unlink(dname)
        except:
            pass
        raise Exception("download failed: %s" % str(e))
    import tarfile
    os.chdir(dname)
    try:
        tar = tarfile.open(fname)
    except:
        pinfo("not a tarball")
        return fname
    try:
        tar.extractall()
    except:
        try:
            os.unlink(fname)
            os.unlink(dname)
        except:
            pass
        # must be a pkg
        return dname
    tar.close()
    os.unlink(fname)
    return dname
def get_os_ver(self):
    """Return `uname -v` as a float, or 0 when unavailable or
    non-numeric."""
    proc = Popen(['uname', '-v'], stdout=PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0:
        return 0
    lines = bdecode(out).splitlines()
    if len(lines) == 0:
        return 0
    try:
        return float(lines[0])
    except:
        return 0
def sol_get_installed_packages(self):
    """Return installed Solaris package names (pkginfo PKGINST fields)."""
    proc = Popen(['pkginfo', '-l'], stdout=PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0:
        perror('can not fetch installed packages list')
        return []
    names = []
    for line in bdecode(out).splitlines():
        fields = line.split(':')
        if len(fields) != 2:
            continue
        if fields[0].strip() == "PKGINST":
            names.append(fields[1].strip())
    return names

def sol_del_pkg(self, pkg):
    """Remove a Solaris package, piping 'y' answers into pkgrm."""
    if pkg not in self.installed_packages:
        return RET_OK
    yes = os.path.dirname(__file__) + "/yes"
    cmd = '%s | pkgrm %s' % (yes, pkg)
    pinfo(cmd)
    if os.system(cmd) != 0:
        return RET_ERR
    return RET_OK
def sol_fix_pkg(self, pkg):
    """Download and install a Solaris package with pkgadd.

    The rule dict must provide a 'repo' URL; an optional 'resp'
    payload is written to a temp file and passed as the pkgadd
    response file. 'yes' answers are piped in to keep pkgadd
    non-interactive.
    """
    data = self.data[pkg]
    if 'repo' not in data or len(data['repo']) == 0:
        perror("no repo specified in the rule")
        return RET_NA
    # NOTE(review): these branches look inverted — a repo ending with
    # '/' yields a double slash, and one without '/' is used as the
    # full package URL. Confirm the intended repo layout.
    if data['repo'].endswith("/"):
        pkg_url = data['repo']+"/"+pkg
    else:
        pkg_url = data['repo']
    pinfo("download", pkg_url)
    try:
        dname = self.download(pkg_url)
    except Exception as e:
        perror(e)
        return RET_ERR
    if os.path.isfile(dname):
        d = dname
    else:
        d = "."
        os.chdir(dname)
    if self.get_os_ver() < 10:
        opts = ''
    else:
        # Solaris 10+: install in the current zone only
        opts = '-G'
    if 'resp' in data and len(data['resp']) > 0:
        f = tempfile.NamedTemporaryFile(dir=self.get_temp_dir())
        resp = f.name
        f.close()
        with open(resp, "w") as f:
            f.write(data['resp'])
    else:
        resp = "/dev/null"
    yes = os.path.dirname(__file__) + "/yes"
    cmd = '%s | pkgadd -r %s %s -d %s all' % (yes, resp, opts, d)
    pinfo(cmd)
    r = os.system(cmd)
    os.chdir("/")
    if os.path.isdir(dname):
        import shutil
        shutil.rmtree(dname)
    if r != 0:
        return RET_ERR
    return RET_OK
def aix_del_pkg(self, pkg):
    """Remove an AIX fileset with installp -u."""
    cmd = ['installp', '-u', pkg]
    pinfo(" ".join(cmd))
    proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate()
    if len(out) > 0:
        pinfo(out)
    if len(err) > 0:
        perror(err)
    return RET_OK if proc.returncode == 0 else RET_ERR

def aix_fix_pkg(self, pkg):
    """Install an AIX fileset from the nim lpp_source self.uri."""
    cmd = ['nimclient', '-o', 'cust',
           '-a', 'lpp_source=%s'%self.uri,
           '-a', 'installp_flags=Y',
           '-a', 'filesets=%s'%pkg]
    line = " ".join(cmd)
    pinfo(line)
    return RET_OK if os.system(line) == 0 else RET_ERR

def aix_get_installed_packages(self):
    """Return installed fileset names from lslpp -Lc (version suffix
    stripped from the name field)."""
    proc = Popen(['lslpp', '-Lc'], stdout=PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0:
        perror('can not fetch installed packages list')
        return []
    pkgs = []
    for line in bdecode(out).splitlines():
        fields = line.split(':')
        if len(fields) < 5:
            continue
        vers = fields[2]
        pkgs.append(fields[1].replace('-' + vers, ''))
    return pkgs
def freebsd_pkg_get_installed_packages(self):
    """Return installed FreeBSD package names (version suffix stripped)."""
    proc = Popen(['pkg', 'info'], stdout=PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0:
        perror('can not fetch installed packages list')
        return []
    names = []
    for line in bdecode(out).splitlines():
        try:
            # first column, then drop the trailing -version
            head = line[:line.index(" ")]
            names.append(head[:head.rindex("-")])
        except ValueError:
            pass
    return names

def rpm_get_installed_packages(self):
    """Return installed rpm packages as name.arch strings."""
    proc = Popen(['rpm', '-qa', '--qf', '%{NAME}.%{ARCH}\n'], stdout=PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0:
        perror('can not fetch installed packages list')
        return []
    return bdecode(out).splitlines()

def deb_get_installed_packages(self):
    """Return installed dpkg package names ('ii' lines, arch qualifier
    stripped)."""
    proc = Popen(['dpkg', '-l'], stdout=PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0:
        perror('can not fetch installed packages list')
        return []
    names = []
    for line in bdecode(out).splitlines():
        if not line.startswith('ii'):
            continue
        names.append(line.split()[1].split(':')[0])
    return names
def _run_pkg_cmd(self, cmd):
    """Run a package manager command, echoing it first; on failure
    print its stderr and return RET_ERR."""
    pinfo(' '.join(cmd))
    proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0:
        err = bdecode(err)
        if len(err) > 0:
            pinfo(err)
        return RET_ERR
    return RET_OK

def freebsd_pkg_del_pkg(self, pkg):
    """Remove a FreeBSD package."""
    return self._run_pkg_cmd(['pkg', 'remove', '-y', pkg])

def freebsd_pkg_fix_pkg(self, pkg):
    """Install a FreeBSD package."""
    return self._run_pkg_cmd(['pkg', 'install', '-y', pkg])

def zyp_del_pkg(self, pkg):
    """Remove a SuSE package."""
    return self._run_pkg_cmd(['zypper', 'remove', '-y', pkg])

def zyp_fix_pkg(self, pkg):
    """Install a SuSE package."""
    return self._run_pkg_cmd(['zypper', 'install', '-y', pkg])

def yum_del_pkg(self, pkg):
    """Remove one package (str) or several (list) with yum."""
    if type(pkg) == list:
        cmd = ['yum', '-y', 'remove'] + pkg
    else:
        cmd = ['yum', '-y', 'remove', pkg]
    return self._run_pkg_cmd(cmd)

def yum_fix_pkg(self, pkg):
    """Install packages (list) with yum."""
    return self._run_pkg_cmd(['yum', '-y', 'install'] + pkg)

def apt_del_pkg(self, pkg):
    """Remove a Debian/Ubuntu package."""
    if call(['apt-get', 'remove', '-y', pkg]) != 0:
        return RET_ERR
    return RET_OK

def apt_fix_pkg(self, pkg):
    """Install a Debian/Ubuntu package."""
    if call(['apt-get', 'install', '--allow-unauthenticated', '-y', pkg]) != 0:
        return RET_ERR
    return RET_OK

def fixable(self):
    # fixability is not assessed for this module
    return RET_NA
def fix_pkg_combo(self):
    """Fix all packages in two batched calls: install then remove."""
    to_add = []
    to_del = []
    for pkg in self.packages:
        if pkg.startswith('-') and len(pkg) > 1:
            to_del.append(pkg[1:])
        elif pkg.startswith('+') and len(pkg) > 1:
            to_add.append(pkg[1:])
        else:
            to_add.append(pkg)
    if len(to_add) > 0:
        r = self.pkg_add(to_add)
        if r != RET_OK:
            return r
    if len(to_del) > 0:
        r = self.pkg_del(to_del)
        if r != RET_OK:
            return r
    return RET_OK

def fix_pkg(self, pkg):
    """Fix a single package: '-' prefix removes, otherwise installs."""
    if pkg.startswith('-') and len(pkg) > 1:
        return self.pkg_del(pkg[1:])
    if pkg.startswith('+') and len(pkg) > 1:
        return self.pkg_add(pkg[1:])
    return self.pkg_add(pkg)
def check_pkg(self, pkg, verbose=True):
    """Check one package: '-' prefix means it must be absent."""
    if pkg.startswith('-') and len(pkg) > 1:
        return self.check_pkg_del(pkg[1:], verbose)
    if pkg.startswith('+') and len(pkg) > 1:
        return self.check_pkg_add(pkg[1:], verbose)
    return self.check_pkg_add(pkg, verbose)

def check_pkg_del(self, pkg, verbose=True):
    """Compliant when `pkg` is NOT installed."""
    if pkg in self.installed_packages:
        if verbose:
            perror('package', pkg, 'is installed')
        return RET_ERR
    if verbose:
        pinfo('package', pkg, 'is not installed')
    return RET_OK

def check_pkg_add(self, pkg, verbose=True):
    """Compliant when `pkg` IS installed."""
    if pkg not in self.installed_packages:
        if verbose:
            perror('package', pkg, 'is not installed')
        return RET_ERR
    if verbose:
        pinfo('package', pkg, 'is installed')
    return RET_OK
def check(self):
    """Check all packages, OR-ing their return codes."""
    rc = 0
    for pkg in self.packages:
        rc |= self.check_pkg(pkg)
    return rc

def fix(self):
    """Fix non-compliant packages; batched when the backend allows."""
    rc = 0
    if self.combo_fix:
        return self.fix_pkg_combo()
    for pkg in self.packages:
        if self.check_pkg(pkg, verbose=False) == RET_OK:
            continue
        rc |= self.fix_pkg(pkg)
    return rc
if __name__ == "__main__":
main(CompPackages)
opensvc-1.8~20170412/var/compliance/com.opensvc/fileinc.py 0000755 0001750 0001750 00000021261 13073467726 023407 0 ustar jkelbert jkelbert #!/usr/bin/env python
data = {
"default_prefix": "OSVC_COMP_FILEINC_",
"example_value": """
{
"path": "/tmp/foo",
"check": ".*some pattern.*",
"fmt": "full added content with %%HOSTNAME%%@corp.com: some pattern into the file."
}
""",
"description": """* Verify file content.
* The collector provides the format with wildcards.
* The module replace the wildcards with contextual values.
* The fmt must match the check pattern
Wildcards:
%%ENV:VARNAME%% Any environment variable value
%%HOSTNAME%% Hostname
%%SHORT_HOSTNAME%% Short hostname
""",
"form_definition": """
Desc: |
A fileinc rule, fed to the 'fileinc' compliance object to verify a line matching the 'check' regular expression is present in the specified file.
Css: comp48
Outputs:
-
Dest: compliance variable
Class: fileinc
Type: json
Format: dict
Inputs:
-
Id: path
Label: Path
DisplayModeLabel: path
LabelCss: hd16
Mandatory: Yes
Help: File path to search the matching line into.
Type: string
-
Id: check
Label: Check regexp
DisplayModeLabel: check
LabelCss: action16
Mandatory: Yes
Help: A regular expression. Matching the regular expression is sufficent to grant compliancy.
Type: string
-
Id: fmt
Label: Format
DisplayModeLabel: fmt
LabelCss: action16
Help: The line installed if the check pattern is not found in the file.
Type: string
-
Id: ref
Label: URL to format
DisplayModeLabel: ref
LabelCss: loc
Help: An URL pointing to a file containing the line installed if the check pattern is not found in the file.
Type: string
""",
}
import os
import sys
import json
import stat
import re
import urllib
import tempfile
import codecs
sys.path.append(os.path.dirname(__file__))
from comp import *
MAXSZ = 8*1024*1024
class CompFileInc(CompObject):
    """Ensure a line matching a regexp is present (or absent) in files."""

    def __init__(self, prefix=None):
        CompObject.__init__(self, prefix=prefix, data=data)

    def init(self):
        # per-path caches: file content, readability flag, pending writes
        self.files = {}
        self.ok = {}
        self.checks = []
        self.upds = {}
        self.sysname, self.nodename, x, x, self.machine = os.uname()
        for rule in self.get_rules():
            self.add_rule(rule)
        if len(self.checks) == 0:
            raise NotApplicable()

    def fixable(self):
        # fixability is not assessed for this module
        return RET_NA
def parse_fmt(self, x):
    """Expand %%HOSTNAME%% / %%SHORT_HOSTNAME%% wildcards; integers
    are converted to strings first."""
    if isinstance(x, int):
        x = str(x)
    short_name = self.nodename.split('.')[0]
    return x.replace('%%HOSTNAME%%', self.nodename).replace('%%SHORT_HOSTNAME%%', short_name)
def parse_ref(self, url):
    """Download `url` and return its content with wildcards expanded
    by parse_fmt; return '' on download error.

    Fix: the original closed the NamedTemporaryFile (which also
    deletes the file on disk) and then called f.read() on the closed
    handle, so the downloaded content could never be read back.
    Download to a scratch path, read it, then remove it.
    """
    f = tempfile.NamedTemporaryFile(delete=False)
    tmpf = f.name
    f.close()
    try:
        self.urlretrieve(url, tmpf)
    except Exception as e:
        perror(url, "download error:", e)
        try:
            os.unlink(tmpf)
        except:
            pass
        return ''
    try:
        with codecs.open(tmpf, 'r', encoding="utf8", errors="ignore") as fd:
            content = fd.read()
    finally:
        try:
            os.unlink(tmpf)
        except:
            pass
    return self.parse_fmt(content)
def read_file(self, path):
    """Return the file content without trailing newlines, or '' when
    the path does not exist. Read errors are logged and re-raised."""
    if not os.path.exists(path):
        return ''
    try:
        fd = codecs.open(path, 'r', encoding="utf8", errors="ignore")
        try:
            return fd.read().rstrip('\n')
        finally:
            fd.close()
    except IOError as e:
        pinfo("cannot read '%s', error=%d - %s" %(path, e.errno, str(e)))
        raise
    except:
        perror("Cannot open '%s', unexpected error: %s"%(path, sys.exc_info()[0]))
        raise
def add_rule(self, d):
    """Register one fileinc rule: cache the target file content and
    pre-compute the line to install ('fmt' inline or 'ref' URL).

    Returns RET_OK, or a bitmask with RET_ERR set on malformed
    rules, oversized files or unreadable files.
    """
    r = RET_OK
    if 'path' not in d:
        perror("'path' should be defined:", d)
        r |= RET_ERR
    if 'fmt' in d and 'ref' in d:
        perror("'fmt' and 'ref' are exclusive:", d)
        r |= RET_ERR
    if 'path' in d:
        d['path'] = d['path'].strip()
    if 'ref' in d:
        d['ref'] = d['ref'].strip()
    if not d['path'] in self.upds:
        self.upds[d['path']] = 0
    if not d['path'] in self.files:
        try:
            fsz = os.path.getsize(d['path'])
        except:
            fsz = 0
        if fsz > MAXSZ:
            # refuse to load huge files into memory
            self.ok[d['path']] = 0
            self.files[d['path']] = ''
            perror("file '%s' is too large [%.2f Mb] to fit" %(d['path'], fsz/(1024.*1024)))
            r |= RET_ERR
        else:
            try:
                self.files[d['path']] = self.read_file(d['path'])
                self.ok[d['path']] = 1
            except:
                self.files[d['path']] = ""
                self.ok[d['path']] = 0
                r |= RET_ERR
    c = ''
    if 'fmt' in d:
        c = self.parse_fmt(d['fmt'])
    elif 'ref' in d:
        c = self.parse_ref(d['ref'])
    else:
        perror("'fmt' or 'ref' should be defined:", d)
        r |= RET_ERR
    c = c.strip()
    # the line to install must itself match the check pattern;
    # an empty target means "the pattern must be absent"
    if re.match(d['check'], c) is not None or len(c) == 0:
        val = True
    else:
        val = False
        r |= RET_ERR
    self.checks.append({'check':d['check'], 'path':d['path'], 'add':c, 'valid':val})
    return r
def check(self):
    """Check every rule against the cached file contents.

    For each rule: exactly one line must match the 'check' pattern
    and be equal to the target line; when the target is empty the
    pattern must not match at all.
    """
    r = RET_OK
    for ck in self.checks:
        if not ck['valid']:
            perror("rule error: '%s' does not match target content" % ck['check'])
            r |= RET_ERR
            continue
        if self.ok[ck['path']] != 1:
            # file was unreadable or too large at load time
            r |= RET_ERR
            continue
        pr = RET_OK
        m = 0
        ok = 0
        lines = self.files[ck['path']].split('\n')
        for line in lines:
            if re.match(ck['check'], line):
                m += 1
                if len(ck['add']) > 0 and line == ck['add']:
                    pinfo("line '%s' found in '%s'" %(line, ck['path']))
                    ok += 1
        if m > 1:
            perror("duplicate match of pattern '%s' in '%s'"%(ck['check'], ck['path']))
            pr |= RET_ERR
        if len(ck['add']) == 0:
            # empty target: the pattern must be absent
            if m > 0:
                perror("pattern '%s' found in %s"%(ck['check'], ck['path']))
                pr |= RET_ERR
            else:
                pinfo("pattern '%s' not found in %s"%(ck['check'], ck['path']))
        elif ok == 0:
            perror("line '%s' not found in %s"%(ck['add'], ck['path']))
            pr |= RET_ERR
        elif m == 0:
            perror("pattern '%s' not found in %s"%(ck['check'], ck['path']))
            pr |= RET_ERR
        r |= pr
    return r
def rewrite_files(self):
    """Write back every file flagged as modified in self.upds."""
    rc = RET_OK
    for path in self.files:
        if self.upds[path] == 0:
            continue
        if self.ok[path] != 1:
            rc |= RET_ERR
            continue
        if not os.path.exists(path):
            perror("'%s' will be created, please check owner and permissions" %path)
        try:
            fd = codecs.open(path, 'w', encoding="utf8")
            fd.write(self.files[path])
            fd.close()
            pinfo("'%s' successfully rewritten" %path)
        except:
            perror("failed to rewrite '%s'" %path)
            rc |= RET_ERR
    return rc
def fix(self):
    """Bring files to compliance in memory, then rewrite them.

    The first matching line is rewritten to the target, further
    matches are blanked (duplicate purge), and a missing target
    line is appended.
    """
    r = RET_OK
    for ck in self.checks:
        if not ck['valid']:
            perror("rule error: '%s' does not match target content" % ck['check'])
            r |= RET_ERR
            continue
        if self.ok[ck['path']] != 1:
            r |= RET_ERR
            continue
        need_rewrite = False
        m = 0
        lines = self.files[ck['path']].rstrip('\n').split('\n')
        for i, line in enumerate(lines):
            if re.match(ck['check'], line):
                m += 1
                if m == 1:
                    if line != ck['add']:
                        # rewrite line
                        pinfo("rewrite %s:%d:'%s', new content: '%s'" %(ck['path'], i, line, ck['add']))
                        lines[i] = ck['add']
                        need_rewrite = True
                elif m > 1:
                    # purge dup
                    pinfo("remove duplicate line %s:%d:'%s'" %(ck['path'], i, line))
                    lines[i] = ""
                    need_rewrite = True
        if m == 0 and len(ck['add']) > 0:
            pinfo("add line '%s' to %s"%(ck['add'], ck['path']))
            lines.append(ck['add'])
            need_rewrite = True
        if need_rewrite:
            self.files[ck['path']] = '\n'.join(lines).rstrip("\n")+"\n"
            self.upds[ck['path']] = 1
    r |= self.rewrite_files()
    return r
if __name__ == "__main__":
main(CompFileInc)
opensvc-1.8~20170412/var/compliance/com.opensvc/utilities.py 0000644 0001750 0001750 00000001360 13073467726 024004 0 ustar jkelbert jkelbert #!/usr/bin/env python
from __future__ import print_function
import os
import sys
def is_exe(fpath):
    """Tell whether fpath designates an executable file.

    Combines an existence test with the X_OK access check; returns a
    boolean.
    """
    if not os.path.exists(fpath):
        return False
    return os.access(fpath, os.X_OK)
def which(program):
    """Locate an executable, like the unix which(1) command.

    If program carries a directory component, it is returned as-is
    when executable. Otherwise each PATH entry is scanned in order.

    Returns the resolved path (str) on success, None when no
    executable match is found. (The original docstring wrongly claimed
    a True/False return.)
    """
    fpath, fname = os.path.split(program)
    if fpath and is_exe(program):
        # explicit path given: no PATH lookup needed
        return program
    # .get() avoids a KeyError when PATH is unset in the environment
    for path in os.environ.get("PATH", "").split(os.pathsep):
        exe_file = os.path.join(path, program)
        if is_exe(exe_file):
            return exe_file
    return None
if __name__ == "__main__":
print("this file is for import into compliance objects", file=sys.stderr)
opensvc-1.8~20170412/var/compliance/com.opensvc/process.py 0000755 0001750 0001750 00000032004 13073467726 023451 0 ustar jkelbert jkelbert #!/usr/bin/env python
data = {
"default_prefix": "OSVC_COMP_PROC_",
"example_value": """
[
{
"comm": "foo",
"uid": 2345,
"state": "on",
"user": "foou"
},
{
"comm": "bar",
"state": "off",
"uid": 2345
}
]
""",
"description": """* Checks if a process is present, specifying its comm, and optionnaly its owner's uid and/or username.
""",
"form_definition": """
Desc: |
A rule defining a process that should be running or not running on the target host, its owner's username and the command to launch it or to stop it.
Css: comp48
Outputs:
-
Dest: compliance variable
Type: json
Format: list of dict
Class: process
Inputs:
-
Id: comm
Label: Command
DisplayModeLabel: comm
LabelCss: action16
Mandatory: No
Type: string
Help: The Unix process command, as shown in the ps comm column.
-
Id: args
Label: Arguments
DisplayModeLabel: args
LabelCss: action16
Mandatory: No
Type: string
Help: The Unix process arguments, as shown in the ps args column.
-
Id: state
Label: State
DisplayModeLabel: state
LabelCss: action16
Type: string
Mandatory: Yes
Default: on
Candidates:
- "on"
- "off"
Help: The expected process state.
-
Id: uid
Label: Owner user id
DisplayModeLabel: uid
LabelCss: guy16
Type: integer
Help: The Unix user id owning the process.
-
Id: user
Label: Owner user name
DisplayModeLabel: user
LabelCss: guy16
Type: string
Help: The Unix user name owning the process.
-
Id: start
Label: Start command
DisplayModeLabel: start
LabelCss: action16
Type: string
Help: The command to start or stop the process, including the executable arguments. The executable must be defined with full path.
""",
}
import os
import sys
import json
import re
from subprocess import *
sys.path.append(os.path.dirname(__file__))
from comp import *
from utilities import which
class CompProcess(CompObject):
def __init__(self, prefix=None):
CompObject.__init__(self, prefix=prefix, data=data)
def init(self):
self.sysname, self.nodename, x, x, self.machine = os.uname()
if self.sysname not in ['Linux', 'AIX', 'SunOS', 'FreeBSD', 'Darwin', 'HP-UX']:
perror('module not supported on', self.sysname)
raise NotApplicable()
if self.sysname == 'HP-UX' and 'UNIX95' not in os.environ:
os.environ['UNIX95'] = ""
self.process = self.get_rules()
self.validate_process()
if len(self.process) == 0:
raise NotApplicable()
self.load_ps()
def load_ps_args(self):
self.ps_args = {}
cmd = ['ps', '-e', '-o', 'pid,uid,user,args']
p = Popen(cmd, stdout=PIPE)
out, err = p.communicate()
if p.returncode != 0:
perror("unable to fetch ps")
raise ComplianceError
out = bdecode(out)
lines = out.splitlines()
if len(lines) < 2:
return
for line in lines[1:]:
l = line.split()
if len(l) < 4:
continue
pid, uid, user = l[:3]
args = " ".join(l[3:])
if args not in self.ps_args:
self.ps_args[args] = [(pid, int(uid), user)]
else:
self.ps_args[args].append((pid, int(uid), user))
def load_ps_comm(self):
    """Build the comm -> [(pid, uid, user)] map from ps(1) output.

    Raises ComplianceError when ps cannot be executed successfully.
    """
    self.ps_comm = {}
    cmd = ['ps', '-e', '-o', 'comm,pid,uid,user']
    proc = Popen(cmd, stdout=PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0:
        perror("unable to fetch ps")
        raise ComplianceError
    rows = bdecode(out).splitlines()
    if len(rows) < 2:
        return
    # skip the header row; keep only well-formed 4-field rows
    for row in rows[1:]:
        fields = row.split()
        if len(fields) != 4:
            continue
        comm, pid, uid, user = fields
        self.ps_comm.setdefault(comm, []).append((pid, int(uid), user))
def load_ps(self):
self.load_ps_comm()
self.load_ps_args()
def validate_process(self):
l = []
for process in self.process:
if self._validate_process(process) == RET_OK:
l.append(process)
self.process = l
def _validate_process(self, process):
if 'comm' not in process and 'args' not in process:
perror(process, 'rule is malformed ... nor comm nor args key present')
return RET_ERR
if 'uid' in process and type(process['uid']) != int:
perror(process, 'rule is malformed ... uid value must be integer')
return RET_ERR
return RET_OK
def get_keys_args(self, args):
    """Return the ps args keys matching the args regular expression."""
    return [key for key in self.ps_args if re.match(args, key) is not None]
def get_keys_comm(self, comm):
    """Return the ps comm keys matching the comm regular expression."""
    return [key for key in self.ps_comm if re.match(comm, key) is not None]
def check_present_args(self, args, verbose):
if len(args.strip()) == 0:
return RET_OK
found = self.get_keys_args(args)
if len(found) == 0:
if verbose:
perror('process with args', args, 'is not started ... should be')
return RET_ERR
else:
if verbose:
pinfo('process with args', args, 'is started ... on target')
return RET_OK
def check_present_comm(self, comm, verbose):
if len(comm.strip()) == 0:
return RET_OK
found = self.get_keys_comm(comm)
if len(found) == 0:
if verbose:
perror('process with command', comm, 'is not started ... should be')
return RET_ERR
else:
if verbose:
pinfo('process with command', comm, 'is started ... on target')
return RET_OK
def check_present(self, process, verbose):
r = RET_OK
if 'comm' in process:
r |= self.check_present_comm(process['comm'], verbose)
if 'args' in process:
r |= self.check_present_args(process['args'], verbose)
return r
def check_not_present_comm(self, comm, verbose):
    """Check that no process matching comm is running.

    Empty patterns are ignored (RET_OK). Returns RET_OK when absent,
    RET_ERR when a matching process is found.
    """
    if len(comm.strip()) == 0:
        return RET_OK
    found = self.get_keys_comm(comm)
    if len(found) == 0:
        if verbose:
            pinfo('process with command', comm, 'is not started ... on target')
        return RET_OK
    if verbose:
        # fixed message: original printed "is started ... shoud be"
        perror('process with command', comm, 'is started ... should not be')
    return RET_ERR
def check_not_present_args(self, args, verbose):
    """Check that no process matching args is running.

    Empty patterns are ignored (RET_OK). Returns RET_OK when absent,
    RET_ERR when a matching process is found.
    """
    if len(args.strip()) == 0:
        return RET_OK
    found = self.get_keys_args(args)
    if len(found) == 0:
        if verbose:
            pinfo('process with args', args, 'is not started ... on target')
        return RET_OK
    if verbose:
        # fixed message: original printed "is started ... shoud be"
        perror('process with args', args, 'is started ... should not be')
    return RET_ERR
def check_not_present(self, process, verbose):
r = 0
if 'comm' in process:
r |= self.check_not_present_comm(process['comm'], verbose)
if 'args' in process:
r |= self.check_not_present_args(process['args'], verbose)
return r
def check_process(self, process, verbose=True):
r = RET_OK
if process['state'] == 'on':
r |= self.check_present(process, verbose)
if r == RET_ERR:
return RET_ERR
if 'uid' in process:
r |= self.check_uid(process, process['uid'], verbose)
if 'user' in process:
r |= self.check_user(process, process['user'], verbose)
else:
r |= self.check_not_present(process, verbose)
return r
def check_uid(self, process, uid, verbose):
if 'args' in process:
return self.check_uid_args(process['args'], uid, verbose)
if 'comm' in process:
return self.check_uid_comm(process['comm'], uid, verbose)
def check_uid_comm(self, comm, uid, verbose):
    """Check that a process matching comm runs under numeric uid.

    Empty patterns are ignored (RET_OK). Returns RET_OK on a match,
    RET_ERR otherwise.
    """
    if len(comm.strip()) == 0:
        return RET_OK
    matched = False
    for key in self.get_keys_comm(comm):
        for _pid, _uid, _user in self.ps_comm[key]:
            if uid == _uid:
                matched = True
                break
        if matched:
            break
    if matched:
        if verbose:
            pinfo('process with command', comm, 'runs with uid', uid, '... on target')
        return RET_OK
    if verbose:
        # report the expected uid; the original printed the unrelated
        # last-seen uid and raised NameError when nothing matched at all
        perror('process with command', comm, 'does not run with uid', uid, '... should be')
    return RET_ERR
def check_uid_args(self, args, uid, verbose):
    """Check that a process matching args runs under numeric uid.

    Empty patterns are ignored (RET_OK). Returns RET_OK on a match,
    RET_ERR otherwise.
    """
    if len(args.strip()) == 0:
        return RET_OK
    matched = False
    for key in self.get_keys_args(args):
        for _pid, _uid, _user in self.ps_args[key]:
            if uid == _uid:
                matched = True
                break
        if matched:
            break
    if matched:
        if verbose:
            pinfo('process with args', args, 'runs with uid', uid, '... on target')
        return RET_OK
    if verbose:
        # report the expected uid; the original printed the unrelated
        # last-seen uid and raised NameError when nothing matched at all
        perror('process with args', args, 'does not run with uid', uid, '... should be')
    return RET_ERR
def check_user(self, process, user, verbose):
if 'args' in process:
return self.check_user_args(process['args'], user, verbose)
if 'comm' in process:
return self.check_user_comm(process['comm'], user, verbose)
def check_user_comm(self, comm, user, verbose):
    """Check that a process matching comm runs under the given user.

    Empty patterns or empty/None user are ignored (RET_OK). Returns
    RET_OK on a match, RET_ERR otherwise.
    """
    if len(comm.strip()) == 0:
        return RET_OK
    if user is None or len(user) == 0:
        return RET_OK
    matched = False
    actual = None
    for key in self.get_keys_comm(comm):
        for _pid, _uid, _user in self.ps_comm[key]:
            actual = _user
            if user == _user:
                matched = True
                break
        if matched:
            break
    if matched:
        if verbose:
            pinfo('process with command', comm, 'runs with user', user, '... on target')
        return RET_OK
    if verbose:
        # 'actual' may be None when no process matched at all (the
        # original raised NameError on an unbound variable here)
        perror('process with command', comm, 'runs with user', actual, '... should run with user', user)
    return RET_ERR
def check_user_args(self, args, user, verbose):
    """Check that a process matching args runs under the given user.

    Empty patterns or empty/None user are ignored (RET_OK). Returns
    RET_OK on a match, RET_ERR otherwise.
    """
    if len(args.strip()) == 0:
        return RET_OK
    if user is None or len(user) == 0:
        return RET_OK
    matched = False
    actual = None
    for key in self.get_keys_args(args):
        for _pid, _uid, _user in self.ps_args[key]:
            actual = _user
            if user == _user:
                matched = True
                break
        if matched:
            break
    if matched:
        if verbose:
            pinfo('process with args', args, 'runs with user', user, '... on target')
        return RET_OK
    if verbose:
        # 'actual' may be None when no process matched at all (the
        # original raised NameError on an unbound variable here)
        perror('process with args', args, 'runs with user', actual, '... should run with user', user)
    return RET_ERR
def fix_process(self, process):
    """Fix one process rule by running its 'start' command.

    For state "on": nothing to do when already running with the right
    owner; a wrong owner is reported as unfixable. For state "off":
    nothing to do when already absent.
    NOTE(review): for state "off" the 'start' command is expected to
    stop the process — confirm rule semantics with the form help text.

    Returns RET_OK or RET_ERR.
    """
    if process['state'] == 'on':
        if self.check_present(process, verbose=False) == RET_OK:
            # already running: only ownership can still be wrong, and
            # that cannot be fixed by (re)starting the process
            if ('uid' in process and self.check_uid(process, process['uid'], verbose=False) == RET_ERR) or \
               ('user' in process and self.check_user(process, process['user'], verbose=False) == RET_ERR):
                perror(process, "runs with the wrong user. can't fix.")
                return RET_ERR
            return RET_OK
    elif process['state'] == 'off':
        if self.check_not_present(process, verbose=False) == RET_OK:
            return RET_OK
    if 'start' not in process or len(process['start'].strip()) == 0:
        perror("undefined fix method for process", process['comm'])
        return RET_ERR
    # naive whitespace split of the command line; executable first
    v = process['start'].split(' ')
    if not which(v[0]):
        perror("fix command", v[0], "is not present or not executable")
        return RET_ERR
    pinfo('exec:', process['start'])
    try:
        p = Popen(v, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
    except Exception as e:
        perror(e)
        return RET_ERR
    out = bdecode(out)
    err = bdecode(err)
    if len(out) > 0:
        pinfo(out)
    if len(err) > 0:
        perror(err)
    if p.returncode != 0:
        perror("fix up command returned with error code", p.returncode)
        return RET_ERR
    return RET_OK
def check(self):
    """Check every process rule; OR-combine the return codes."""
    rc = 0
    for rule in self.process:
        rc |= self.check_process(rule)
    return rc
def fix(self):
    """Fix every process rule; OR-combine the return codes."""
    rc = 0
    for rule in self.process:
        rc |= self.fix_process(rule)
    return rc
if __name__ == "__main__":
main(CompProcess)
opensvc-1.8~20170412/var/compliance/com.opensvc/cron.py 0000755 0001750 0001750 00000015316 13073467726 022743 0 ustar jkelbert jkelbert #!/usr/bin/env python
data = {
"default_prefix": "OSVC_COMP_CRON_ENTRY_",
"example_value": "add:osvc:* * * * *:/path/to/mycron:/etc/cron.d/opensvc",
"description": """* Add and Remove cron entries
* Support arbitrary cron file location
""",
}
import os
import sys
import shutil
import glob
from subprocess import *
sys.path.append(os.path.dirname(__file__))
from comp import *
class CompCron(CompObject):
def __init__(self, prefix=None):
CompObject.__init__(self, prefix=prefix, data=data)
def init(self):
self.sysname, self.nodename, x, x, self.machine = os.uname()
if self.sysname == 'SunOS' :
self.crontab_locs = [
'/var/spool/cron/crontabs'
]
else:
self.crontab_locs = [
'/etc/cron.d',
'/var/spool/cron/crontabs',
'/var/spool/cron',
'/var/cron/tabs',
]
self.ce = []
for _ce in self.get_rules_raw():
e = _ce.split(':')
if len(e) < 5:
perror("malformed variable %s. format: action:user:sched:cmd:[file]"%_ce)
continue
if e[0] not in ('add', 'del'):
perror("unsupported action in variable %s. set 'add' or 'del'"%_ce)
continue
if len(e[2].split()) != 5:
perror("malformed schedule in variable %s"%_ce)
continue
self.ce += [{
'var': _ce,
'action': e[0],
'user': e[1],
'sched': e[2],
'cmd': e[3],
'file': e[4],
}]
if len(self.ce) == 0:
raise NotApplicable()
def activate_cron(self, cron_file):
""" Activate changes (actually only needed on HP-UX)
"""
if '/var/spool/' in cron_file:
pinfo("tell crond about the change")
cmd = ['crontab', cron_file]
process = Popen(cmd, stdout=PIPE, stderr=PIPE, close_fds=True)
buff = process.communicate()
def fixable(self):
    """Return RET_OK when all cron entries can be fixed.

    A ComplianceError flags the entry and evaluation continues; an
    Unfixable error aborts immediately with the accumulated status.
    """
    r = RET_OK
    for ce in self.ce:
        try:
            self._fixable_cron(ce)
        # 'except X as e' replaces the python2-only 'except X, e'
        # form, which also shadowed the loop variable
        except ComplianceError as exc:
            perror(str(exc))
            r = RET_ERR
        except Unfixable as exc:
            perror(str(exc))
            return r
    return r
def fix(self):
    """Apply every cron entry rule (add or del).

    A ComplianceError flags the entry and processing continues; an
    Unfixable error aborts immediately with the accumulated status.
    """
    r = RET_OK
    for ce in self.ce:
        try:
            if ce['action'] == 'add':
                self._add_cron(ce)
            elif ce['action'] == 'del':
                self._del_cron(ce)
        # 'except X as e' replaces the python2-only 'except X, e'
        # form, which also shadowed the loop variable
        except ComplianceError as exc:
            perror(str(exc))
            r = RET_ERR
        except Unfixable as exc:
            perror(str(exc))
            return r
    return r
def check(self):
    """Check every cron entry rule.

    A ComplianceError flags the entry and checking continues; an
    Unfixable error aborts immediately with the accumulated status.
    """
    r = RET_OK
    for ce in self.ce:
        try:
            self._check_cron(ce)
        # 'except X as e' replaces the python2-only 'except X, e'
        # form, which also shadowed the loop variable
        except ComplianceError as exc:
            perror(str(exc))
            r = RET_ERR
        except Unfixable as exc:
            perror(str(exc))
            return r
    return r
def get_cron_file(self, e):
    """Return the crontab path for entry e.

    The first existing location of self.crontab_locs wins (order of
    preference). /etc/cron.d entries use the rule's file name, spool
    directories use the rule's user name. Returns None when no known
    crontab directory exists on the host.
    """
    for loc in self.crontab_locs:
        if not os.path.exists(loc):
            continue
        if loc == '/etc/cron.d':
            return os.path.join(loc, e['file'])
        return os.path.join(loc, e['user'])
    return None
def format_entry(self, cron_file, e):
    """Render the crontab line; cron.d files carry the user field."""
    if 'cron.d' in cron_file:
        fields = [e['sched'], e['user'], e['cmd']]
    else:
        fields = [e['sched'], e['cmd']]
    return ' '.join(fields)
def _fixable_cron(self, e):
cron_file = self.get_cron_file(e)
if cron_file is None:
raise Unfixable("no crontab usual location found (%s)"%str(self.crontab_locs))
def _check_cron(self, e):
cron_file = self.get_cron_file(e)
if cron_file is None:
raise Unfixable("no crontab usual location found (%s)"%str(self.crontab_locs))
s = self.format_entry(cron_file, e)
if not os.path.exists(cron_file):
raise ComplianceError("cron entry not found '%s' in '%s'"%(s, cron_file))
with open(cron_file, 'r') as f:
new = f.readlines()
found = False
for line in new:
if s == line[:-1]:
found = True
break
if not found and e['action'] == 'add':
raise ComplianceError("wanted cron entry not found: '%s' in '%s'"%(s, cron_file))
if found and e['action'] == 'del':
raise ComplianceError("unwanted cron entry found: '%s' in '%s'"%(s, cron_file))
def _del_cron(self, e):
cron_file = self.get_cron_file(e)
if cron_file is None:
raise Unfixable("no crontab usual location found (%s)"%str(self.crontab_locs))
s = self.format_entry(cron_file, e)
if not os.path.exists(cron_file):
return
new = []
with open(cron_file, 'r') as f:
lines = f.readlines()
for line in lines:
if s == line[:-1]:
pinfo("delete entry '%s' from '%s'"%(s, cron_file))
continue
new.append(line)
if len(new) == 0:
pinfo('deleted last entry of %s. delete file too.'%cron_file)
os.unlink(cron_file)
else:
with open(cron_file, 'w') as f:
f.write(''.join(new))
self.activate_cron(cron_file)
def _add_cron(self, e):
    """Add entry e to its crontab file and activate the change.

    Raises Unfixable when no crontab location exists on the host.
    NOTE(review): when the entry is already present the file is still
    rewritten verbatim and the "add entry" message still printed.
    """
    cron_file = self.get_cron_file(e)
    if cron_file is None:
        raise Unfixable("no crontab usual location found (%s)"%str(self.crontab_locs))
    s = self.format_entry(cron_file, e)
    new = False
    if os.path.exists(cron_file):
        with open(cron_file, 'r') as f:
            new = f.readlines()
        found = False
        for line in new:
            # compare against the line stripped of its trailing newline
            if s == line[:-1]:
                found = True
                break
        if not found:
            new.append(s+'\n')
    else:
        # file does not exist yet: single-entry crontab
        new = [s+'\n']
    if not new:
        raise ComplianceError("problem preparing the new crontab '%s'"%cron_file)
    pinfo("add entry '%s' to '%s'"%(s, cron_file))
    with open(cron_file, 'w') as f:
        f.write(''.join(new))
    self.activate_cron(cron_file)
if __name__ == "__main__":
main(CompCron)
opensvc-1.8~20170412/var/compliance/com.opensvc/fs.py 0000755 0001750 0001750 00000054001 13073467726 022404 0 ustar jkelbert jkelbert #!/usr/bin/env python
"""
Verify file content. The collector provides the format with
wildcards. The module replace the wildcards with contextual
values.
The variable format is json-serialized:
[{
"dev": "lv_applisogm",
"size": "1024M",
"mnt": "/%%ENV:SVCNAME%%/applis/ogm",
"vg": ["%%ENV:SVCNAME%%", "vgAPPLIS", "vgCOMMUN01", "vgLOCAL"]
}]
Wildcards:
%%ENV:VARNAME%% Any environment variable value
Toggle:
%%ENV:FS_STRIP_SVCNAME_FROM_DEV_IF_IN_VG%%
"""
import os
import sys
import json
import stat
import re
from subprocess import *
from stat import *
sys.path.append(os.path.dirname(__file__))
from comp import *
from utilities import which
class CompFs(object):
def __init__(self, prefix='OSVC_COMP_FS_'):
    """Load fs rules from environment variables matching prefix.

    Detects the opensvc service context from OSVC_COMP_SERVICES_SVCNAME
    and normalizes every rule. Raises NotApplicable when no rule is
    found or parseable.
    """
    self.prefix = prefix.upper()
    self.sysname, self.nodename, x, x, self.machine = os.uname()
    self.sysname = self.sysname.replace('-', '')
    self.fs = []
    self.res = {}
    self.res_status = {}
    if 'OSVC_COMP_SERVICES_SVCNAME' in os.environ:
        self.svcname = os.environ['OSVC_COMP_SERVICES_SVCNAME']
        self.osvc_service = True
    else:
        os.environ['OSVC_COMP_SERVICES_SVCNAME'] = ""
        self.svcname = None
        self.osvc_service = False
    keys = [key for key in os.environ if key.startswith(self.prefix)]
    if len(keys) == 0:
        raise NotApplicable()
    self.vglist()
    for k in keys:
        try:
            self.fs += self.add_fs(os.environ[k])
        except ValueError:
            perror('failed to parse variable', os.environ[k])
    if len(self.fs) == 0:
        raise NotApplicable()
    # key-based sort: same ordering as the former cmp()-based call,
    # but compatible with python3 where cmp sorting was removed
    self.fs.sort(key=lambda x: x['mnt'])
def vglist_HPUX(self):
import glob
l = glob.glob("/dev/*/group")
l = map(lambda x: x.split('/')[2], l)
self.vg = l
def vglist_Linux(self):
if not which("vgs"):
perror('vgs command not found')
raise ComplianceError()
cmd = ['vgs', '-o', 'vg_name', '--noheadings']
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if p.returncode != 0:
perror('failed to list volume groups')
raise ComplianceError()
out = bdecode(out)
self.vg = out.split()
def vglist(self):
if not hasattr(self, 'vglist_'+self.sysname):
perror(self.sysname, 'not supported')
raise NotApplicable()
getattr(self, 'vglist_'+self.sysname)()
def subst(self, v):
if type(v) == list:
l = []
for _v in v:
l.append(self.subst(_v))
return l
if type(v) != str and type(v) != unicode:
return v
p = re.compile('%%ENV:\w+%%')
for m in p.findall(v):
s = m.strip("%").replace('ENV:', '')
if s in os.environ:
_v = os.environ[s]
elif 'OSVC_COMP_'+s in os.environ:
_v = os.environ['OSVC_COMP_'+s]
else:
perror(s, 'is not an env variable')
raise NotApplicable()
v = v.replace(m, _v)
return v.strip()
def add_fs(self, v):
if type(v) == str or type(v) == unicode:
d = json.loads(v)
else:
d = v
l = []
# recurse if multiple fs are specified in a list of dict
if type(d) == list:
for _d in d:
l += self.add_fs(_d)
return l
if type(d) != dict:
perror("not a dict:", d)
return l
if 'dev' not in d:
perror('dev should be in the dict:', d)
return l
if 'mnt' not in d:
perror('mnt should be in the dict:', d)
return l
if 'size' not in d:
perror('size should be in the dict:', d)
return l
if 'vg' not in d:
perror('vg should be in the dict:', d)
return l
if 'type' not in d:
perror('type should be in the dict:', d)
return l
if 'opts' not in d:
perror('opts should be in the dict:', d)
return l
if type(d['vg']) != list:
d['vg'] = [d['vg']]
d['vg_orig'] = d['vg']
d['vg'] = self.subst(d['vg'])
d['prefvg'] = self.prefvg(d)
d['dev'] = self.strip_svcname(d)
for k in ('dev', 'mnt', 'size', 'type', 'opts'):
d[k] = self.subst(d[k])
d['mnt'] = self.normpath(d['mnt'])
d['devpath'] = self.devpath(d)
d['rdevpath'] = self.rdevpath(d)
try:
d['size'] = self.size_to_mb(d)
except ComplianceError:
return []
return [d]
def strip_svcname(self, fs):
key = "OSVC_COMP_FS_STRIP_SVCNAME_FROM_DEV_IF_IN_VG"
if key not in os.environ or os.environ[key] != "true":
return fs['dev']
if "%%ENV:SERVICES_SVCNAME%%" not in fs['vg_orig'][fs['prefvg_idx']]:
return fs['dev']
# the vg is dedicated to the service. no need to embed
# the service name in the lv name too
s = fs['dev'].replace("%%ENV:SERVICES_SVCNAME%%", "")
if s == "lv_":
s = "root"
return s
def normpath(self, p):
l = p.split('/')
p = os.path.normpath(os.path.join(os.sep, *l))
return p
def rdevpath(self, d):
return '/dev/%s/r%s'%(d['prefvg'], d['dev'])
def devpath(self, d):
return '/dev/%s/%s'%(d['prefvg'], d['dev'])
def prefvg(self, d):
lc_candidate_vg = map(lambda x: x.lower(), d['vg'])
lc_existing_vg = map(lambda x: x.lower(), self.vg)
for i, vg in enumerate(lc_candidate_vg):
if vg in lc_existing_vg:
d['prefvg_idx'] = i
# return capitalized vg name
return self.vg[lc_existing_vg.index(vg)]
perror("no candidate vg is available on this node for dev %s"%d['dev'])
raise NotApplicable()
def check_fs_mnt(self, fs, verbose=False):
if not os.path.exists(fs['mnt']):
if verbose:
perror("mount point", fs['mnt'], "does not exist")
return 1
if verbose:
pinfo("mount point", fs['mnt'], "exists")
return 0
def check_fs_dev_exists(self, fs, verbose=False):
if not os.path.exists(fs['devpath']):
if verbose:
perror("device", fs['devpath'], "does not exist")
return 1
if verbose:
pinfo("device", fs['devpath'], "exists")
return 0
def check_fs_dev_stat(self, fs, verbose=False):
mode = os.stat(fs['devpath'])[ST_MODE]
if not S_ISBLK(mode):
if verbose:
perror("device", fs['devpath'], "is not a block device")
return 1
if verbose:
pinfo("device", fs['devpath'], "is a block device")
return 0
def find_vg_rid(self, vgname):
rids = [ rid for rid in self.res_status.keys() if rid.startswith('vg#') ]
for rid in rids:
if self.get_res_item(rid, 'vgname') == vgname:
return rid
return None
def private_svc_vg_down(self, fs):
if self.svcname is None or not self.osvc_service:
return False
rid = self.find_vg_rid(fs['prefvg'])
if rid is None:
# vg is not driven by the service
return False
if self.res_status[rid] not in ('up', 'stdby up'):
return False
return True
def check_fs_dev(self, fs, verbose=False):
if self.private_svc_vg_down(fs):
# don't report error on passive node with private svc prefvg
return 0
if self.check_fs_dev_exists(fs, verbose) == 1:
return 1
if self.check_fs_dev_stat(fs, verbose) == 1:
return 1
return 0
def fix_fs_dev(self, fs):
if self.check_fs_dev(fs, False) == 0:
return 0
if self.check_fs_dev_exists(fs, False) == 0:
perror("device", fs['devpath'], "already exists. won't fix.")
return 1
return self.createlv(fs)
def createlv(self, fs):
if not hasattr(self, 'createlv_'+self.sysname):
perror(self.sysname, 'not supported')
raise NotApplicable()
return getattr(self, 'createlv_'+self.sysname)(fs)
def size_to_mb(self, fs):
s = fs['size']
unit = s[-1]
size = int(s[:-1])
if unit == 'T':
s = str(size*1024*1024)
elif unit == 'G':
s = str(size*1024)
elif unit == 'M':
s = str(size)
elif unit == 'K':
s = str(size//1024)
else:
perror("unknown size unit in rule: %s (use T, G, M or K)"%s)
raise ComplianceError()
return s
def createlv_HPUX(self, fs):
cmd = ['lvcreate', '-n', fs['dev'], '-L', fs['size'], fs['prefvg']]
pinfo(' '.join(cmd))
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
out = bdecode(out)
err = bdecode(err)
if len(out) > 0:
pinfo(out)
if len(err) > 0:
pinfo(err)
if p.returncode != 0:
return 1
return 0
def createlv_Linux(self, fs):
os.environ["LVM_SUPPRESS_FD_WARNINGS"] = "1"
cmd = ['lvcreate', '-n', fs['dev'], '-L', fs['size']+'M', fs['prefvg']]
pinfo(' '.join(cmd))
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
out = bdecode(out)
err = bdecode(err)
if len(out) > 0:
pinfo(out)
if len(err) > 0:
pinfo(err)
if p.returncode != 0:
return 1
return 0
def fix_fs_mnt(self, fs, verbose=False):
if self.check_fs_mnt(fs, False) == 0:
return 0
pinfo("create", fs['mnt'], "mount point")
os.makedirs(fs['mnt'])
return 0
def check_fs_fmt_HPUX_vxfs(self, fs, verbose=False):
cmd = ['fstyp', fs['devpath']]
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
out = bdecode(out)
err = bdecode(err)
if p.returncode != 0 or "vxfs" not in out:
if verbose:
perror(fs['devpath'], "is not formatted")
return 1
if verbose:
pinfo(fs['devpath'], "is correctly formatted")
return 0
def check_fs_fmt_HPUX(self, fs, verbose=False):
if fs['type'] == 'vxfs':
return self.check_fs_fmt_HPUX_vxfs(fs, verbose)
perror("unsupported fs type: %s"%fs['type'])
return 1
def check_fs_fmt_Linux(self, fs, verbose=False):
if fs['type'] in ('ext2', 'ext3', 'ext4'):
return self.check_fs_fmt_Linux_ext(fs, verbose)
perror("unsupported fs type: %s"%fs['type'])
return 1
def check_fs_fmt_Linux_ext(self, fs, verbose=False):
cmd = ['tune2fs', '-l', fs['devpath']]
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
out = bdecode(out)
err = bdecode(err)
if p.returncode != 0:
if verbose:
perror(fs['devpath'], "is not formatted")
return 1
if verbose:
pinfo(fs['devpath'], "is correctly formatted")
return 0
def fix_fs_fmt_Linux_ext(self, fs):
cmd = ['mkfs.'+fs['type'], '-q', '-b', '4096', fs['devpath']]
pinfo(' '.join(cmd))
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
out = bdecode(out)
err = bdecode(err)
if len(out) > 0:
pinfo(out)
if len(err) > 0:
pinfo(err)
if p.returncode != 0:
return 1
cmd = ['tune2fs', '-m', '0', '-c', '0', '-i', '0', fs['devpath']]
pinfo(' '.join(cmd))
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
out = bdecode(out)
err = bdecode(err)
if len(out) > 0:
pinfo(out)
if len(err) > 0:
pinfo(err)
if p.returncode != 0:
return 1
return 0
def fix_fs_fmt_Linux(self, fs):
if fs['type'] in ('ext2', 'ext3', 'ext4'):
return self.fix_fs_fmt_Linux_ext(fs)
perror("unsupported fs type: %s"%fs['type'])
return 1
def check_fs_fmt(self, fs, verbose=False):
if not hasattr(self, 'check_fs_fmt_'+self.sysname):
perror(self.sysname, 'not supported')
raise NotApplicable()
return getattr(self, 'check_fs_fmt_'+self.sysname)(fs, verbose)
def fix_fs_fmt_HPUX_vxfs(self, fs):
cmd = ['newfs', '-F', 'vxfs', '-b', '8192', fs['rdevpath']]
pinfo(' '.join(cmd))
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
out = bdecode(out)
err = bdecode(err)
if len(out) > 0:
pinfo(out)
if len(err) > 0:
pinfo(err)
if p.returncode != 0:
return 1
return 0
def fix_fs_fmt_HPUX(self, fs):
    """Format the device backing fs on HP-UX; only vxfs is supported.

    Returns 0 on success, 1 on error. The original carried an
    unreachable, pasted-in copy of the check_fs_fmt dispatcher after
    the final return (referencing an undefined 'verbose' variable);
    that dead code has been removed.
    """
    if fs['type'] == 'vxfs':
        return self.fix_fs_fmt_HPUX_vxfs(fs)
    perror("unsupported fs type: %s"%fs['type'])
    return 1
def fix_fs_fmt(self, fs):
if self.check_fs_fmt(fs) == 0:
return 0
if not hasattr(self, 'fix_fs_fmt_'+self.sysname):
perror(self.sysname, 'not supported')
raise NotApplicable()
return getattr(self, 'fix_fs_fmt_'+self.sysname)(fs)
def get_res_item(self, rid, item):
cmd = ['svcmgr', '-s', self.svcname, 'get', '--param', '.'.join((rid, item))]
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
out = bdecode(out)
err = bdecode(err)
if p.returncode != 0:
perror(' '.join(cmd), 'failed')
return 1
return out.strip()
def get_res(self, rid):
if rid in self.res:
return self.res[rid]
d = {}
d['mnt'] = self.get_res_item(rid, 'mnt')
d['dev'] = self.get_res_item(rid, 'dev')
self.res[rid] = d
return d
def get_fs_rids(self, refresh=False):
    """Return (and cache in self.rids) the fs resource ids of the
    service, parsed from 'svcmgr json_status' output.

    On parse failure the service is flagged unusable
    (osvc_service = False) and an empty list is returned.
    """
    if not refresh and hasattr(self, 'rids'):
        return self.rids
    cmd = ['svcmgr', '-s', self.svcname, 'json_status']
    p = Popen(cmd, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    out = bdecode(out)
    err = bdecode(err)
    # keep only the line holding the json document
    for line in out.splitlines():
        if line.startswith('{'):
            out = line
            break
    try:
        # json_status returns 0, even when it outs no data
        self.res_status = json.loads(out)['resources']
    except Exception as e:
        pinfo(e)
        pinfo(out)
        self.rids = []
        self.osvc_service = False
        return self.rids
    self.rids = [ k for k in self.res_status.keys() if k.startswith('fs#') ]
    return self.rids
def find_rid(self, fs):
found = False
for rid in self.rids:
d = self.get_res(rid)
if d['mnt'] == fs['mnt'] and d['dev'] == fs['devpath']:
return rid
return None
def fix_fs_local(self, fs):
if self.svcname is not None and self.osvc_service:
return 0
if self.check_fs_local(fs, False) == 0:
return 0
with open("/etc/fstab", "r") as f:
lines = f.read().split('\n')
if len(lines[-1]) == 0:
del(lines[-1])
p = re.compile(r'\s*%s\s+'%(fs['devpath']))
newline = "%s %s %s %s 0 2"%(fs['devpath'], fs['mnt'], fs['type'], fs['opts'])
for i, line in enumerate(lines):
if line == newline:
return 0
if re.match(p, line) is not None:
pinfo("remove '%s' from fstab"%line)
del lines[i]
lines.append(newline)
pinfo("append '%s' to fstab"%newline)
try:
with open("/etc/fstab", "w") as f:
f.write("\n".join(lines)+'\n')
except:
perror("failed to rewrite fstab")
return 1
pinfo("fstab rewritten")
return 0
def check_fs_local(self, fs, verbose=False):
if self.svcname is not None and self.osvc_service:
return 0
p = re.compile(r'\s*%s\s+%s'%(fs['devpath'], fs['mnt']))
with open("/etc/fstab", "r") as f:
buff = f.read()
if re.search(p, buff) is not None:
if verbose:
pinfo("%s@%s resource correctly set in fstab"%(fs['mnt'], fs['devpath']))
return 0
if verbose:
perror("%s@%s resource correctly set in fstab"%(fs['mnt'], fs['devpath']))
return 1
def check_fs_svc(self, fs, verbose=False):
if self.svcname is None:
return 0
rids = self.get_fs_rids()
if not self.osvc_service:
return 0
rid = self.find_rid(fs)
if rid is None:
if verbose:
perror("%s@%s resource not found in service %s"%(fs['mnt'], fs['devpath'], self.svcname))
return 1
if verbose:
pinfo("%s@%s resource correctly set in service %s"%(fs['mnt'], fs['devpath'], self.svcname))
return 0
def fix_fs_svc(self, fs):
if not self.osvc_service or self.check_fs_svc(fs, False) == 0:
return 0
cmd = ['svcmgr', '-s', self.svcname, 'get', '--param', 'DEFAULT.encapnodes']
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
out = bdecode(out)
err = bdecode(err)
if self.nodename in out.strip().split():
tags = "encap"
else:
tags = ''
cmd = ['svcmgr', '-s', self.svcname, 'update', '--resource',
'{"rtype": "fs", "mnt": "%s", "dev": "%s", "type": "%s", "mnt_opt": "%s", "tags": "%s"}'%(fs['mnt'], fs['devpath'], fs['type'], fs['opts'], tags)]
pinfo(' '.join(cmd))
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
out = bdecode(out)
err = bdecode(err)
if p.returncode != 0:
perror("unable to fetch %s json status"%self.svcname)
return 1
return 0
def check_fs_mounted(self, fs, verbose=False):
if os.path.ismount(fs['mnt']):
if verbose:
pinfo(fs['mnt'], "is mounted")
return 0
if verbose:
perror(fs['mnt'], "is not mounted")
return 1
def fix_fs_mounted(self, fs):
if self.check_fs_mounted(fs, False) == 0:
return 0
if self.svcname is None or not self.osvc_service:
return self.fix_fs_mounted_local(fs)
else:
return self.fix_fs_mounted_svc(fs)
def fix_fs_mounted_svc(self, fs):
rids = self.get_fs_rids(refresh=True)
rid = self.find_rid(fs)
if rid is None:
perror("fs resource with mnt=%s not found in service %s"%(fs['mnt'], self.svcname))
return 1
cmd = ['svcmgr', '-s', self.svcname, '--rid', rid, 'mount', '--cluster']
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
out = bdecode(out)
err = bdecode(err)
if p.returncode != 0 and "unsupported action" in err:
cmd = ['svcmgr', '-s', self.svcname, '--rid', rid, 'startfs', '--cluster']
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
pinfo(' '.join(cmd))
if p.returncode != 0:
perror("unable to mount %s"%fs['mnt'])
return 1
return 0
def fix_fs_mounted_local(self, fs):
cmd = ['mount', fs['mnt']]
pinfo(' '.join(cmd))
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
out = bdecode(out)
err = bdecode(err)
if len(out) > 0:
pinfo(out)
if len(err) > 0:
perror(err)
if p.returncode != 0:
perror("unable to mount %s"%fs['mnt'])
return 1
return 0
def check_fs(self, fs, verbose=False):
    """Run every check for one fs rule; OR-combine the results."""
    checks = (
        self.check_fs_mnt,
        self.check_fs_dev,
        self.check_fs_fmt,
        self.check_fs_svc,
        self.check_fs_local,
        self.check_fs_mounted,
    )
    rc = 0
    for fn in checks:
        rc |= fn(fs, verbose)
    return rc
def fix_fs(self, fs):
    """Run the fix steps in dependency order; abort at first failure."""
    steps = (
        self.fix_fs_mnt,
        self.fix_fs_dev,
        self.fix_fs_fmt,
        self.fix_fs_svc,
        self.fix_fs_local,
        self.fix_fs_mounted,
    )
    for step in steps:
        if step(fs) != 0:
            return 1
    return 0
def fixable(self):
return RET_NA
def check(self):
r = 0
for f in self.fs:
r |= self.check_fs(f, verbose=True)
return r
def fix(self):
    """Fix every fs rule; return the OR-ed status."""
    status = 0
    for rule in self.fs:
        status |= self.fix_fs(rule)
    return status
if __name__ == "__main__":
    # command line: <prog> PREFIX check|fixable|fix
    syntax = """syntax:
%s PREFIX check|fixable|fix"""%sys.argv[0]
    if len(sys.argv) != 3:
        perror("wrong number of arguments")
        perror(syntax)
        sys.exit(RET_ERR)
    try:
        o = CompFs(sys.argv[1])
        if sys.argv[2] == 'check':
            RET = o.check()
        elif sys.argv[2] == 'fix':
            RET = o.fix()
        elif sys.argv[2] == 'fixable':
            RET = o.fixable()
        else:
            perror("unsupported argument '%s'"%sys.argv[2])
            perror(syntax)
            RET = RET_ERR
    except NotApplicable:
        sys.exit(RET_NA)
    except ComplianceError:
        sys.exit(RET_ERR)
    except:
        # unexpected error: dump the traceback and exit with error status
        import traceback
        traceback.print_exc()
        sys.exit(RET_ERR)
    sys.exit(RET)
opensvc-1.8~20170412/var/compliance/com.opensvc/sudoers.py 0000755 0001750 0001750 00000004510 13073467726 023460 0 ustar jkelbert jkelbert #!/usr/bin/env python
"""
Same as files compliance object, but verifies the sudoers
declaration syntax using visudo in check mode.
The variable format is json-serialized:
{
"path": "/some/path/to/file",
"fmt": "root@corp.com %%HOSTNAME%%@corp.com",
"uid": 500,
"gid": 500,
}
Wildcards:
%%ENV:VARNAME%% Any environment variable value
%%HOSTNAME%% Hostname
%%SHORT_HOSTNAME%% Short hostname
"""
import os
import sys
from subprocess import *
sys.path.append(os.path.dirname(__file__))
from comp import *
from files import CompFiles
class CompSudoers(CompFiles):
    """Same as the files compliance object, but validates the target
    sudoers content with 'visudo -c' before checking or installing it.
    """
    def check_file_syntax(self, f, verbose=False):
        # feed the target content to visudo reading from stdin ('-f -')
        cmd = ['visudo', '-c', '-f', '-']
        p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate(input=bencode(f['fmt']))
        if p.returncode != 0:
            if verbose:
                perror("target sudoers rules syntax error.")
            else:
                # non-verbose callers are on the fix path: explain the abort
                perror("target sudoers rules syntax error. abort installation.")
        return p.returncode
    def check(self):
        # check syntax first, then the usual file checks
        r = 0
        for f in self.files:
            r |= self.check_file_syntax(f, verbose=True)
            r |= self.check_file(f, verbose=True)
        return r
    def fix(self):
        r = 0
        for f in self.files:
            if self.check_file_syntax(f):
                r |= 1
                # refuse to install a corrupted sudoers file
                continue
            r |= self.fix_file_fmt(f)
            r |= self.fix_file_mode(f)
            r |= self.fix_file_owner(f)
        return r
if __name__ == "__main__":
    # command line: <prog> PREFIX check|fixable|fix
    syntax = """syntax:
%s PREFIX check|fixable|fix"""%sys.argv[0]
    if len(sys.argv) != 3:
        perror("wrong number of arguments")
        perror(syntax)
        sys.exit(RET_ERR)
    try:
        o = CompSudoers(sys.argv[1])
        if sys.argv[2] == 'check':
            RET = o.check()
        elif sys.argv[2] == 'fix':
            RET = o.fix()
        elif sys.argv[2] == 'fixable':
            RET = o.fixable()
        else:
            perror("unsupported argument '%s'"%sys.argv[2])
            perror(syntax)
            RET = RET_ERR
    except ComplianceError:
        sys.exit(RET_ERR)
    except NotApplicable:
        sys.exit(RET_NA)
    except:
        # unexpected error: dump the traceback and exit with error status
        import traceback
        traceback.print_exc()
        sys.exit(RET_ERR)
    sys.exit(RET)
opensvc-1.8~20170412/var/compliance/com.opensvc/file.py 0000755 0001750 0001750 00000030336 13073467726 022720 0 ustar jkelbert jkelbert #!/usr/bin/env python
data = {
"default_prefix": "OSVC_COMP_FILE_",
"example_value": """
{
"path": "/some/path/to/file",
"fmt": "root@corp.com %%HOSTNAME%%@corp.com",
"uid": 500,
"gid": 500,
}
""",
"description": """* Verify and install file content.
* Verify and set file or directory ownership and permission
* Directory mode is triggered if the path ends with /
Special wildcards::
%%ENV:VARNAME%% Any environment variable value
%%HOSTNAME%% Hostname
%%SHORT_HOSTNAME%% Short hostname
""",
"form_definition": """
Desc: |
A file rule, fed to the 'files' compliance object to create a directory or a file and set its ownership and permissions. For files, a reference content can be specified or pointed through an URL.
Css: comp48
Outputs:
-
Dest: compliance variable
Class: file
Type: json
Format: dict
Inputs:
-
Id: path
Label: Path
DisplayModeLabel: path
LabelCss: action16
Mandatory: Yes
Help: File path to install the reference content to. A path ending with '/' is treated as a directory and as such, its content need not be specified.
Type: string
-
Id: mode
Label: Permissions
DisplayModeLabel: perm
LabelCss: action16
Help: "In octal form. Example: 644"
Type: integer
-
Id: uid
Label: Owner
DisplayModeLabel: uid
LabelCss: guy16
Help: Either a user ID or a user name
Type: string or integer
-
Id: gid
Label: Owner group
DisplayModeLabel: gid
LabelCss: guy16
Help: Either a group ID or a group name
Type: string or integer
-
Id: ref
Label: Content URL pointer
DisplayModeLabel: ref
LabelCss: loc
Help: "Examples:
http://server/path/to/reference_file
https://server/path/to/reference_file
ftp://server/path/to/reference_file
ftp://login:pass@server/path/to/reference_file"
Type: string
-
Id: fmt
Label: Content
DisplayModeLabel: fmt
LabelCss: hd16
Css: pre
Help: A reference content for the file. The text can embed substitution variables specified with %%ENV:VAR%%.
Type: text
"""
}
import os
import sys
import json
import stat
import re
import urllib
import ssl
import tempfile
import pwd
import grp
from subprocess import *
sys.path.append(os.path.dirname(__file__))
from comp import *
class InitError(Exception):
    # raised when a rule can not be initialized (e.g. ref download failure)
    pass
class CompFiles(CompObject):
    """Compliance object checking/installing files: content, mode, ownership."""
    def __init__(self, prefix=None):
        CompObject.__init__(self, prefix=prefix, data=data)
def init(self):
    """Build self.files from the rules; raise NotApplicable when empty."""
    self._usr = {}
    self._grp = {}
    self.sysname, self.nodename, x, x, self.machine = os.uname()
    self.files = []
    for rule in self.get_rules():
        try:
            self.files += self.add_file(rule)
        except InitError:
            continue
        except ValueError:
            # was: os.environ[k] with 'k' undefined (NameError masking
            # the real parse error); report the offending rule instead
            perror('file: failed to parse variable', rule)
    if len(self.files) == 0:
        raise NotApplicable()
def parse_fmt(self, d, add_linefeed=True):
    """Expand wildcards in the rule 'fmt' and normalize the trailing newline."""
    fmt = d['fmt']
    if isinstance(fmt, int):
        fmt = str(fmt)
    fmt = fmt.replace('%%HOSTNAME%%', self.nodename)
    short_name = self.nodename.split('.')[0]
    fmt = fmt.replace('%%SHORT_HOSTNAME%%', short_name)
    # %%ENV:VAR%% substitution is handled by the base class
    fmt = self.subst(fmt)
    if add_linefeed and not fmt.endswith('\n'):
        fmt += '\n'
    d['fmt'] = fmt
    return [d]
def parse_ref(self, d):
    """Download the 'ref' url into 'fmt' and delegate to parse_fmt."""
    # NOTE(review): NamedTemporaryFile is closed and its name reused,
    # which is racy; tempfile.mkstemp would be safer — confirm before changing
    f = tempfile.NamedTemporaryFile()
    tmpf = f.name
    f.close()
    try:
        self.urlretrieve(d['ref'], tmpf)
    except IOError as e:
        perror("file ref", d['ref'], "download failed:", e)
        raise InitError()
    with open(tmpf, "r") as f:
        d['fmt'] = f.read()
    # downloaded content keeps its own trailing-newline policy
    return self.parse_fmt(d, add_linefeed=False)
def add_file(self, d):
    """Validate one rule dict and expand it to a list of file rules.

    Returns [] on invalid rules (the error is reported), a one-element
    list otherwise. The dead 'RET = RET_ERR' local assignments of the
    original (they never reached the module-level RET) were removed.
    """
    if 'path' not in d:
        perror('file: path should be in the dict:', d)
        return []
    if 'fmt' not in d and 'ref' not in d and not d['path'].endswith("/"):
        perror('file: fmt or ref should be in the dict:', d)
        return []
    if 'fmt' in d and 'ref' in d:
        perror('file: fmt and ref are exclusive:', d)
        return []
    # normalize numeric uid/gid passed as strings; names stay as-is
    try:
        d["uid"] = int(d["uid"])
    except:
        pass
    try:
        d["gid"] = int(d["gid"])
    except:
        pass
    if 'fmt' in d:
        return self.parse_fmt(d)
    if 'ref' in d:
        # safe:// refs are checked by md5 later, no download at init
        if not d["ref"].startswith("safe://"):
            return self.parse_ref(d)
    return [d]
def fixable(self):
    # fixability can not be determined generically for file rules
    return RET_NA
def check_file_fmt(self, f, verbose=False):
    """Dispatch the content check to the safe:// or buffered variant."""
    if not os.path.exists(f['path']):
        return RET_ERR
    if f['path'].endswith('/'):
        # don't check content if it's a directory
        return RET_OK
    if 'ref' in f and f['ref'].startswith("safe://"):
        return self.check_file_fmt_safe(f, verbose=verbose)
    else:
        return self.check_file_fmt_buffered(f, verbose=verbose)
def fix_file_fmt_safe(self, f):
    """Install the content of a safe:// collector reference."""
    pinfo("file reference %s download to %s" % (f["ref"], f["path"]))
    tmpfname = self.get_safe_file(f["ref"])
    pinfo("file %s content install" % f["path"])
    import shutil
    shutil.copy(tmpfname, f["path"])
    os.unlink(tmpfname)
    return RET_OK
def check_file_fmt_safe(self, f, verbose=False):
    """Compare the local file md5 against the collector metadata."""
    try:
        data = self.collector_safe_file_get_meta(f["ref"])
    except ComplianceError as e:
        # re-raised with the same message (trims the traceback chain)
        raise ComplianceError(str(e))
    target_md5 = data.get("md5")
    current_md5 = self.md5(f["path"])
    if target_md5 == current_md5:
        pinfo("file %s md5 verified" % f["path"])
        return RET_OK
    else:
        perror("file %s content md5 differs from its reference" % f["path"])
        # only download and show a diff for small files (<1MB)
        if verbose and data["size"] < 1000000:
            tmpfname = self.get_safe_file(f["ref"])
            self.check_file_diff(f, tmpfname, verbose=verbose)
            os.unlink(tmpfname)
        return RET_ERR
def get_safe_file(self, uuid):
    """Download a safe:// reference to a temp file; return its path."""
    tmpf = tempfile.NamedTemporaryFile()
    tmpfname = tmpf.name
    tmpf.close()
    try:
        self.collector_safe_file_download(uuid, tmpfname)
    except Exception as e:
        raise ComplianceError("%s: %s" % (uuid, str(e)))
    return tmpfname
def check_file_fmt_buffered(self, f, verbose=False):
    """Write the target content to a temp file and diff it against the installed file."""
    tmpf = tempfile.NamedTemporaryFile()
    tmpfname = tmpf.name
    tmpf.close()
    with open(tmpfname, 'w') as tmpf:
        tmpf.write(f['fmt'])
    ret = self.check_file_diff(f, tmpfname, verbose=verbose)
    os.unlink(tmpfname)
    return ret
def check_file_diff(self, f, refpath, verbose=False):
    """Diff the installed file against refpath; RET_OK when identical."""
    # was: ... in ("Linux") — ("Linux") is a plain string, so the 'in'
    # was a substring test ("inux" would match); use a real 1-tuple
    if "OSVC_COMP_NODES_OS_NAME" in os.environ and os.environ['OSVC_COMP_NODES_OS_NAME'] in ("Linux",):
        # GNU diff supports unified output
        cmd = ['diff', '-u', f['path'], refpath]
    else:
        cmd = ['diff', f['path'], refpath]
    p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    out = bdecode(out)
    if verbose and len(out) > 0:
        perror(out.strip('\n'))
    if p.returncode != 0:
        return RET_ERR
    return RET_OK
def check_file_mode(self, f, verbose=False):
    """Compare the installed file mode with the rule 'mode' (octal string)."""
    if 'mode' not in f:
        return RET_OK
    try:
        mode = oct(stat.S_IMODE(os.stat(f['path']).st_mode))
    except:
        if verbose: perror("file", f['path'], 'stat() failed')
        return RET_ERR
    # lstrip("0o") strips any leading '0'/'o' characters, normalizing
    # both py2 ('0644') and py3 ('0o644') oct() output for comparison
    mode = str(mode).lstrip("0o")
    target_mode = str(f['mode']).lstrip("0o")
    if mode != target_mode:
        if verbose: perror("file", f['path'], 'mode should be %s but is %s'%(target_mode, mode))
        return RET_ERR
    return RET_OK
def get_uid(self, uid):
    """Resolve a user name to a uid, with caching; ints pass through."""
    if uid in self._usr:
        return self._usr[uid]
    if not is_string(uid):
        return uid
    try:
        resolved = pwd.getpwnam(uid)[2]
    except:
        perror("file: user %s does not exist"%uid)
        raise ComplianceError()
    self._usr[uid] = resolved
    return resolved
def get_gid(self, gid):
    """Resolve a group name to a gid, with caching; ints pass through."""
    if gid in self._grp:
        return self._grp[gid]
    if not is_string(gid):
        return gid
    try:
        resolved = grp.getgrnam(gid)[2]
    except:
        perror("file: group %s does not exist"%gid)
        raise ComplianceError()
    self._grp[gid] = resolved
    return resolved
def check_file_uid(self, f, verbose=False):
    """Verify the file owner matches the rule 'uid', when specified."""
    if 'uid' not in f:
        return RET_OK
    expected = self.get_uid(f['uid'])
    actual = os.stat(f['path']).st_uid
    if actual == expected:
        return RET_OK
    if verbose: perror("file", f['path'], 'uid should be %s but is %s'%(expected, str(actual)))
    return RET_ERR
def check_file_gid(self, f, verbose=False):
    """Verify the file group matches the rule 'gid', when specified."""
    if 'gid' not in f:
        return RET_OK
    expected = self.get_gid(f['gid'])
    actual = os.stat(f['path']).st_gid
    if actual == expected:
        return RET_OK
    if verbose: perror("file", f['path'], 'gid should be %s but is %s'%(expected, str(actual)))
    return RET_ERR
def check_file(self, f, verbose=False):
    """Run all per-file checks: content, mode, owner, group."""
    if not os.path.exists(f['path']):
        perror("file", f['path'], "does not exist")
        return RET_ERR
    status = 0
    for checker in (self.check_file_fmt, self.check_file_mode,
                    self.check_file_uid, self.check_file_gid):
        status |= checker(f, verbose)
    if status == 0 and verbose:
        pinfo("file", f['path'], "is ok")
    return status
def fix_file_mode(self, f):
    """Chmod the file to the rule 'mode' when it differs."""
    if 'mode' not in f:
        return RET_OK
    if self.check_file_mode(f) == RET_OK:
        return RET_OK
    try:
        pinfo("file %s mode set to %s"%(f['path'], str(f['mode'])))
        # the rule mode is an octal representation (e.g. 644)
        os.chmod(f['path'], int(str(f['mode']), 8))
    except:
        # was a silent failure; report it like fix_file_owner does
        perror("file %s mode set to %s failed"%(f['path'], str(f['mode'])))
        return RET_ERR
    return RET_OK
def fix_file_owner(self, f):
    """Chown the file to the rule uid/gid; -1 leaves that dimension unchanged."""
    uid = -1
    gid = -1
    if 'uid' not in f and 'gid' not in f:
        return RET_OK
    if 'uid' in f and self.check_file_uid(f) != RET_OK:
        uid = self.get_uid(f['uid'])
    if 'gid' in f and self.check_file_gid(f) != RET_OK:
        gid = self.get_gid(f['gid'])
    if uid == -1 and gid == -1:
        # both dimensions already on target
        return RET_OK
    try:
        os.chown(f['path'], uid, gid)
    except:
        perror("file %s ownership set to %d:%d failed"%(f['path'], uid, gid))
        return RET_ERR
    pinfo("file %s ownership set to %d:%d"%(f['path'], uid, gid))
    return RET_OK
def fix_file_fmt(self, f):
    """Install the reference content (or create the directory) at f['path']."""
    if f['path'].endswith("/") and not os.path.exists(f['path']):
        # directory rule: just create the tree, no content to install
        try:
            pinfo("file: mkdir", f['path'])
            os.makedirs(f['path'])
        except:
            perror("file: failed to create", f['path'])
            return RET_ERR
        return RET_OK
    if self.check_file_fmt(f, verbose=False) == RET_OK:
        return RET_OK
    if 'ref' in f and f['ref'].startswith("safe://"):
        return self.fix_file_fmt_safe(f)
    d = os.path.dirname(f['path'])
    if not os.path.exists(d):
        pinfo("file: mkdir", d)
        os.makedirs(d)
        # best effort: chown the newly created parent dir; a KeyError
        # on a missing uid/gid is deliberately swallowed here
        # NOTE(review): original indentation lost — chown may have been
        # at method level (run even for pre-existing dirs); confirm
        try:
            os.chown(d, self.get_uid(f['uid']), self.get_gid(f['gid']))
        except Exception as e:
            perror("file:", e)
            pass
    try:
        with open(f['path'], 'w') as fi:
            fi.write(f['fmt'])
    except Exception as e:
        perror("file:", e)
        return RET_ERR
    pinfo("file", f['path'], "rewritten")
    return RET_OK
def check(self):
    """Verbosely check every file rule; OR the statuses."""
    status = 0
    for rule in self.files:
        status |= self.check_file(rule, verbose=True)
    return status
def fix(self):
    """Fix content first, then mode, then ownership, for every file rule."""
    status = 0
    for rule in self.files:
        status |= self.fix_file_fmt(rule)
        status |= self.fix_file_mode(rule)
        status |= self.fix_file_owner(rule)
    return status
if __name__ == "__main__":
    # standard compliance object entry point (parses PREFIX and action)
    main(CompFiles)
opensvc-1.8~20170412/var/compliance/com.opensvc/keyval.py 0000755 0001750 0001750 00000025507 13073467726 023300 0 ustar jkelbert jkelbert #!/usr/bin/env python
data = {
"default_prefix": "OSVC_COMP_GROUP_",
"example_kwargs": {
"path": "/etc/ssh/sshd_config",
},
"example_value": """
[
{
"key": "PermitRootLogin",
"op": "=",
"value": "yes"
}
]
or
{
"path": "/etc/ssh/sshd_config",
"keys": [
{
"key": "PermitRootLogin",
"op": "=",
"value": "yes"
}
]
}
""",
"description": """* Setup and verify keys in "key value" formatted configuration file.
* Example files: sshd_config, ssh_config, ntp.conf, ...
""",
"form_definition": """
Desc: |
A rule to set a list of parameters in simple keyword/value configuration file format. Current values can be checked as set or unset, strictly equal, or superior/inferior to their target value.
Outputs:
-
Dest: compliance variable
Type: json
Format: list of dict
Class: keyval
Inputs:
-
Id: key
Label: Key
DisplayModeTrim: 64
DisplayModeLabel: key
LabelCss: action16
Mandatory: Yes
Type: string
Help:
-
Id: op
Label: Comparison operator
DisplayModeLabel: op
LabelCss: action16
Mandatory: Yes
Type: string
Default: "="
Candidates:
- reset
- unset
- "="
- ">"
- ">="
- "<"
- "<="
Help: The comparison operator to use to check the parameter current value.
-
Id: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string or integer
Help: The configuration file parameter target value.
""",
}
import os
import sys
import json
sys.path.append(os.path.dirname(__file__))
from comp import *
from keyval_parser import Parser, ParserError
class KeyVal(CompObject):
    """Check/fix key-value formatted configuration files (sshd_config, ...)."""
    def __init__(self, prefix=None, path=None):
        CompObject.__init__(self, prefix=prefix, data=data)
        # optional forced config file path; rules may also carry their own
        self.cf = path
def init(self):
    """Group the rules per config file path and parse each target file."""
    self.nocf = False
    self.file_keys = {}
    if self.cf:
        # a path was forced at construction: bare 'key' rules go there
        self.file_keys[self.cf] = {
            "target_n_key": {},
            "keys": [],
        }
    for rule in self.get_rules():
        if self.cf and "key" in rule:
            self.file_keys[self.cf]["keys"] += [rule]
            continue
        if "path" not in rule:
            continue
        if "keys" not in rule or not isinstance(rule["keys"], list):
            continue
        path = rule["path"]
        if path not in self.file_keys:
            self.file_keys[path] = {
                "target_n_key": {},
                "keys": rule["keys"],
            }
        else:
            self.file_keys[path]["keys"] += rule["keys"]
    for path, data in self.file_keys.items():
        for i, key in enumerate(data["keys"]):
            if data["keys"][i]['op'] == 'IN':
                # IN operator: the value is a json-encoded candidate list
                data["keys"][i]['value'] = json.loads(data["keys"][i]['value'])
            if 'op' in key and 'key' in key and key['op'] not in ("unset", "reset"):
                # count how many times each key should end up set
                # (consumed by the 'reset' operator)
                if key['key'] not in data["target_n_key"]:
                    data["target_n_key"][key['key']] = 1
                else:
                    data["target_n_key"][key['key']] += 1
        try:
            data["conf"] = Parser(path)
        except ParserError as e:
            perror(e)
            raise ComplianceError()
def fixable(self):
    # keyval rules can always be applied
    return RET_OK
def _check_key(self, path, data, keyname, target, op, value, instance=0, verbose=True):
    """Compare one key occurrence against its target.

    value is the parsed current value: None when unset, a list when the
    key has multiple occurrences, a scalar otherwise.
    Returns RET_OK or RET_ERR.
    """
    r = RET_OK
    if op == "reset":
        # compliant when the key is not set more often than the
        # cumulated targets of the other rules
        if value is not None:
            current_n_key = len(value)
            target_n_key = data["target_n_key"][keyname] if keyname in data["target_n_key"] else 0
            if current_n_key > target_n_key:
                if verbose:
                    perror("%s is set %d times, should be set %d times"%(keyname, current_n_key, target_n_key))
                return RET_ERR
            else:
                if verbose:
                    pinfo("%s is set %d times, on target"%(keyname, current_n_key))
                return RET_OK
        else:
            return RET_OK
    elif op == "unset":
        if value is not None:
            if target.strip() == "":
                # blank target: the key must not be set at all
                if verbose:
                    perror("%s is set, should not be"%keyname)
                return RET_ERR
            # non-blank target: only that specific value is forbidden
            target_found = False
            for i, val in enumerate(value):
                if target == val:
                    target_found = True
                    break
            if target_found:
                if verbose:
                    perror("%s[%d] is set to value %s, should not be"%(keyname, i, target))
                return RET_ERR
            else:
                if verbose:
                    pinfo("%s is not set to value %s, on target"%(keyname, target))
                return RET_OK
        else:
            if target.strip() != "":
                if verbose:
                    pinfo("%s=%s is not set, on target"%(keyname, target))
            else:
                if verbose:
                    pinfo("%s is not set, on target"%keyname)
            return RET_OK
    if value is None:
        # 'unset' listed among IN candidates makes an absent key compliant
        if op == 'IN' and "unset" in map(str, target):
            if verbose:
                pinfo("%s is not set, on target"%(keyname))
            return RET_OK
        else:
            if verbose:
                perror("%s[%d] is not set, target: %s"%(keyname, instance, str(target)))
            return RET_ERR
    if type(value) == list:
        # multi-occurrence key: compliant if any occurrence matches
        if str(target) in value:
            if verbose:
                pinfo("%s[%d]=%s on target"%(keyname, instance, str(value)))
            return RET_OK
        else:
            if verbose:
                perror("%s[%d]=%s is not set"%(keyname, instance, str(target)))
            return RET_ERR
    if op == '=':
        if str(value) != str(target):
            if verbose:
                perror("%s[%d]=%s, target: %s"%(keyname, instance, str(value), str(target)))
            r |= RET_ERR
        elif verbose:
            pinfo("%s=%s on target"%(keyname, str(value)))
    elif op == 'IN':
        if str(value) not in map(str, target):
            if verbose:
                perror("%s[%d]=%s, target: %s"%(keyname, instance, str(value), str(target)))
            r |= RET_ERR
        elif verbose:
            pinfo("%s=%s on target"%(keyname, str(value)))
    else:
        # numeric comparison operators require an int current value
        if type(value) != int:
            if verbose:
                perror("%s[%d]=%s value must be integer"%(keyname, instance, str(value)))
            r |= RET_ERR
        elif op == '<=' and value > target:
            if verbose:
                perror("%s[%d]=%s target: <= %s"%(keyname, instance, str(value), str(target)))
            r |= RET_ERR
        elif op == '>=' and value < target:
            if verbose:
                perror("%s[%d]=%s target: >= %s"%(keyname, instance, str(value), str(target)))
            r |= RET_ERR
        elif verbose:
            pinfo("%s[%d]=%s on target"%(keyname, instance, str(value)))
    return r
def check_key(self, path, data, key, instance=0, verbose=True):
    """Validate one rule dict and delegate the comparison to _check_key."""
    if 'key' not in key:
        if verbose:
            perror("'key' not set in rule %s"%str(key))
        return RET_NA
    if 'value' not in key:
        if verbose:
            perror("'value' not set in rule %s"%str(key))
        return RET_NA
    if 'op' not in key:
        op = "="
    else:
        op = key['op']
    target = key['value']
    allowed_ops = ('>=', '<=', '=', 'unset', 'reset', 'IN')
    if op not in allowed_ops:
        if verbose:
            perror(key['key'], "'op' value must be one of", ", ".join(allowed_ops))
        return RET_NA
    keyname = key['key']
    # fetch the instance-th occurrence of the key from the parsed file
    value = data["conf"].get(keyname, instance=instance)
    r = self._check_key(path, data, keyname, target, op, value, instance=instance, verbose=verbose)
    return r
def fix_key(self, path, data, key, instance=0):
    """Apply one rule to the parsed configuration (in memory only)."""
    if key['op'] == "unset" or (key['op'] == "IN" and key['value'][0] == "unset"):
        pinfo("%s unset"%key['key'])
        if key['op'] == "IN":
            # IN+unset: drop every occurrence, whatever the value
            target = None
        else:
            target = key['value']
        data["conf"].unset(key['key'], target)
    elif key['op'] == "reset":
        # truncate extra occurrences down to the cumulated target count
        target_n_key = data["target_n_key"][key['key']] if key['key'] in data["target_n_key"] else 0
        pinfo("%s truncated to %d definitions"%(key['key'], target_n_key))
        data["conf"].truncate(key['key'], target_n_key)
    else:
        if key['op'] == "IN":
            # set to the first candidate of the IN list
            target = key['value'][0]
        else:
            target = key['value']
        pinfo("%s=%s set"%(key['key'], target))
        data["conf"].set(key['key'], target, instance=instance)
def check(self):
    """Check every configured file; OR the statuses."""
    status = RET_OK
    for path, data in self.file_keys.items():
        status |= self.check_keys(path, data)
    return status
def check_keys(self, path, data):
    """Check all rules of one file, tracking per-key instance indices."""
    r = RET_OK
    key_instance = {}
    for key in data["keys"]:
        if 'key' not in key or 'op' not in key:
            continue
        if key['op'] in ('reset', 'unset'):
            # these operators address all occurrences at once
            instance = None
        else:
            # the nth rule for a given key checks its nth occurrence
            if key['key'] not in key_instance:
                key_instance[key['key']] = 0
            else:
                key_instance[key['key']] += 1
            instance = key_instance[key['key']]
        r |= self.check_key(path, data, key, instance=instance, verbose=True)
    return r
def fix(self):
    """Fix every configured file; OR the statuses."""
    status = RET_OK
    for path, data in self.file_keys.items():
        status |= self.fix_keys(path, data)
    return status
def fix_keys(self, path, data):
    """Fix all non-compliant rules of one file, then write it once."""
    key_instance = {}
    for key in data["keys"]:
        if 'key' not in key or 'op' not in key:
            continue
        if key['op'] in ('reset', 'unset'):
            instance = None
        else:
            # same per-key instance accounting as check_keys
            if key['key'] not in key_instance:
                key_instance[key['key']] = 0
            else:
                key_instance[key['key']] += 1
            instance = key_instance[key['key']]
        if self.check_key(path, data, key, instance=instance, verbose=False) == RET_ERR:
            self.fix_key(path, data, key, instance=instance)
    if not data["conf"].changed:
        # nothing modified: don't rewrite the file
        return RET_OK
    try:
        data["conf"].write()
    except ParserError as e:
        perror(e)
        return RET_ERR
    return RET_OK
if __name__ == "__main__":
    # standard compliance object entry point (parses PREFIX and action)
    main(KeyVal)
opensvc-1.8~20170412/var/compliance/com.opensvc/nodeconf.py 0000755 0001750 0001750 00000013114 13073467726 023567 0 ustar jkelbert jkelbert #!/usr/bin/env python
data = {
"default_prefix": "OSVC_COMP_NODECONF_",
"example_value": """
[
{
"key": "node.repopkg",
"op": "=",
"value": "ftp://ftp.opensvc.com/opensvc"
},
{
"key": "node.repocomp",
"op": "=",
"value": "ftp://ftp.opensvc.com/compliance"
}
]
""",
"description": """* Verify opensvc agent configuration parameter
""",
"form_definition": """
Desc: |
A rule to set a parameter in OpenSVC node.conf configuration file. Used by the 'nodeconf' compliance object.
Css: comp48
Outputs:
-
Dest: compliance variable
Type: json
Format: list of dict
Class: nodeconf
Inputs:
-
Id: key
Label: Key
DisplayModeLabel: key
LabelCss: action16
Mandatory: Yes
Type: string
Help: The OpenSVC node.conf parameter to check.
-
Id: op
Label: Comparison operator
DisplayModeLabel: op
LabelCss: action16
Mandatory: Yes
Type: string
Default: "="
Candidates:
- "="
- ">"
- ">="
- "<"
- "<="
Help: The comparison operator to use to check the parameter value.
-
Id: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string or integer
Help: The OpenSVC node.conf parameter value to check.
""",
}
import os
import sys
import json
import re
from subprocess import *
sys.path.append(os.path.dirname(__file__))
from comp import *
class NodeConf(CompObject):
    """Check/fix OpenSVC node.conf parameters through the nodemgr CLI."""
    def __init__(self, prefix=None):
        CompObject.__init__(self, prefix=prefix, data=data)
    def init(self):
        # each rule is a {key, op, value} dict
        self.keys = self.get_rules()
def fixable(self):
    # node.conf parameters can always be set/unset
    return RET_OK
def unset_val(self, keyname):
    """Unset a node.conf parameter via 'nodemgr unset'."""
    cmd = ['nodemgr', 'unset', '--param', keyname]
    pinfo(' '.join(cmd))
    proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
    proc.communicate()
    return proc.returncode
def set_val(self, keyname, target):
    """Set a node.conf parameter via 'nodemgr set'."""
    if type(target) == int:
        # argv members must be strings
        target = str(target)
    cmd = ['nodemgr', 'set', '--param', keyname, '--value', target]
    pinfo(' '.join(cmd))
    proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
    proc.communicate()
    return proc.returncode
def get_val(self, keyname):
    """Fetch a node.conf parameter via 'nodemgr get'.

    Returns the value as int when possible, str otherwise, or None
    when the key is unset, deprecated or the command failed.
    """
    cmd = ['nodemgr', 'get', '--param', keyname]
    p = Popen(cmd, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    # decode to text before the substring test and strip(): on python3
    # communicate() returns bytes and '"deprecated" in err' raises
    # TypeError (the sibling modules already bdecode their output)
    out = bdecode(out)
    err = bdecode(err)
    if p.returncode != 0:
        #perror('\n'.join((' '.join(cmd), out, err)))
        return
    if "deprecated" in err:
        return
    out = out.strip()
    try:
        out = int(out)
    except:
        pass
    return out
def _check_key(self, keyname, target, op, value, verbose=True):
    """Compare a fetched value to its target; returns RET_OK or RET_ERR.

    check_key() guards against value=None before calling, so the first
    branch is defensive only.
    """
    r = RET_OK
    if value is None:
        if verbose:
            perror("%s not set"%keyname)
        r |= RET_ERR
    if op == '=':
        if str(value) != str(target):
            if verbose:
                perror("%s=%s, target: %s"%(keyname, str(value), str(target)))
            r |= RET_ERR
        elif verbose:
            pinfo("%s=%s on target"%(keyname, str(value)))
    elif op == 'unset':
        # reaching here means the key is set, which violates 'unset'
        if verbose:
            perror("%s=%s value must be unset"%(keyname, str(value)))
        r |= RET_ERR
    else:
        # numeric comparison operators require an int current value
        if type(value) != int:
            if verbose:
                perror("%s=%s value must be integer"%(keyname, str(value)))
            r |= RET_ERR
        elif op == '<=' and value > target:
            if verbose:
                perror("%s=%s target: <= %s"%(keyname, str(value), str(target)))
            r |= RET_ERR
        elif op == '>=' and value < target:
            if verbose:
                perror("%s=%s target: >= %s"%(keyname, str(value), str(target)))
            r |= RET_ERR
        elif verbose:
            pinfo("%s=%s on target"%(keyname, str(value)))
    return r
def check_key(self, key, verbose=True):
    """Validate one rule dict and check it against the live nodemgr value."""
    if 'key' not in key:
        if verbose:
            perror("'key' not set in rule %s"%str(key))
        return RET_NA
    if 'value' not in key:
        if verbose:
            perror("'value' not set in rule %s"%str(key))
        return RET_NA
    if 'op' not in key:
        op = "="
    else:
        op = key['op']
    target = key['value']
    if op not in ('>=', '<=', '=', 'unset'):
        if verbose:
            perror("'value' list member 0 must be either '=', '>=', '<=' or unset: %s"%str(key))
        return RET_NA
    keyname = key['key']
    value = self.get_val(keyname)
    if value is None:
        # an unset key is compliant only when the operator asks for it
        if op == 'unset':
            if verbose:
                pinfo("%s key is not set"%keyname)
            return RET_OK
        else:
            if verbose:
                perror("%s key is not set"%keyname)
            return RET_ERR
    return self._check_key(keyname, target, op, value, verbose)
def fix_key(self, key):
    """Apply one rule: unset or set the parameter through nodemgr."""
    op = key.get('op', "=")
    if op == "unset":
        return self.unset_val(key['key'])
    return self.set_val(key['key'], key['value'])
def check(self):
    """Verbosely check every rule; OR the statuses."""
    status = 0
    for rule in self.keys:
        status |= self.check_key(rule, verbose=True)
    return status
def fix(self):
    """Fix every non-compliant rule; OR the nodemgr return codes.

    Was 'r += self.fix_key(key)', which summed the return codes —
    inconsistent with check() and the other compliance modules, which
    OR their statuses. The caller only tests non-zero, so OR-ing is
    the safe, conventional form.
    """
    r = 0
    for key in self.keys:
        if self.check_key(key, verbose=False) == RET_ERR:
            r |= self.fix_key(key)
    return r
if __name__ == "__main__":
    # standard compliance object entry point (parses PREFIX and action)
    main(NodeConf)
opensvc-1.8~20170412/var/compliance/com.opensvc/firmware.py 0000755 0001750 0001750 00000015063 13073467726 023615 0 ustar jkelbert jkelbert #!/usr/bin/env python
import os
import sys
import json
from distutils.version import LooseVersion as V
from subprocess import *
sys.path.append(os.path.dirname(__file__))
from comp import *
class CompFirmware(object):
    """Check bios/qla2xxx/lpfc driver and firmware versions against targets.

    Targets are read from the environment variable named by 'var', as a
    json dict mapping component name to either a version string or a
    list of acceptable version strings.
    """
    def __init__(self, var):
        self.versions = {}
        if var not in os.environ:
            pinfo(var, 'not found in environment')
            raise NotApplicable()
        try:
            self.target_versions = json.loads(os.environ[var])
        except:
            perror(var, 'misformatted variable:', os.environ[var])
            raise NotApplicable()
        for key in self.target_versions:
            if type(self.target_versions[key]) != list:
                continue
            # normalize list targets to strings for later comparison
            self.target_versions[key] = list(map(lambda x: str(x), self.target_versions[key]))
        self.sysname, self.nodename, x, x, self.machine = os.uname()
        if self.sysname not in ['Linux']:
            perror('module not supported on', self.sysname)
            raise NotApplicable()
def get_versions(self):
    """Populate self.versions with bios, qla2xxx and lpfc versions."""
    self.get_bios_version_Linux()
    self.get_qla_version_Linux()
    self.get_lpfc_version_Linux()
def get_qla_version_Linux(self):
self.versions['qla2xxx'] = None
self.versions['qla2xxx_fw'] = None
import glob
hosts = glob.glob('/sys/bus/pci/drivers/qla2*/*:*:*/host*')
if len(hosts) == 0:
return
hosts_proc = map(lambda x: '/proc/scsi/qla2xxx/'+os.path.basename(x).replace('host', ''), hosts)
hosts = map(lambda x: '/sys/class/fc_host/'+os.path.basename(x)+'/symbolic_name', hosts)
for i, host in enumerate(hosts):
if os.path.exists(host):
with open(host, 'r') as f:
buff = f.read()
l = buff.split()
for e in l:
if e.startswith("DVR:"):
self.versions['qla2xxx'] = e.replace("DVR:", "")
elif e.startswith("FW:"):
v = e.replace("FW:", "")
# store the lowest firmware version
if self.versions['qla2xxx_fw'] is None or V(self.versions['qla2xxx_fw']) > V(v):
self.versions['qla2xxx_fw'] = v
elif os.path.exists(hosts_proc[i]):
with open(hosts_proc[i], 'r') as f:
buff = f.read()
for line in buff.split('\n'):
if "Firmware version" not in line:
continue
l = line.split()
n_words = len(l)
idx = l.index("Driver") + 2
if idx <= n_words:
self.versions['qla2xxx'] = l[idx]
idx = l.index("Firmware") + 2
if idx <= n_words:
v = l[idx]
if self.versions['qla2xxx_fw'] is None or V(self.versions['qla2xxx_fw']) > V(v):
self.versions['qla2xxx_fw'] = v
def get_lpfc_version_Linux(self):
    """Collect the lpfc module version and lowest HBA firmware revision."""
    self.versions['lpfc'] = None
    self.versions['lpfc_fw'] = None
    import glob
    hosts = glob.glob('/sys/class/scsi_host/host*/fwrev')
    if len(hosts) == 0:
        return
    for host in hosts:
        with open(host, 'r') as f:
            buff = f.read()
        l = buff.split()
        # keep the lowest firmware revision across hosts
        if self.versions['lpfc_fw'] is None or V(self.versions['lpfc_fw']) > V(l[0]):
            self.versions['lpfc_fw'] = l[0]
    if self.versions['lpfc_fw'] is None:
        # no need to fetch module version if no hardware
        return
    cmd = ['modinfo', 'lpfc']
    p = Popen(cmd, stdout=PIPE)
    out, err = p.communicate()
    if p.returncode != 0:
        return
    out = bdecode(out)
    for line in out.splitlines():
        if line.startswith('version:'):
            self.versions['lpfc'] = line.split()[1]
            return
def get_bios_version_Linux(self):
    """Read the bios version from sysfs, falling back to dmidecode."""
    p = os.path.join(os.sep, 'sys', 'class', 'dmi', 'id', 'bios_version')
    try:
        f = open(p, 'r')
        ver = f.read().strip()
        f.close()
        self.versions['server'] = ver
        return
    except:
        # sysfs entry missing or unreadable: try dmidecode
        pass
    try:
        cmd = ['dmidecode']
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        if p.returncode != 0:
            raise
        out = bdecode(out)
        for line in out.splitlines():
            if 'Version:' in line:
                self.versions['server'] = line.split(':')[-1].strip()
                return
        raise
    except:
        pinfo('can not fetch bios version')
        return
def fixable(self):
    # firmware upgrades can not be automated by this module
    return RET_NA
def check(self):
    """Compare collected versions to the targets; RET_ERR on mismatch."""
    self.get_versions()
    # python2/3 compatible string type set: 'unicode' does not exist on
    # python3 and raised NameError in the original type test below
    try:
        string_types = (str, unicode)
    except NameError:
        string_types = (str,)
    r = RET_OK
    for key in self.target_versions:
        if key not in self.versions:
            perror("TODO: get", key, "version")
            continue
        if type(self.versions[key]) not in string_types:
            # no hardware/version detected for this component
            pinfo("no", key)
            continue
        if type(self.target_versions[key]) == list and \
           self.versions[key] not in self.target_versions[key]:
            perror(key, "version is %s, target %s"%(self.versions[key], ' or '.join(self.target_versions[key])))
            r |= RET_ERR
        elif type(self.target_versions[key]) != list and \
             self.versions[key] != self.target_versions[key]:
            perror(key, "version is %s, target %s"%(self.versions[key], self.target_versions[key]))
            r |= RET_ERR
        else:
            pinfo(key, "version is %s, on target"%self.versions[key])
            continue
    return r
def fix(self):
    # firmware upgrades can not be automated by this module
    return RET_NA
if __name__ == "__main__":
    # command line: <prog> TARGET check|fixable|fix
    syntax = """syntax:
%s TARGET check|fixable|fix"""%sys.argv[0]
    if len(sys.argv) != 3:
        perror("wrong number of arguments")
        perror(syntax)
        sys.exit(RET_ERR)
    try:
        o = CompFirmware(sys.argv[1])
        if sys.argv[2] == 'check':
            RET = o.check()
        elif sys.argv[2] == 'fix':
            RET = o.fix()
        elif sys.argv[2] == 'fixable':
            RET = o.fixable()
        else:
            perror("unsupported argument '%s'"%sys.argv[2])
            perror(syntax)
            RET = RET_ERR
    except NotApplicable:
        sys.exit(RET_NA)
    except:
        # unexpected error: dump the traceback and exit with error status
        import traceback
        traceback.print_exc()
        sys.exit(RET_ERR)
    sys.exit(RET)
opensvc-1.8~20170412/var/compliance/com.opensvc/ansible_playbook.py 0000755 0001750 0001750 00000012246 13073467726 025316 0 ustar jkelbert jkelbert #!/usr/bin/env python
data = {
"default_prefix": "OSVC_COMP_ANSIBLE_PLAYBOOK_",
"example_value": """
{
"path": "/some/path/to/file",
"fmt": "---",
}
""",
"description": """* Fetch a playbook from a href if required
* Run the playbook in check mode on check action
* Run the playbook on fix action
""",
"form_definition": """
Desc: |
Define or point to a ansible playbook.
Css: comp48
Outputs:
-
Dest: compliance variable
Class: file
Type: json
Format: dict
Inputs:
-
Id: ref
Label: Content URL pointer
DisplayModeLabel: ref
LabelCss: loc
Help: "Examples:
/path/to/reference_file
http://server/path/to/reference_file
https://server/path/to/reference_file
ftp://server/path/to/reference_file
ftp://login:pass@server/path/to/reference_file"
Type: string
-
Id: fmt
Label: Content
DisplayModeLabel: fmt
LabelCss: hd16
Css: pre
Help: A reference content for the file. The text can embed substitution variables specified with %%ENV:VAR%%.
Type: text
"""
}
import os
import sys
import stat
import re
import tempfile
from subprocess import *
sys.path.append(os.path.dirname(__file__))
from comp import *
class InitError(Exception):
    # raised when a rule can not be initialized (e.g. ref download failure)
    pass
class AnsiblePlaybook(CompObject):
    """Run an ansible playbook: --check mode on check, apply on fix."""
    def __init__(self, prefix=None):
        CompObject.__init__(self, prefix=prefix, data=data)
def init(self):
    """Load playbook rules; raise NotApplicable when none are valid."""
    self.rules = []
    # localhost-only inventory written next to the compliance data
    self.inventory = os.path.join(os.environ["OSVC_PATH_COMP"], ".ansible-inventory")
    for rule in self.get_rules():
        try:
            self.rules += self.add_rule(rule)
        except InitError:
            continue
        except ValueError:
            # was: os.environ[k] with 'k' undefined (NameError masking
            # the real parse error); report the offending rule instead
            perror('ansible_playbook: failed to parse variable', rule)
    if len(self.rules) == 0:
        raise NotApplicable()
def add_rule(self, d):
    """Validate one rule dict: exactly one of 'fmt' or 'ref' is required.

    Returns [d] when valid, [] otherwise. The dead 'RET = RET_ERR'
    local assignments of the original (which never reached the
    module-level RET) were removed.
    """
    if 'fmt' not in d and 'ref' not in d:
        perror('file: fmt or ref should be in the dict:', d)
        return []
    if 'fmt' in d and 'ref' in d:
        perror('file: fmt and ref are exclusive:', d)
        return []
    return [d]
def download(self, d):
    """Materialize the rule playbook to a temp file; return its path."""
    if 'ref' in d and d['ref'].startswith("safe://"):
        return self.get_safe_file(d["ref"])
    elif 'fmt' in d and d['fmt'] != "":
        return self.write_fmt(d)
    else:
        # was: self.download_url() — the required rule argument was
        # missing, raising TypeError whenever a plain url ref was used
        return self.download_url(d)
def download_url(self, d):
    """Download the rule 'ref' url to a temp file; return its path."""
    # NOTE(review): NamedTemporaryFile is closed and its name reused,
    # which is racy; tempfile.mkstemp would be safer — confirm before changing
    f = tempfile.NamedTemporaryFile()
    tmpf = f.name
    f.close()
    try:
        self.urlretrieve(d['ref'], tmpf)
    except IOError as e:
        perror("file ref", d['ref'], "download failed:", e)
        raise InitError()
    return tmpf
def get_safe_file(self, uuid):
tmpf = tempfile.NamedTemporaryFile()
tmpfname = tmpf.name
tmpf.close()
try:
self.collector_safe_file_download(uuid, tmpfname)
except Exception as e:
raise ComplianceError("%s: %s" % (uuid, str(e)))
return tmpfname
def write_fmt(self, f):
tmpf = tempfile.NamedTemporaryFile()
tmpfname = tmpf.name
tmpf.close()
with open(tmpfname, 'w') as tmpf:
tmpf.write(f['fmt'])
return tmpfname
def write_inventory(self):
if os.path.exists(self.inventory):
return
with open(self.inventory, 'w') as ofile:
ofile.write("[local]\n127.0.0.1\n")
def fixable(self):
return RET_NA
def fix_playbook(self, rule, verbose=False):
tmpfname = self.download(rule)
try:
return self._fix_playbook(rule, tmpfname, verbose=verbose)
finally:
os.unlink(tmpfname)
def _fix_playbook(self, rule, tmpfname, verbose=False):
self.write_inventory()
cmd = ["ansible-playbook", "-c", "local", "-i", self.inventory, tmpfname]
proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
pinfo(out)
perror(err)
if proc.returncode != 0:
return RET_ERR
if "failed=0" in out:
return RET_OK
return RET_ERR
def check_playbook(self, rule, verbose=False):
tmpfname = self.download(rule)
try:
return self._check_playbook(rule, tmpfname, verbose=verbose)
finally:
os.unlink(tmpfname)
def _check_playbook(self, rule, tmpfname, verbose=False):
self.write_inventory()
cmd = ["ansible-playbook", "-c", "local", "-i", self.inventory, "--check", tmpfname]
proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
pinfo(out)
perror(err)
if proc.returncode != 0:
return RET_ERR
if "changed=0" in out and "failed=0" in out:
return RET_OK
return RET_ERR
def check(self):
r = 0
for rule in self.rules:
r |= self.check_playbook(rule, verbose=True)
return r
def fix(self):
r = 0
for rule in self.rules:
r |= self.fix_playbook(rule)
return r
# CLI entry point: delegate check/fix dispatch to the generic
# compliance driver from the comp module.
if __name__ == "__main__":
    main(AnsiblePlaybook)
opensvc-1.8~20170412/var/compliance/com.opensvc/zpool.py 0000755 0001750 0001750 00000012652 13073467726 023145 0 ustar jkelbert jkelbert #!/usr/bin/env python
data = {
"default_prefix": "OSVC_COMP_ZPOOL_",
"example_value": """
[
{
"name": "rpool",
"prop": "failmode",
"op": "=",
"value": "continue"
},
{
"name": "rpool",
"prop": "dedupditto",
"op": "<",
"value": 1
},
{
"name": "rpool",
"prop": "dedupditto",
"op": ">",
"value": 0
},
{
"name": "rpool",
"prop": "dedupditto",
"op": "<=",
"value": 1
},
{
"name": "rpool",
"prop": "dedupditto",
"op": ">=",
"value": 1
}
]
""",
"description": """* Check the properties values against their target and operator
* The collector provides the format with wildcards.
* The module replace the wildcards with contextual values.
* In the 'fix' the zpool property is set.
""",
"form_definition": """
Desc: |
A rule to set a list of zpool properties.
Css: comp48
Outputs:
-
Dest: compliance variable
Type: json
Format: list of dict
Class: zpool
Inputs:
-
Id: name
Label: Pool Name
DisplayModeLabel: poolname
LabelCss: hd16
Mandatory: Yes
Type: string
Help: The zpool name whose property to check.
-
Id: prop
Label: Property
DisplayModeLabel: property
LabelCss: action16
Mandatory: Yes
Type: string
Help: The zpool property to check.
Candidates:
- readonly
- autoexpand
- autoreplace
- bootfs
- cachefile
- dedupditto
- delegation
- failmode
- listshares
- listsnapshots
- version
-
Id: op_s
Key: op
Label: Comparison operator
DisplayModeLabel: op
LabelCss: action16
Type: info
Default: "="
ReadOnly: yes
Help: The comparison operator to use to check the property current value.
Condition: "#prop IN readonly,autoexpand,autoreplace,bootfs,cachefile,delegation,failmode,listshares,listsnapshots"
-
Id: op_n
Key: op
Label: Comparison operator
DisplayModeLabel: op
LabelCss: action16
Mandatory: Yes
Type: string
Default: "="
StrictCandidates: yes
Candidates:
- "="
- ">"
- ">="
- "<"
- "<="
Help: The comparison operator to use to check the property current value.
Condition: "#prop IN version,dedupditto"
-
Id: value_readonly
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string
Help: The zpool property target value.
Condition: "#prop == readonly"
StrictCandidates: yes
Candidates:
- "on"
- "off"
-
Id: value_autoexpand
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string
Help: The zpool property target value.
Condition: "#prop == autoexpand"
StrictCandidates: yes
Candidates:
- "on"
- "off"
-
Id: value_autoreplace
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string
Help: The zpool property target value.
Condition: "#prop == autoreplace"
StrictCandidates: yes
Candidates:
- "on"
- "off"
-
Id: value_delegation
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string
Help: The zpool property target value.
Condition: "#prop == delegation"
StrictCandidates: yes
Candidates:
- "on"
- "off"
-
Id: value_listshares
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string
Help: The zpool property target value.
Condition: "#prop == listshares"
StrictCandidates: yes
Candidates:
- "on"
- "off"
-
Id: value_listsnapshots
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string
Help: The zpool property target value.
Condition: "#prop == listsnapshots"
StrictCandidates: yes
Candidates:
- "on"
- "off"
-
Id: value_failmode
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string
Help: The zpool property target value.
Condition: "#prop == failmode"
StrictCandidates: yes
Candidates:
- "continue"
- "wait"
- "panic"
-
Id: value_bootfs
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string
Help: The zpool property target value.
Condition: "#prop == bootfs"
-
Id: value_cachefile
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string
Help: The zpool property target value.
Condition: "#prop == cachefile"
-
Id: value_dedupditto
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: integer
Help: The zpool property target value.
Condition: "#prop == dedupditto"
-
Id: value_version
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: integer
Help: The zpool property target value.
Condition: "#prop == version"
"""
}
import os
import sys
sys.path.append(os.path.dirname(__file__))
from zprop import *
class CompZpool(CompZprop):
    # Zpool property checker: all logic lives in CompZprop (zprop.py),
    # specialized here by the "zpool" binary name.
    def __init__(self, prefix='OSVC_COMP_ZPOOL_'):
        # NOTE(review): calls CompObject.__init__ (grandparent) directly,
        # bypassing CompZprop.__init__ -- presumably intentional since
        # zfs.py does the same; confirm against zprop.py
        CompObject.__init__(self, prefix=prefix, data=data)
        self.zbin = "zpool"
# CLI entry point: delegate check/fix dispatch to the generic driver.
if __name__ == "__main__":
    main(CompZpool)
opensvc-1.8~20170412/var/compliance/com.opensvc/sysvinit.py 0000755 0001750 0001750 00000017211 13073467726 023666 0 ustar jkelbert jkelbert #!/usr/bin/env python
from subprocess import *
import os
import sys
import glob
import re
sys.path.append(os.path.dirname(__file__))
from comp import *
class InitError(Exception):
    # raised by load()/get_svcname() when the rc layout is unusable
    pass
class UnknownService(Exception):
    # raised by get_state() when the service has no rc link at all
    pass
class SetError(Exception):
    # raised when an rm/ln command converging the rc links fails
    pass
class SeqError(Exception):
    # raised by get_state() on sequence-number mismatches
    pass
class DupError(Exception):
    # raised by get_state() when a service has several launchers
    # at the same runlevel
    pass
class SysVInit(object):
    """Model of the SysV init runlevel links (/etc/rc<N>.d/[SK]<seq><svc>).

    Loads the current state of every service at every runlevel and
    provides the check/fix primitives used by the compliance module.
    """

    def __init__(self):
        self.load()

    def __str__(self):
        s = ""
        for svc in self.services:
            # NOTE(review): str(self.services[svc]) is iterated char by
            # char here, producing odd output -- kept as-is (cosmetic only)
            s += "%-20s %s\n"%(svc, ' '.join(map(lambda x: '%-4s'%x, str(self.services[svc]))))
        return s

    def get_svcname(self, s):
        """Split an rc link path into (state, sequence, service name).

        'S99foo' -> ('on', '99', 'foo'); 'K10bar' -> ('off', '10', 'bar').
        Raises InitError on any other leading letter.
        """
        _s = os.path.basename(s)
        _svcname = re.sub(r'^[SK][0-9]+', '', _s)
        _seq = re.sub(r'[KS](\d+).+', r'\1', _s)
        if _s[0] == 'S':
            _state = 'on'
        elif _s[0] == 'K':
            _state = 'off'
        else:
            raise InitError("unexepected service name: %s"%s)
        return _state, _seq, _svcname

    def load(self):
        """Scan <base>/rc<N>.d and build the services map.

        self.services[svc][seq] is a 7-slot list (one per runlevel)
        holding 'on', 'off' or 'none'.
        """
        self.services = {}
        self.levels = (0, 1, 2, 3, 4, 5, 6)
        default = "none"
        self.base_d = "/etc"
        self.init_d = self.base_d + "/init.d"
        if not os.path.exists(self.init_d):
            # HP-UX style layout fallback
            self.base_d = "/sbin"
            self.init_d = self.base_d + "/init.d"
        if not os.path.exists(self.init_d):
            raise InitError("init dir not found")
        for l in self.levels:
            for s in glob.glob("%s/rc%d.d/[SK]*"%(self.base_d, l)):
                state, seq, svc = self.get_svcname(s)
                if svc not in self.services:
                    self.services[svc] = {seq: [default, default, default, default, default, default, default]}
                if seq not in self.services[svc]:
                    self.services[svc][seq] = [default, default, default, default, default, default, default]
                self.services[svc][seq][l] = state

    def _run(self, cmd):
        """Log and execute a command list, raising SetError on failure."""
        pinfo(" ".join(cmd))
        p = Popen(cmd, stdout=PIPE)
        out, err = p.communicate()
        if p.returncode != 0:
            raise SetError()

    def activate(self, service, levels, seq):
        # was: activate_one(service, levels, seq) -- passed the whole
        # level list instead of each level, ignoring the loop variable
        for l in levels:
            self.activate_one(service, l, seq)

    def activate_one(self, service, level, seq):
        """Install the S<seq><service> start link at one runlevel."""
        if len(service) == 0:
            # was: SetError(...) constructed but never raised
            raise SetError("service is empty")
        start_l = "S%s%s"%(seq,service)
        svc_p = "../init.d/"+service
        os.chdir(self.base_d+"/rc%s.d"%level)
        g = glob.glob("[SK]*%s"%service)
        if len(g) > 0:
            # drop any pre-existing launcher before linking
            self._run(['rm', '-f'] + g)
        self._run(['ln', '-sf', svc_p, start_l])

    def deactivate_one(self, service, level, seq):
        """Install the K<seq><service> stop link at one runlevel."""
        if len(service) == 0:
            # was: SetError(...) constructed but never raised
            raise SetError("service is empty")
        stop_l = "K%s%s"%(seq,service)
        svc_p = "../init.d/"+service
        os.chdir(self.base_d+"/rc%s.d"%level)
        g = glob.glob("[SK]*%s"%service)
        if len(g) > 0:
            self._run(['rm', '-f'] + g)
        self._run(['ln', '-sf', svc_p, stop_l])

    def delete_one(self, service, level):
        """Remove any [SK] link for the service at one runlevel."""
        if len(service) == 0:
            # was: SetError(...) constructed but never raised
            raise SetError("service is empty")
        g = glob.glob(self.base_d+"/rc%s.d"%level+"/*"+service)
        if len(g) == 0:
            return
        self._run(['rm', '-f'] + g)

    def check_init(self, service):
        """Return True if the service has an init script installed."""
        init_f = os.path.join(self.init_d, service)
        if os.path.exists(init_f):
            return True
        return False

    def set_state(self, service, level, state, seq):
        """Converge the service toward 'on'/'off'/'del' at one runlevel."""
        if service in self.services and seq in self.services[service]:
            curstates = self.services[service][seq]
            # NOTE(review): the len(curstates) == 1 guard looks suspicious
            # (curstates always has 7 slots after load) -- kept as-is,
            # confirm original intent before changing
            if state != "del" and len(curstates) == 1 and curstates[int(level)] == state or \
               state == "del" and len(curstates) == 1 and curstates[int(level)] == "none":
                return
        if state == "on":
            self.activate_one(service, level, seq)
        elif state == "off":
            self.deactivate_one(service, level, seq)
        elif state == "del":
            self.delete_one(service, level)
        else:
            raise SetError()

    def get_state(self, service, level, seq):
        """Return 'on'/'off'/'none' for (service, level, seq).

        Raises UnknownService, SeqError (sequence mismatch) or DupError
        (several launchers at the same runlevel).
        """
        if service not in self.services:
            raise UnknownService()
        # compute the number of different launchers for this service in
        # the runlevel
        l = []
        for _seq in self.services[service]:
            if self.services[service][_seq][level] != "none":
                l.append(self.services[service][_seq][level])
        if seq is None:
            if len(l) == 0:
                return "none"
            raise SeqError()
        if len(l) > 1:
            raise DupError()
        try:
            curstates = self.services[service][seq]
            curstate = curstates[int(level)]
        except:
            curstate = "none"
        if len(l) == 1 and curstate == "none":
            raise SeqError()
        return curstate

    def check_state(self, service, levels, state, seq=None, verbose=False):
        """Return 0 when the service matches the target state, else 1-bits."""
        r = 0
        if seq is not None and type(seq) == int:
            seq = "%02d"%seq
        if not self.check_init(service):
            if verbose:
                perror("service %s init script does not exist in %s"%(service, self.init_d))
            r |= 1
        if seq is None and state != "del":
            if verbose:
                perror("service %s sequence number must be set"%(service))
            return 1
        for level in levels:
            try:
                level = int(level)
            except:
                continue
            try:
                curstate = self.get_state(service, level, seq)
            except DupError:
                if verbose:
                    perror("service %s has multiple launchers at level %d"%(service, level))
                r |= 1
                continue
            except SeqError:
                if verbose:
                    perror("service %s sequence number error at level %d"%(service, level))
                r |= 1
                continue
            except UnknownService:
                curstate = "none"
            if (state != "del" and curstate != state) or \
               (state == "del" and curstate != "none"):
                if verbose:
                    perror("service", service, "at runlevel", level, "is in state", curstate, "! target state is", state)
                r |= 1
            else:
                if verbose:
                    pinfo("service", service, "at runlevel", level, "is in state", curstate)
        return r

    def fix_state(self, service, levels, state, seq=None):
        """Converge the service at every runlevel; return 0 on success."""
        if seq is not None and type(seq) == int:
            seq = "%02d"%seq
        if seq is None and state != "del":
            perror("service %s sequence number must be set"%(service))
            return 1
        for level in levels:
            try:
                self.set_state(service, level, state, seq)
            except SetError:
                perror("failed to set", service, "runlevels")
                return 1
        return 0
# Ad-hoc manual test: dump the rc link map and probe one service state.
if __name__ == "__main__":
    o = SysVInit()
    pinfo(o)
    try:
        # was: o.get_state('xfs', 3) -- get_state() takes a mandatory
        # seq argument, so the call was a guaranteed TypeError
        pinfo('xfs@rc3 =', o.get_state('xfs', 3, None))
    except UnknownService:
        pinfo('xfs@rc3 = unknown service')
opensvc-1.8~20170412/var/compliance/com.opensvc/remove_files.py 0000755 0001750 0001750 00000004477 13073467726 024467 0 ustar jkelbert jkelbert #!/usr/bin/env python
data = {
"default_prefix": "OSVC_COMP_REMOVE_FILES_",
"example_value": """
[
"/tmp/foo",
"/bar/to/delete"
]
""",
"description": """* Verify files and file trees are uninstalled
""",
"form_definition": """
Desc: |
A rule defining a set of files to remove, fed to the 'remove_files' compliance object.
Css: comp48
Outputs:
-
Dest: compliance variable
Class: remove_files
Type: json
Format: list
Inputs:
-
Id: path
Label: File path
DisplayModeLabel: ""
LabelCss: edit16
Mandatory: Yes
Help: You must set paths in fully qualified form.
Type: string
""",
}
import os
import sys
import re
import json
from glob import glob
import shutil
sys.path.append(os.path.dirname(__file__))
from comp import *
blacklist = [
"/",
"/root"
]
class CompRemoveFiles(CompObject):
    """Verify a set of path patterns is absent; 'fix' deletes matches."""

    def __init__(self, prefix=None):
        CompObject.__init__(self, prefix=prefix, data=data)

    def init(self):
        patterns = self.get_rules()
        patterns = sorted(list(set(patterns)))
        self.files = self.expand_patterns(patterns)
        if len(self.files) == 0:
            pinfo("no files matching patterns")
            raise NotApplicable

    def expand_patterns(self, patterns):
        """Glob-expand patterns, deduplicated and sorted (overlapping
        patterns previously yielded duplicate entries)."""
        matches = set()
        for pattern in patterns:
            matches |= set(glob(pattern))
        return sorted(matches)

    def fixable(self):
        return RET_NA

    def check_file(self, _file):
        if not os.path.exists(_file):
            pinfo(_file, "does not exist. on target.")
            return RET_OK
        perror(_file, "exists. shouldn't")
        return RET_ERR

    def fix_file(self, _file):
        if _file in blacklist:
            # the module-level blacklist ("/", "/root") was defined but
            # never enforced, leaving e.g. rmtree("/") reachable
            perror(_file, "is blacklisted. refusing to delete")
            return RET_ERR
        if not os.path.exists(_file):
            return RET_OK
        try:
            if os.path.isdir(_file) and not os.path.islink(_file):
                shutil.rmtree(_file)
            else:
                os.unlink(_file)
            pinfo(_file, "deleted")
        except Exception as e:
            perror("failed to delete", _file, "(%s)"%str(e))
            return RET_ERR
        return RET_OK

    def check(self):
        r = 0
        for _file in self.files:
            r |= self.check_file(_file)
        return r

    def fix(self):
        r = 0
        for _file in self.files:
            r |= self.fix_file(_file)
        return r
# CLI entry point: delegate check/fix dispatch to the generic driver.
if __name__ == "__main__":
    main(CompRemoveFiles)
opensvc-1.8~20170412/var/compliance/com.opensvc/bios.py 0000755 0001750 0001750 00000004165 13073467726 022736 0 ustar jkelbert jkelbert #!/usr/bin/env python
data = {
"default_prefix": "OSVC_COMP_BIOS_",
"example_value": "0.6.0",
"description": """* Checks an exact BIOS version, as returned by dmidecode or sysfs
* Module need to be called with the exposed bios version as variable (bios.py $OSVC_COMP_TEST_BIOS_1 check)
""",
}
import os
import sys
from subprocess import *
sys.path.append(os.path.dirname(__file__))
from comp import *
class CompBios(CompObject):
    """Check the node BIOS version against exact-match rules (Linux only)."""

    def __init__(self, prefix=None):
        CompObject.__init__(self, prefix=prefix, data=data)

    def init(self):
        self.rules = self.get_rules_raw()
        self.sysname, self.nodename, x, x, self.machine = os.uname()
        if self.sysname not in ['Linux']:
            perror('module not supported on', self.sysname)
            raise NotApplicable()

    def get_bios_version_Linux(self):
        """Return the BIOS version from sysfs, falling back to dmidecode.

        Returns None (after logging) when neither source is usable.
        """
        p = os.path.join(os.sep, 'sys', 'class', 'dmi', 'id', 'bios_version')
        try:
            with open(p, 'r') as f:
                return f.read().strip()
        except:
            pass
        try:
            cmd = ['dmidecode', '-t', 'bios']
            p = Popen(cmd, stdout=PIPE)
            out, err = p.communicate()
            if p.returncode != 0:
                raise
            out = bdecode(out)
            for line in out.splitlines():
                if 'Version:' in line:
                    return line.split(':')[-1].strip()
            raise
        except:
            perror('can not fetch bios version')
            return None
        # was: a trailing unreachable 'return ver' referencing an
        # undefined name -- removed

    def fixable(self):
        return RET_NA

    def check(self):
        self.ver = self.get_bios_version_Linux()
        if self.ver is None:
            return RET_NA
        r = RET_OK
        for rule in self.rules:
            r |= self._check(rule)
        return r

    def _check(self, rule):
        """Compare the detected version to one rule (exact string match)."""
        if self.ver == rule:
            pinfo("bios version is %s, on target" % self.ver)
            return RET_OK
        perror("bios version is %s, target %s" % (self.ver, rule))
        return RET_ERR

    def fix(self):
        return RET_NA
# CLI entry point: delegate check/fix dispatch to the generic driver.
if __name__ == "__main__":
    main(CompBios)
opensvc-1.8~20170412/var/compliance/com.opensvc/self.signed.cert.py 0000755 0001750 0001750 00000012371 13073467726 025135 0 ustar jkelbert jkelbert #!/usr/bin/env python
data = {
"default_prefix": "OSVC_COMP_CERT_",
"example_value": """
{
"CN": "%%ENV:SERVICES_SVCNAME%%",
"crt": "/srv/%%ENV:SERVICES_SVCNAME%%/data/nginx/conf/ssl/server.crt",
"key": "/srv/%%ENV:SERVICES_SVCNAME%%/data/nginx/conf/ssl/server.key",
"bits": 2048,
"C": "FR",
"ST": "Ile de France",
"L": "Paris",
"O": "OpenSVC",
"OU": "Lab",
"emailAddress": "support@opensvc.com",
"alt_names": [
{
"dns": ""
}
]
}
""",
"description": """* Check the existance of a key/crt pair
* Create the key/crt pair
""",
"form_definition": """
Desc: |
Describe a self-signed certificate
Css: comp48
Outputs:
-
Dest: compliance variable
Type: json
Format: dict
Class: authkey
Inputs:
-
Id: CN
Label: Common name
DisplayModeLabel: cn
LabelCss: loc
Mandatory: Yes
Type: string
-
Id: crt
Label: Cert path
DisplayModeLabel: crt
LabelCss: key
Mandatory: Yes
Type: string
Help: Where to install the generated certificate
-
Id: key
Label: Key path
DisplayModeLabel: key
LabelCss: key
Mandatory: Yes
Type: string
Help: Where to install the generated key
-
Id: bits
Label: Bits
DisplayModeLabel: bits
LabelCss: key
Mandatory: Yes
Type: integer
Default: 2048
Help: Defines the key length in bits
-
Id: C
Label: Country name
DisplayModeLabel: country
LabelCss: loc
Mandatory: Yes
Default: FR
Type: string
-
Id: ST
Label: State or Province
DisplayModeLabel: state
LabelCss: loc
Mandatory: Yes
Default: Ile de France
Type: string
-
Id: L
Label: Locality name
DisplayModeLabel: locality
LabelCss: loc
Mandatory: Yes
Default: Paris
Type: string
-
Id: O
Label: Organization name
DisplayModeLabel: org
LabelCss: loc
Mandatory: Yes
Default: OpenSVC
Type: string
-
Id: OU
Label: Organization unit
DisplayModeLabel: org unit
LabelCss: loc
Mandatory: Yes
Default: IT
Type: string
-
Id: emailAddress
Label: Email address
DisplayModeLabel: email
LabelCss: loc
Mandatory: Yes
Default: admin@opensvc.com
Type: string
-
Id: alt_names
Label: Alternate names
DisplayModeLabel: alt names
LabelCss: loc
Type: form
Form: self.signed.cert.alt_names
Default: []
Subform:
Desc: |
Subform for the self.signed.cert form.
Css: comp48
Outputs:
-
Type: json
Format: list of dict
Inputs:
-
Id: dns
Label: DNS
DisplayModeLabel: dns
LabelCss: loc
Type: string
Help: An alternate service name
"""
}
import os
import sys
sys.path.append(os.path.dirname(__file__))
from comp import *
from utilities import which
from subprocess import *
class CompSelfSignedCert(CompObject):
    """Check for, and generate, a self-signed x509 key/certificate pair."""

    def __init__(self, prefix='OSVC_COMP_CERT_'):
        CompObject.__init__(self, prefix=prefix, data=data)

    def init(self):
        self.rules = self.get_rules()
        if which("openssl") is None:
            raise NotApplicable("openssl command not found")

    def check(self):
        r = 0
        for rule in self.rules:
            r |= self.check_rule(rule)
        return r

    def fix(self):
        r = 0
        for rule in self.rules:
            r |= self.fix_rule(rule)
        return r

    def check_rule(self, rule):
        """Report RET_ERR if either the key or the cert file is missing."""
        r = RET_OK
        if not os.path.exists(rule["key"]):
            perror("key %s does not exist" % rule["key"])
            r = RET_ERR
        else:
            pinfo("key %s exists" % rule["key"])
        if not os.path.exists(rule["crt"]):
            perror("crt %s does not exist" % rule["crt"])
            r = RET_ERR
        else:
            pinfo("crt %s exists" % rule["crt"])
        return r

    def fix_rule(self, rule):
        """Create parent dirs and generate the key/cert pair with openssl."""
        if os.path.exists(rule["key"]) and os.path.exists(rule["crt"]):
            return RET_OK
        for k in ("key", "crt"):
            d = os.path.dirname(rule[k])
            if not os.path.isdir(d):
                if os.path.exists(d):
                    perror("%s exists but is not a directory" % d)
                    return RET_ERR
                else:
                    pinfo("mkdir -p %s" %d)
                    os.makedirs(d)
        l = [""]
        for k in ["C", "ST", "L", "O", "OU", "CN", "emailAddress"]:
            l.append(k+"="+rule[k])
        if "alt_names" in rule and len(rule["alt_names"]) > 0:
            dns = []
            for i, d in enumerate(rule["alt_names"]):
                # was: d["DNS"] -- the form input Id and the example
                # payload both use the lowercase "dns" key (KeyError)
                dns.append("DNS.%d=%s" % (i+1, d["dns"]))
            l.append("subjectAltName="+",".join(dns))
        l.append("")
        cmd = ["openssl", "req", "-x509", "-nodes",
               "-newkey", "rsa:%d" % rule["bits"],
               "-keyout", rule["key"],
               "-out", rule["crt"],
               # was: "-days", "XXX" -- a non-numeric placeholder that
               # openssl rejects; 3650 days (10 years) for a self-signed
               # cert
               "-days", "3650",
               "-subj", "%s" % "/".join(l)]
        pinfo(" ".join(cmd))
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        if p.returncode != 0:
            if len(out) > 0:
                pinfo(out)
            if len(err) > 0:
                perror(err)
            return RET_ERR
        return RET_OK
# CLI entry point: delegate check/fix dispatch to the generic driver.
if __name__ == "__main__":
    main(CompSelfSignedCert)
opensvc-1.8~20170412/var/compliance/com.opensvc/zfs.py 0000755 0001750 0001750 00000021156 13073467726 022603 0 ustar jkelbert jkelbert #!/usr/bin/env python
data = {
"default_prefix": "OSVC_COMP_ZFS_",
"example_value": """
[
{
"name": "rpool/swap",
"prop": "aclmode",
"op": "=",
"value": "discard"
},
{
"name": "rpool/swap",
"prop": "copies",
"op": "<",
"value": 1
},
{
"name": "rpool/swap",
"prop": "copies",
"op": ">",
"value": 0
},
{
"name": "rpool/swap",
"prop": "copies",
"op": "<=",
"value": 1
},
{
"name": "rpool/swap",
"prop": "copies",
"op": ">=",
"value": 1
}
]
""",
"description": """* Check the properties values against their target and operator
* The collector provides the format with wildcards.
* The module replace the wildcards with contextual values.
* In the 'fix' the zfs dataset property is set.
""",
"form_definition": """
Desc: |
A rule to set a list of zfs properties.
Css: comp48
Outputs:
-
Dest: compliance variable
Type: json
Format: list of dict
Class: zfs dataset
Inputs:
-
Id: name
Label: Dataset Name
DisplayModeLabel: dsname
LabelCss: hd16
Mandatory: Yes
Type: string
Help: The zfs dataset name whose property to check.
-
Id: prop
Label: Property
DisplayModeLabel: property
LabelCss: action16
Mandatory: Yes
Type: string
Help: The zfs dataset property to check.
Candidates:
- aclinherit
- aclmode
- atime
- canmount
- checksum
- compression
- copies
- dedup
- devices
- exec
- keychangedate
- keysource
- logbias
- mountpoint
- nbmand
- primarycache
- quota
- readonly
- recordsize
- refquota
- refreservation
- rekeydate
- reservation
- rstchown
- secondarycache
- setuid
- share.*
- snapdir
- sync
- vscan
- xattr
- zoned
-
Id: op_s
Key: op
Label: Comparison operator
DisplayModeLabel: op
LabelCss: action16
Type: info
Default: "="
ReadOnly: yes
Help: The comparison operator to use to check the property current value.
Condition: "#prop != copies"
-
Id: op_n
Key: op
Label: Comparison operator
DisplayModeLabel: op
LabelCss: action16
Mandatory: Yes
Type: string
Default: "="
StrictCandidates: yes
Candidates:
- "="
- ">"
- ">="
- "<"
- "<="
Help: The comparison operator to use to check the property current value.
Condition: "#prop == copies"
-
Id: value_on_off
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string
Help: The zfs dataset property target value.
Condition: "#prop IN sharenfs,sharesmb"
StrictCandidates: yes
Candidates:
- "on"
- "off"
-
Id: value_on_off_strict
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string
Help: The zfs dataset property target value.
Condition: "#prop IN canmount,atime,readonly,exec,devices,setuid,vscan,xattr,jailed,utf8only"
StrictCandidates: yes
Candidates:
- "on"
- "off"
-
Id: value_n
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: integer
Help: The zfs dataset property target value.
Condition: "#prop IN copies,recordsize,volsize"
-
Id: value_s
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string
Help: The zfs dataset property target value.
Condition: "#prop NOT IN normalization,casesensitivity,sync,volmode,logbias,snapdir,dedup,primarycache,secondarycache,redundant_metadata,checksum,compression,aclinherit,aclmode,copies,recordsize,volsize,canmount,atime,readonly,exec,devices,setuid,vscan,xattr,jailed,utf8only,sharenfs,sharesmb"
-
Id: value_aclinherit
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string
Help: The zfs dataset property target value.
Condition: "#prop == aclinherit"
StrictCandidates: yes
Candidates:
- "discard"
- "noallow"
- "restricted"
- "passthrough"
- "passthrough-x"
-
Id: value_aclmode
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string
Help: The zfs dataset property target value.
Condition: "#prop == aclmode"
StrictCandidates: yes
Candidates:
- "discard"
- "groupmask"
- "passthrough"
- "restricted"
-
Id: value_checksum
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string
Help: The zfs dataset property target value.
Condition: "#prop == checksum"
StrictCandidates: yes
Candidates:
- "on"
- "off"
- "fletcher2"
- "fletcher4"
- "sha256"
- "noparity"
-
Id: value_compression
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string
Help: The zfs dataset property target value.
Condition: "#prop == compression"
StrictCandidates: yes
Candidates:
- "on"
- "off"
- "lzjb"
- "gzip"
- "gzip-1"
- "gzip-2"
- "gzip-3"
- "gzip-4"
- "gzip-5"
- "gzip-6"
- "gzip-7"
- "gzip-8"
- "gzip-9"
- "zle"
- "lz4"
-
Id: value_dedup
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string
Help: The zfs dataset property target value.
Condition: "#prop == dedup"
StrictCandidates: yes
Candidates:
- "on"
- "off"
- "verify"
- "sha256"
- "sha256,verify"
-
Id: value_primarycache
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string
Help: The zfs dataset property target value.
Condition: "#prop IN primarycache,secondarycache"
StrictCandidates: yes
Candidates:
- "all"
- "none"
- "metadata"
-
Id: value_redundant_metadata
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string
Help: The zfs dataset property target value.
Condition: "#prop == redundant_metadata"
StrictCandidates: yes
Candidates:
- "all"
- "most"
-
Id: value_logbias
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string
Help: The zfs dataset property target value.
Condition: "#prop == logbias"
StrictCandidates: yes
Candidates:
- "latency"
- "throughput"
-
Id: value_snapdir
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string
Help: The zfs dataset property target value.
Condition: "#prop == snapdir"
StrictCandidates: yes
Candidates:
- "hidden"
- "visible"
-
Id: value_sync
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string
Help: The zfs dataset property target value.
Condition: "#prop == sync"
StrictCandidates: yes
Candidates:
- "standard"
- "always"
- "disabled"
-
Id: value_volmode
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string
Help: The zfs dataset property target value.
Condition: "#prop == volmode"
StrictCandidates: yes
Candidates:
- "default"
- "geom"
- "dev"
- "none"
-
Id: value_casesensitivity
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string
Help: The zfs dataset property target value.
Condition: "#prop == casesensitivity"
StrictCandidates: yes
Candidates:
- "sensitive"
- "insensitive"
- "mixed"
-
Id: value_normalization
Key: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string
Help: The zfs dataset property target value.
Condition: "#prop == normalization"
StrictCandidates: yes
Candidates:
- "none"
- "formC"
- "formD"
- "formKC"
- "formKD"
"""
}
import os
import sys
sys.path.append(os.path.dirname(__file__))
from zprop import *
class CompZfs(CompZprop):
    # Zfs dataset property checker: all logic lives in CompZprop
    # (zprop.py), specialized here by the "zfs" binary name.
    def __init__(self, prefix='OSVC_COMP_ZFS_'):
        # NOTE(review): calls CompObject.__init__ (grandparent) directly,
        # bypassing CompZprop.__init__ -- same pattern as zpool.py;
        # confirm against zprop.py
        CompObject.__init__(self, prefix=prefix, data=data)
        self.zbin = "zfs"
# CLI entry point: delegate check/fix dispatch to the generic driver.
if __name__ == "__main__":
    main(CompZfs)
opensvc-1.8~20170412/var/compliance/com.opensvc/keyval_parser.py 0000755 0001750 0001750 00000013603 13073467726 024646 0 ustar jkelbert jkelbert #!/usr/bin/env python
import os
import sys
import datetime
import shutil
sys.path.append(os.path.dirname(__file__))
from comp import *
class ParserError(Exception):
    # raised by Parser on load/backup/restore/write failures
    pass
class Parser(object):
    """Key/value config file parser (sshd_config style).

    Preserves comments, key order and optional sections introduced by
    marker keywords (default: "Match"). Values are converted to int
    when possible; a key may carry several values (instances).
    """

    def __init__(self, path, section_markers=None):
        self.path = path
        self.data = {}
        self.changed = False
        self.nocf = False          # True when the file did not exist at load
        self.keys = []             # top-level keys, in file order
        self.sections = {}
        self.section_names = []
        self.lastkey = '__lastkey__'
        self.comments = {self.lastkey: []}
        if section_markers:
            self.section_markers = section_markers
        else:
            self.section_markers = ["Match"]
        self.load()
        self.bkp = path + '.' + str(datetime.datetime.now())

    def __str__(self):
        """Render the file back: comments, then key/values, then sections."""
        s = ""
        for k in self.keys:
            if k in self.comments:
                s += '\n'.join(self.comments[k]) + '\n'
            s += '\n'.join([k + " " + str(v) for v in self.data[k]]) + '\n'
        if len(self.comments[self.lastkey]) > 0:
            s += '\n'.join(self.comments[self.lastkey])
        for section, data in self.sections.items():
            s += section + '\n'
            for k in data["keys"]:
                for v in data["data"][k]:
                    s += "\t" + k + " " + str(v) + '\n'
        return s

    def truncate(self, key, max):
        """Keep only the first <max> values of <key>."""
        if key not in self.data:
            return
        n = len(self.data[key])
        if n <= max:
            return
        self.data[key] = self.data[key][:max]
        self.changed = True

    def set(self, key, value, instance=0):
        """Set <value> as the <instance>-th value of <key>.

        Pads missing instances with None. NOTE(review): on an existing
        instance the value is inserted before it rather than replacing
        it -- historic behavior, kept as-is.
        """
        if key not in self.data:
            self.data[key] = [value]
            self.keys.append(key)
        elif instance >= len(self.data[key]):
            # was: range(len, instance-1), which left the appended value
            # one slot short of the requested instance (off by one)
            for i in range(len(self.data[key]), instance):
                self.data[key].append(None)
            self.data[key].append(value)
        else:
            self.data[key].insert(instance, value)
        self.changed = True

    def unset(self, key, value=None):
        """Remove one value of <key>, or all of them when value is None."""
        if key in self.data:
            if value is not None and value.strip() != "":
                self.data[key].remove(value)
            else:
                self.data[key] = []
            if len(self.data[key]) == 0:
                del(self.data[key])
                if key in self.keys:
                    self.keys.remove(key)
            self.changed = True

    def get(self, key, instance=0):
        """Return one value of <key> (or the whole list when instance is None)."""
        if key not in self.data:
            return
        if instance is None:
            return self.data[key]
        if instance < len(self.data[key]):
            return self.data[key][instance]
        return

    def load(self):
        """Read and parse the file; tolerate a missing file (nocf mode)."""
        if not os.path.exists(self.path):
            # was: raised ParserError here, making these two lines dead
            # code and the nocf machinery (honored by backup/restore/
            # write) unreachable; start empty instead, write() creates
            # the file
            self.nocf = True
            return
        with open(self.path, 'r') as f:
            buff = f.read()
        self.parse(buff)

    def backup(self):
        """Copy the file to its timestamped backup path."""
        if self.nocf:
            return
        try:
            shutil.copy(self.path, self.bkp)
        except Exception as e:
            perror(e)
            raise ParserError("failed to backup %s"%self.path)
        pinfo("%s backup up as %s" % (self.path, self.bkp))

    def restore(self):
        """Restore the file from its backup copy."""
        if self.nocf:
            return
        try:
            shutil.copy(self.bkp, self.path)
        except:
            raise ParserError("failed to restore %s"%self.path)
        pinfo("%s restored from %s" % (self.path, self.bkp))

    def write(self):
        """Backup then rewrite the file; restore the backup on failure."""
        self.backup()
        try:
            with open(self.path, 'w') as f:
                f.write(str(self))
            pinfo("%s rewritten"%self.path)
        except Exception as e:
            perror(e)
            self.restore()
            raise ParserError()

    def parse(self, buff):
        """Populate data/keys/comments/sections from the file content."""
        section = None
        for line in buff.split("\n"):
            line = line.strip()
            # store comment line and continue
            if line.startswith('#') or len(line) == 0:
                self.comments[self.lastkey].append(line)
                continue
            # strip end-of-line comment
            try:
                i = line.index('#')
                line = line[:i]
                line = line.strip()
            except ValueError:
                pass
            # discard empty line
            if len(line) == 0:
                continue
            l = line.split()
            if len(l) < 2:
                continue
            key = l[0]
            value = line[len(key):].strip()
            # attach pending comments to this key
            if key not in self.comments:
                self.comments[key] = self.comments[self.lastkey]
            else:
                self.comments[key] += self.comments[self.lastkey]
            self.comments[self.lastkey] = []
            try:
                value = int(value)
            except:
                pass
            if key in self.section_markers:
                # str(): value may have been int()-converted above,
                # which previously made this concatenation a TypeError
                section = key + " " + str(value)
                if section not in self.sections:
                    self.sections[section] = {"keys": [], "data": {}}
                    self.section_names.append(section)
                continue
            if section:
                if key not in self.sections[section]["keys"]:
                    self.sections[section]["keys"].append(key)
                if key not in self.sections[section]["data"]:
                    self.sections[section]["data"][key] = []
                self.sections[section]["data"][key].append(value)
            else:
                if key not in self.keys:
                    self.keys.append(key)
                if key not in self.data:
                    self.data[key] = []
                self.data[key].append(value)
# Ad-hoc manual test: parse the file given as argv[1] and exercise
# the get/set/unset/backup API, then dump the rendered content.
if __name__ == "__main__":
    if len(sys.argv) != 2:
        perror("wrong number of arguments")
        sys.exit(1)
    o = Parser(sys.argv[1])
    o.get("Subsystem")
    o.set("Subsystem", "foo")
    o.unset("PermitRootLogin")
    o.backup()
    pinfo(o)
opensvc-1.8~20170412/var/compliance/com.opensvc/yes 0000755 0001750 0001750 00000000171 13073467726 022144 0 ustar jkelbert jkelbert #!/usr/bin/env python
# Minimal clone of yes(1): print "yes" forever.  Used by the package
# fixers to feed always-affirmative answers to interactive tools
# (pkgadd/pkgrm) through a pipe.
from __future__ import print_function
try:
while True:
print("yes")
except:
# swallow the broken pipe / interrupt raised when the consumer exits
pass
opensvc-1.8~20170412/var/compliance/com.opensvc/vuln.py 0000755 0001750 0001750 00000055120 13073467726 022763 0 ustar jkelbert jkelbert #!/usr/bin/env python
# Compliance module metadata: environment variable prefix, example rule,
# human description and the collector form definition (YAML) used to
# author rules for this "vuln" compliance object.
data = {
"default_prefix": "OSVC_COMP_VULN_",
"example_value": """
[
{
"pkgname": "kernel",
"minver": "2.6.18-238.19.1.el5",
"firstver": "2.6.18-238"
},
{
"pkgname": "kernel-xen",
"minver": "2.6.18-238.19.1.el5"
}
]
""",
"description": """* Raise an alert if an installed package version is in a version range
* If the package is not installed, do not raise an alert
""",
"form_definition": """
Desc: |
A rule defining a list of vulnerable packages and their minimum release version fixing the vulnerability.
Css: comp48
Outputs:
-
Dest: compliance variable
Type: json
Format: list of dict
Class: vuln
Inputs:
-
Id: pkgname
Label: Package name
DisplayModeLabel: pkgname
LabelCss: pkg16
Mandatory: Yes
Type: string
Help: The package name, as known to the target system's package manager.
-
Id: firstver
Label: First vulnerable version
DisplayModeLabel: firstver
LabelCss: pkg16
Mandatory: No
Type: string
Help: The first vulnerable package version. In the security context, the package version introducing the vulnerability.
-
Id: minver
Label: Minimum version
DisplayModeLabel: minver
LabelCss: pkg16
Mandatory: Yes
Type: string
Help: The package minimum version. In the security context, the package version fixing the vulnerability.
"""
}
import os
import sys
import json
import pwd
import sys
import re
import tempfile
from subprocess import *
from distutils.version import LooseVersion as V
from utilities import which
sys.path.append(os.path.dirname(__file__))
from comp import *
def repl(matchobj):
    """re.sub callback: rewrite a matched ".<letters>" token as ".0<letters>".

    Used by workaround_python_cmp to force alphabetic version segments
    below numeric ones in LooseVersion comparisons.
    """
    token = matchobj.group(0)
    return '.0' + token[1:]
class LiveKernVulnerable(Exception):
    """Raised when a fixed kernel is installed but the booted (live)
    kernel is still a vulnerable version: a reboot is needed."""
# Compliance object checking installed package versions against
# vulnerability rules, with per-vendor package manager backends.
class CompVuln(CompObject):
def __init__(self, prefix=None, uri=None):
CompObject.__init__(self, prefix=prefix, data=data)
# uri: package repository/depot location (required for HP-UX)
self.uri = uri
# Resolve rules from the environment and select the package-manager
# backend (get_installed_packages/fix_pkg/fixable_pkg/fix_all) from
# the OSVC_COMP_NODES_OS_VENDOR asset variable.
def init(self):
self.highest_avail_version = "0"
self.fix_list = []
self.need_pushpkg = False
self.sysname, self.nodename, x, x, self.machine = os.uname()
# strict mode: fix to exactly minver instead of the highest available
if 'OSVC_COMP_VULN_STRICT' in os.environ and \
os.environ['OSVC_COMP_VULN_STRICT'] == "true":
self.strict = True
else:
self.strict = False
# HP-UX swlist granularity: product (default) or bundle
if 'OSVC_COMP_VULN_PKG_TYPE' in os.environ and \
os.environ['OSVC_COMP_VULN_PKG_TYPE'] == "bundle":
self.pkg_type = 'bundle'
else:
self.pkg_type = 'product'
self.packages = []
for k, rule in self.get_rule_items():
try:
self.packages += self.add_rule(k, rule)
except InitError:
continue
except ValueError:
perror('failed to parse variable', os.environ[k])
if len(self.packages) == 0:
raise NotApplicable()
if self.sysname not in ['Linux', 'HP-UX', 'AIX', 'SunOS']:
perror('module not supported on', self.sysname)
raise NotApplicable()
if 'OSVC_COMP_NODES_OS_VENDOR' not in os.environ:
perror("OS_VENDOR is not set. Check your asset")
raise NotApplicable()
vendor = os.environ['OSVC_COMP_NODES_OS_VENDOR']
if vendor in ['Debian', 'Ubuntu']:
self.get_installed_packages = self.deb_get_installed_packages
self.fix_pkg = self.apt_fix_pkg
self.fixable_pkg = self.apt_fixable_pkg
self.fix_all = None
elif vendor in ['CentOS', 'Redhat', 'Red Hat'] or \
(vendor == 'Oracle' and self.sysname == 'Linux'):
self.get_installed_packages = self.rpm_get_installed_packages
self.fix_pkg = self.yum_fix_pkg
self.fixable_pkg = self.yum_fixable_pkg
self.fix_all = None
elif vendor in ['SuSE']:
self.get_installed_packages = self.rpm_get_installed_packages
self.fix_pkg = self.zyp_fix_pkg
self.fixable_pkg = self.zyp_fixable_pkg
self.fix_all = None
elif vendor in ['HP']:
# HP-UX swinstall needs a depot uri
if self.uri is None:
perror("URI is not set")
raise NotApplicable()
self.get_installed_packages = self.hp_get_installed_packages
self.fix_pkg = self.hp_fix_pkg
self.fixable_pkg = self.hp_fixable_pkg
self.fix_all = self.hp_fix_all
elif vendor in ['IBM']:
self.get_installed_packages = self.aix_get_installed_packages
self.fix_pkg = self.aix_fix_pkg
self.fixable_pkg = self.aix_fixable_pkg
self.fix_all = None
elif vendor in ['Oracle']:
# Oracle + non-Linux == Solaris (Linux handled above)
self.get_installed_packages = self.sol_get_installed_packages
self.fix_pkg = self.sol_fix_pkg
self.fixable_pkg = self.sol_fixable_pkg
self.fix_all = None
else:
perror(vendor, "not supported")
raise NotApplicable()
self.installed_packages = self.get_installed_packages()
# Tag the rule dict with its origin variable name (minus the prefix)
# for use in check/fix messages.
def add_rule(self, k, o):
o["rule"] = k.replace("OSVC_COMP_", "")
return [o]
# Return the free space (KB) of the filesystem holding path c, parsed
# from "df -k" output, or 0 on any error.
def get_free(self, c):
if not os.path.exists(c):
return 0
cmd = ["df", "-k", c]
p = Popen(cmd, stdout=PIPE, stderr=None)
out, err = p.communicate()
out = bdecode(out)
# heuristic: the free-KB column precedes the "Capacity%" column;
# assumes the standard df layout -- TODO confirm on all platforms
for line in out.split():
if "%" in line:
l = out.split()
for i, w in enumerate(l):
if '%' in w:
break
try:
f = int(l[i-1])
return f
except:
return 0
return 0
# Pick the candidate temp dir with the most free space and cache it.
# NOTE(review): candidates with identical free space collide on the
# dict key (last one wins); 'max' also shadows the builtin.
def get_temp_dir(self):
if hasattr(self, "tmpd"):
return self.tmpd
candidates = ["/tmp", "/var/tmp", "/root"]
free = {}
for c in candidates:
free[self.get_free(c)] = c
max = sorted(free.keys())[-1]
self.tmpd = free[max]
pinfo("selected %s as temp dir (%d KB free)" % (self.tmpd, max))
return self.tmpd
def download(self, pkg_name):
    """Fetch pkg_name (a URL) into a fresh temp directory.

    Returns the path of the downloaded file when it is not a tarball,
    or the path of the directory it was extracted into otherwise.
    Raises Exception on download failure.
    Side effect: changes the current working directory.
    """
    import urllib
    import tempfile
    # use NamedTemporaryFile only to obtain a unique name, then turn
    # that name into a directory
    f = tempfile.NamedTemporaryFile(dir=self.get_temp_dir())
    dname = f.name
    f.close()
    try:
        os.makedirs(dname)
    except:
        pass
    fname = os.path.join(dname, "file")
    try:
        self.urlretrieve(pkg_name, fname)
    except IOError as e:
        # bind the exception: the original "except IOError:" left 'e'
        # undefined and the raise below crashed with a NameError
        try:
            os.unlink(fname)
            # dname is a directory: unlink() can never remove it
            os.rmdir(dname)
        except:
            pass
        raise Exception("download failed: %s" % str(e))
    import tarfile
    os.chdir(dname)
    try:
        tar = tarfile.open(fname)
    except:
        pinfo("not a tarball")
        return fname
    try:
        tar.extractall()
    except:
        try:
            os.unlink(fname)
            os.rmdir(dname)
        except:
            pass
        # must be a pkg
        return dname
    tar.close()
    os.unlink(fname)
    return dname
# Return the OS version as a float parsed from "uname -v", or 0 on
# any error. Used on Solaris to decide on the pkgadd -G flag.
def get_os_ver(self):
cmd = ['uname', '-v']
p = Popen(cmd, stdout=PIPE)
out, err = p.communicate()
if p.returncode != 0:
return 0
out = bdecode(out)
lines = out.splitlines()
if len(lines) == 0:
return 0
try:
osver = float(lines[0])
except:
osver = 0
return osver
# Solaris backend: download the package from the rule's 'repo', remove
# the installed version (pkgrm fed by ./yes), then pkgadd the new one,
# optionally with an admin/response file from the rule's 'resp'.
def sol_fix_pkg(self, pkg):
r = self.check_pkg(pkg)
if r == RET_OK:
return RET_NA
if 'repo' not in pkg or len(pkg['repo']) == 0:
perror("no repo specified in the rule")
return RET_NA
pkg_url = pkg['repo']+"/"+pkg['pkgname']
pinfo("download", pkg_url)
try:
dname = self.download(pkg_url)
except Exception as e:
perror(e)
return RET_ERR
if pkg["pkgname"] in self.installed_packages:
os.chdir("/")
# pipe an endless stream of "yes" into the interactive pkgrm
yes = os.path.dirname(__file__) + "/yes"
cmd = '%s | pkgrm %s' % (yes, pkg['pkgname'])
print(cmd)
r = os.system(cmd)
if r != 0:
return RET_ERR
# NOTE(review): when download() returned a plain file, os.chdir(dname)
# below is called on that file and would raise -- confirm intent
if os.path.isfile(dname):
d = dname
else:
d = "."
os.chdir(dname)
# Solaris >= 10: -G installs in the current zone only
if self.get_os_ver() < 10:
opts = ''
else:
opts = '-G'
if 'resp' in pkg and len(pkg['resp']) > 0:
f = tempfile.NamedTemporaryFile(dir=self.get_temp_dir())
resp = f.name
f.close()
with open(resp, "w") as f:
f.write(pkg['resp'])
else:
resp = "/dev/null"
yes = os.path.dirname(__file__) + "/yes"
cmd = '%s | pkgadd -r %s %s -d %s all' % (yes, resp, opts, d)
print(cmd)
r = os.system(cmd)
os.chdir("/")
if os.path.isdir(dname):
import shutil
shutil.rmtree(dname)
if r != 0:
return RET_ERR
return RET_OK
# Solaris availability check is not implemented: report "fixable".
def sol_fixable_pkg(self, pkg):
return 0
def sol_fix_all(self):
return RET_NA
# Return {pkgname: [(version, arch), ...]} from pkginfo -l.
def sol_get_installed_packages(self):
p = Popen(['pkginfo', '-l'], stdout=PIPE)
(out, err) = p.communicate()
if p.returncode != 0:
perror('can not fetch installed packages list')
return {}
out = bdecode(out)
return self.sol_parse_pkginfo(out)
def sol_parse_pkginfo(self, out):
    """Parse `pkginfo -l` output into {pkgname: [(version, arch), ...]}.

    Relies on pkginfo emitting PKGINST, then ARCH, then VERSION for
    each package entry.
    """
    pkgs = {}
    for line in out.split('\n'):
        fields = line.split(':')
        if len(fields) != 2:
            continue
        label = fields[0].strip()
        if label == "PKGINST":
            pkgname = fields[1].strip()
        elif label == "ARCH":
            pkgarch = fields[1].strip()
        elif label == "VERSION":
            # VERSION closes the entry: record the accumulated fields
            pkgvers = fields[1].strip()
            if pkgname in pkgs:
                pkgs[pkgname] += [(pkgvers, pkgarch)]
            else:
                pkgs[pkgname] = [(pkgvers, pkgarch)]
    return pkgs
# AIX backend: install the fileset from the NIM lpp_source given by
# self.uri via nimclient.
def aix_fix_pkg(self, pkg):
r = self.check_pkg(pkg)
if r == RET_OK:
return RET_NA
cmd = ['nimclient', '-o', 'cust',
'-a', 'lpp_source=%s'%self.uri,
'-a', 'installp_flags=aFQY',
'-a', 'filesets=%s'%pkg['pkgname']]
s = " ".join(cmd)
pinfo(s)
r = os.system(s)
if r != 0:
return RET_ERR
return RET_OK
# Availability check not implemented on AIX.
def aix_fixable_pkg(self, pkg):
return RET_NA
def aix_fix_all(self):
return RET_NA
# Return {fileset: [(version, ""), ...]} from lslpp -L -c.
def aix_get_installed_packages(self):
p = Popen(['lslpp', '-L', '-c'], stdout=PIPE)
(out, err) = p.communicate()
if p.returncode != 0:
perror('can not fetch installed packages list')
return {}
out = bdecode(out)
return self.aix_parse_lslpp(out)
# Parse colon-separated lslpp output; field 1 is "name-version",
# field 2 the version, so the name is field1 minus "-version".
def aix_parse_lslpp(self, out):
l = {}
for line in out.split('\n'):
if line.startswith('#') or len(line) == 0:
continue
v = line.split(':')
if len(v) < 3:
continue
pkgname = v[1].replace('-'+v[2], '')
if pkgname in l:
l[pkgname] += [(v[2], "")]
else:
l[pkgname] = [(v[2], "")]
return l
# HP-UX backend: queue the package for the final swinstall run done by
# hp_fix_all(). In strict mode pin r=minver, else the highest version
# found in the depot by hp_fixable_pkg().
def hp_fix_pkg(self, pkg):
if self.check_pkg(pkg, verbose=False) == RET_OK:
return RET_OK
if self.fixable_pkg(pkg) == RET_ERR:
return RET_ERR
if self.highest_avail_version == "0":
return RET_ERR
if self.strict:
self.fix_list.append(pkg["pkgname"]+',r='+pkg["minver"])
else:
self.fix_list.append(pkg["pkgname"]+',r='+self.highest_avail_version)
self.need_pushpkg = True
self.installed_packages = self.get_installed_packages()
return RET_OK
# Install every queued package in one swinstall transaction.
def hp_fix_all(self):
r = call(['swinstall', '-x', 'allow_downdate=true', '-x', 'autoreboot=true', '-x', 'mount_all_filesystems=false', '-s', self.uri] + self.fix_list)
if r != 0:
return RET_ERR
return RET_OK
def hp_fixable_pkg(self, pkg):
    """Check whether pkg['minver'] or better is available in the swlist
    depot (self.uri).

    Side effect: sets self.highest_avail_version to the highest depot
    version of the package, consumed by hp_fix_pkg().
    """
    self.highest_avail_version = "0"
    if self.check_pkg(pkg, verbose=False) == RET_OK:
        return RET_OK
    cmd = ['swlist', '-l', self.pkg_type, '-s', self.uri, pkg['pkgname']]
    p = Popen(cmd, stdout=PIPE, stderr=PIPE)
    (out, err) = p.communicate()
    # decode stderr: a substring test on a bytes object raises
    # TypeError on python3
    err = bdecode(err)
    if p.returncode != 0:
        if "not found on host" in err:
            perror('%s > %s not available in repositories'%(pkg['pkgname'], pkg['minver']))
        else:
            perror('can not fetch available packages list')
        return RET_ERR
    out = bdecode(out)
    l = self.hp_parse_swlist(out)
    if len(l) == 0:
        perror('%s > %s not available in repositories'%(pkg['pkgname'], pkg['minver']))
        return RET_ERR
    # dict.values() is a non-indexable view on python3: materialize it
    # before taking the first (only) entry
    for v in [x[0] for x in list(l.values())[0]]:
        if V(v) > V(self.highest_avail_version):
            self.highest_avail_version = v
    if V(self.highest_avail_version) < V(pkg['minver']):
        perror('%s > %s not available in repositories'%(pkg['pkgname'], pkg['minver']))
        return RET_ERR
    return RET_OK
# Return {name: [(version, ""), ...]} from swlist at self.pkg_type
# granularity (product or bundle).
def hp_get_installed_packages(self):
p = Popen(['swlist', '-l', self.pkg_type], stdout=PIPE)
(out, err) = p.communicate()
if p.returncode != 0:
perror('can not fetch installed packages list')
return {}
out = bdecode(out)
return self.hp_parse_swlist(out)
def hp_parse_swlist(self, out):
    """Parse swlist output into {name: [(version, ""), ...]}.

    Comment lines ('#') and blank lines are skipped; the arch slot of
    each tuple is left empty (not reported by swlist).
    """
    packages = {}
    for line in out.split('\n'):
        if line.startswith('#') or len(line) == 0:
            continue
        fields = line.split()
        if len(fields) < 2:
            continue
        packages.setdefault(fields[0], []).append((fields[1], ""))
    return packages
# Return {name: [(version-release, arch), ...]} from rpm -qa.
def rpm_get_installed_packages(self):
p = Popen(['rpm', '-qa', '--qf', '%{NAME} %{VERSION}-%{RELEASE} %{ARCH}\n'], stdout=PIPE)
(out, err) = p.communicate()
if p.returncode != 0:
perror('can not fetch installed packages list')
return {}
l = {}
out = bdecode(out)
for line in out.splitlines():
v = line.split(' ')
if len(v) != 3:
continue
if v[0] in l:
l[v[0]] += [(v[1], v[2])]
else:
l[v[0]] = [(v[1], v[2])]
return l
# Return {name: [(version, ""), ...]} for packages in state "ii"
# from dpkg -l. The ":arch" suffix is stripped from package names.
def deb_get_installed_packages(self):
p = Popen(['dpkg', '-l'], stdout=PIPE)
(out, err) = p.communicate()
if p.returncode != 0:
perror('can not fetch installed packages list')
return {}
l = {}
out = bdecode(out)
for line in out.splitlines():
if not line.startswith('ii'):
continue
v = line.split()[1:3]
pkgname = v[0]
pkgname = pkgname.split(':')[0]
l[pkgname] = [(v[1], "")]
return l
# Availability checks not implemented for apt and zypper backends.
def apt_fixable_pkg(self, pkg):
# TODO
return RET_NA
def zyp_fixable_pkg(self, pkg):
return RET_NA
def yum_fixable_pkg(self, pkg):
    """Return RET_OK when pkg['minver'] or better is available via
    'yum list available', RET_ERR otherwise."""
    try:
        r = self.check_pkg(pkg, verbose=False)
    except LiveKernVulnerable:
        # reboot needed, but nothing to install: treat as fixable
        r = RET_OK
    if r == RET_OK:
        return RET_OK
    cmd = ['yum', 'list', 'available', pkg['pkgname']]
    p = Popen(cmd, stdout=PIPE, stderr=PIPE)
    (out, err) = p.communicate()
    # decode stderr: a substring test on a bytes object raises
    # TypeError on python3
    err = bdecode(err)
    if p.returncode != 0:
        if "No matching Packages" in err:
            perror('%s > %s not available in repositories'%(pkg['pkgname'], pkg['minver']))
        else:
            perror('can not fetch available packages list')
        return RET_ERR
    highest_avail_version = "0"
    out = bdecode(out)
    for line in out.splitlines():
        l = line.split()
        if len(l) != 3:
            continue
        if V(l[1]) > V(highest_avail_version):
            highest_avail_version = l[1]
    if V(highest_avail_version) < V(pkg['minver']):
        perror('%s > %s not available in repositories'%(pkg['pkgname'], pkg['minver']))
        return RET_ERR
    return RET_OK
def tainted(self, pkg):
    """Return True when pkg is a kernel package and the running Linux
    kernel reports a non-zero taint mask in /proc/sys/kernel/tainted."""
    if not pkg["pkgname"].startswith("kernel-") and \
       not pkg["pkgname"].startswith("linux-image"):
        return False
    if self.sysname != 'Linux':
        return False
    if not os.path.exists("/proc/sys/kernel/tainted"):
        return False
    with open("/proc/sys/kernel/tainted", "r") as f:
        buff = f.read()
    # strip the trailing newline: f.read() returns "0\n", so the bare
    # == "0" comparison wrongly reported every kernel as tainted
    if buff.strip() == "0":
        return False
    return True
# SuSE backend: zypper install, then refresh the installed inventory.
def zyp_fix_pkg(self, pkg):
try:
r = self.check_pkg(pkg, verbose=False)
except LiveKernVulnerable:
# fixed kernel already installed, only a reboot is missing
r = RET_OK
if r == RET_OK:
return RET_OK
if self.fixable_pkg(pkg) == RET_ERR:
return RET_ERR
r = call(['zypper', 'install', '-y', pkg["pkgname"]])
if r != 0:
return RET_ERR
self.need_pushpkg = True
self.installed_packages = self.get_installed_packages()
return RET_OK
# RedHat-family backend: yum install, then refresh the inventory.
def yum_fix_pkg(self, pkg):
try:
r = self.check_pkg(pkg, verbose=False)
except LiveKernVulnerable:
r = RET_OK
if r == RET_OK:
return RET_OK
if self.fixable_pkg(pkg) == RET_ERR:
return RET_ERR
r = call(['yum', '-y', 'install', pkg["pkgname"]])
if r != 0:
return RET_ERR
self.need_pushpkg = True
self.installed_packages = self.get_installed_packages()
return RET_OK
# Debian-family backend: apt-get install, then refresh the inventory.
def apt_fix_pkg(self, pkg):
if self.check_pkg(pkg, verbose=False) == RET_OK:
return RET_OK
r = call(['apt-get', 'install', '-y', '--allow-unauthenticated', pkg["pkgname"]])
if r != 0:
return RET_ERR
self.need_pushpkg = True
self.installed_packages = self.get_installed_packages()
return RET_OK
def get_raw_kver(self):
    """Return the booted kernel release string (uname release field)."""
    return os.uname()[2]
def get_kver(self):
    """Return the booted kernel release with flavor and arch suffixes
    (xen, hugemem, smp, PAE, .x86_64, .i686) stripped, for comparison
    against package versions."""
    kver = self.get_raw_kver()
    for token in ('xen', 'hugemem', 'smp', 'PAE', '.x86_64', '.i686'):
        kver = kver.replace(token, '')
    return kver
def workaround_python_cmp(self, s):
    """Prefix alphabetic version segments with '0' so LooseVersion
    orders them below numeric segments, matching package-manager
    semantics: python would otherwise rank 2.6.18-238.el5 above
    2.6.18-238.11.1.el5, which is wrong from the package manager's
    point of view.
    """
    # raw string: "\." in a plain literal is an invalid escape and
    # triggers a SyntaxWarning on recent python3
    return re.sub(r"\.[a-zA-Z]+", repl, s)
def check_pkg(self, pkg, verbose=True):
    """Check one vulnerability rule against the installed packages.

    Returns RET_OK when the package is absent, not applicable or at a
    fixed version, RET_ERR when a vulnerable version is installed.
    Raises LiveKernVulnerable when a fixed kernel is installed but a
    vulnerable one is booted.
    """
    if pkg["pkgname"] not in self.installed_packages:
        if verbose:
            pinfo(pkg["pkgname"], "is not installed (%s:not applicable)"%pkg["rule"])
        return RET_OK
    name = pkg["pkgname"]
    if name.startswith("kernel"):
        if self.tainted(pkg):
            pinfo(name, "booted kernel is tainted", "(%s)"%pkg["rule"])
        # a flavor kernel (xen/smp/...) is booted: only the matching
        # flavored package rule applies
        kver = self.get_raw_kver()
        for i in ('xen', 'hugemem', 'smp', 'PAE'):
            if kver.endswith(i) and name != "kernel-"+i:
                if verbose:
                    pinfo(name, "bypassed :", i, "kernel booted", "(%s:not applicable)"%pkg["rule"])
                return RET_OK
    highest = "0"
    highest_v = V(highest)
    ok = []
    minver = self.workaround_python_cmp(pkg['minver'])
    target = V(minver)
    if 'firstver' in pkg and pkg['firstver'] != "":
        firstver = self.workaround_python_cmp(pkg['firstver'])
    else:
        firstver = "0"
    firstver_v = V(firstver)
    # materialize the list: a python3 map() is a one-shot iterator and
    # the second traversal below (error message) would find it empty
    candidates = [[name] + list(x) for x in self.installed_packages[name]]
    for _name, vers, arch in candidates:
        _vers = self.workaround_python_cmp(vers)
        actual = V(_vers)
        if actual > highest_v or highest == "0":
            highest = vers
            highest_v = actual
        # not vulnerable when at/after the fix, or before the
        # vulnerability was introduced
        if target <= actual or firstver_v > actual:
            ok.append((_name, vers, arch))
    if highest == "0":
        # not installed
        if verbose:
            pinfo(name, "is not installed (%s:not applicable)"%pkg["rule"])
        return RET_OK
    if name.startswith("kernel"):
        # for kernels, the *booted* version must be a fixed one
        kver = self.get_kver()
        if len(ok) == 0:
            if verbose:
                perror(', '.join(map(lambda x: x[0]+"-"+x[1]+"."+x[2], candidates)), 'installed and vulnerable. upgrade to', pkg["minver"], "(%s:need upgrade)"%pkg["rule"])
            return RET_ERR
        elif kver not in map(lambda x: x[1], ok):
            if verbose:
                perror(', '.join(map(lambda x: x[0]+"-"+x[1]+"."+x[2], ok)), "installed and not vulnerable but vulnerable kernel", self.get_raw_kver(), "booted", "(%s:need reboot)"%pkg["rule"])
            raise LiveKernVulnerable()
        else:
            if verbose:
                pinfo("kernel", self.get_raw_kver(), "installed, booted and not vulnerable", "(%s:not vulnerable)"%pkg["rule"])
            return RET_OK
    if len(ok) > 0:
        if verbose:
            pinfo("%s installed and not vulnerable (%s:not vulnerable)"%(', '.join(map(lambda x: x[0]+"-"+x[1]+"."+x[2], ok)), pkg["rule"]))
        return RET_OK
    if verbose:
        # report the highest installed version (the original printed the
        # loop-leaked 'vers' of whatever entry happened to come last)
        perror('package', name+"-"+highest, 'is vulnerable. upgrade to', pkg["minver"], "(%s:need upgrade)"%pkg["rule"])
    return RET_ERR
def check(self):
    """Check every rule; OR the per-package statuses together.

    A LiveKernVulnerable (fixed kernel installed, vulnerable kernel
    booted) counts as an error.
    """
    ret = 0
    for rule in self.packages:
        try:
            ret |= self.check_pkg(rule)
        except LiveKernVulnerable:
            ret |= RET_ERR
    return ret
def fix(self):
    """Fix every vulnerable package, run the backend's batch installer
    when one exists, then push the package inventory."""
    r = 0
    for pkg in self.packages:
        if self.tainted(pkg):
            # was perror(name, ...): 'name' is undefined in this scope
            # and raised a NameError
            perror(pkg["pkgname"], "booted kernel is tainted. not safe to upgrade.", "(%s)"%pkg["rule"])
        r |= self.fix_pkg(pkg)
    if self.fix_all is not None and len(self.fix_list) > 0:
        self.fix_all()
    if self.need_pushpkg:
        self.pushpkg()
    return r
# Refresh the collector's package inventory via "nodemgr pushpkg",
# silently skipped when nodemgr is not in PATH.
def pushpkg(self):
bin = 'nodemgr'
if which(bin) is None:
return
cmd = [bin, 'pushpkg']
pinfo(' '.join(cmd))
p = Popen(cmd)
p.communicate()
# OR of the backend availability checks for all rules.
def fixable(self):
r = 0
for pkg in self.packages:
r |= self.fixable_pkg(pkg)
return r
if __name__ == "__main__":
main(CompVuln)
opensvc-1.8~20170412/var/compliance/com.opensvc/etcsystem.py 0000755 0001750 0001750 00000015474 13073467726 024027 0 ustar jkelbert jkelbert #!/usr/bin/env python
# Compliance module metadata for the /etc/system checker: env prefix,
# example rule, description and the collector form definition (YAML).
data = {
"default_prefix": "OSVC_COMP_ETCSYSTEM_",
"example_value": """ [{"key": "fcp:fcp_offline_delay", "op": ">=", "value": 21}, {"key": "ssd:ssd_io_time", "op": "=", "value": "0x3C"}] """,
"description": "Checks and setup values in /etc/system respecting strict targets or thresholds.",
"form_definition": """
Desc: |
A rule to set a list of Solaris kernel parameters to be set in /etc/system. Current values can be checked as strictly equal, or superior/inferior to their target value.
Css: comp48
Outputs:
-
Dest: compliance variable
Type: json
Format: list of dict
Class: etcsystem
Inputs:
-
Id: key
Label: Key
DisplayModeLabel: key
LabelCss: action16
Mandatory: Yes
Type: string
Help: The /etc/system parameter to check.
-
Id: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string or integer
Help: The /etc/system parameter target value.
-
Id: op
Label: Comparison operator
DisplayModeLabel: op
LabelCss: action16
Mandatory: Yes
Type: string
Default: "="
Candidates:
- "="
- ">"
- ">="
- "<"
- "<="
Help: The comparison operator to use to check the parameter current value.
""",
}
import os
import sys
from subprocess import *
sys.path.append(os.path.dirname(__file__))
from comp import *
# Compliance object checking and fixing Solaris kernel parameters
# ("set var = value" lines) in /etc/system.
class EtcSystem(CompObject):
def __init__(self, prefix=None):
CompObject.__init__(self, prefix=prefix, data=data)
def init(self):
self.keys = self.get_rules()
if len(self.keys) == 0:
raise NotApplicable()
# data: {var: [[value, line_index], ...]} filled by load_file()
self.data = {}
self.cf = os.path.join(os.sep, 'etc', 'system')
self.load_file(self.cf)
def fixable(self):
return RET_OK
def load_file(self, p):
    """Load /etc/system into self.lines (raw lines) and self.data,
    mapping each 'set' variable to a list of [value, line_index]
    pairs (a variable may legally appear on several lines)."""
    if not os.path.exists(p):
        perror(p, "does not exist")
        return
    with open(p, 'r') as f:
        buff = f.read()
    self.lines = buff.split('\n')
    for i, line in enumerate(self.lines):
        line = line.strip()
        # '*' starts a comment in /etc/system
        if line.startswith('*'):
            continue
        if len(line) == 0:
            continue
        l = line.split()
        if l[0] != "set":
            continue
        if len(l) < 2:
            continue
        # drop the 'set' keyword and any trailing inline comment
        line = ' '.join(l[1:]).split('*')[0]
        # guard + maxsplit: the bare split('=') raised ValueError on
        # lines without '=' or whose value itself contains '='
        if '=' not in line:
            continue
        var, val = line.split('=', 1)
        var = var.strip()
        val = val.strip()
        try:
            val = int(val)
        except:
            pass
        if var in self.data:
            self.data[var].append([val, i])
        else:
            self.data[var] = [[val, i]]
# Fix a parameter in the in-memory line list: comment out every line
# failing the check, and append a fresh "set key = value" line when no
# compliant line remains. Changes are written to disk by fix().
def set_val(self, keyname, target, op):
newline = 'set %s = %s'%(keyname, str(target))
if keyname not in self.data:
pinfo("add '%s' to /etc/system"%newline)
# insert before the last (usually empty) line
self.lines.insert(-1, newline + " * added by opensvc")
else:
ok = 0
for value, ref in self.data[keyname]:
r = self._check_key(keyname, target, op, value, ref, verbose=False)
if r == RET_ERR:
pinfo("comment out line %d: %s"%(ref, self.lines[ref]))
self.lines[ref] = '* '+self.lines[ref]+' * commented out by opensvc'
else:
ok += 1
if ok == 0:
pinfo("add '%s' to /etc/system"%newline)
self.lines.insert(-1, newline + " * added by opensvc")
# Return the [[value, line_index], ...] entries for keyname, or [].
def get_val(self, keyname):
if keyname not in self.data:
return []
return self.data[keyname]
# Compare one current value against the target with operator op
# ('=', '<=' or '>='). Returns RET_OK or RET_ERR. '=' compares as
# strings; threshold operators require an integer current value.
def _check_key(self, keyname, target, op, value, ref, verbose=True):
r = RET_OK
if value is None:
if verbose:
perror("%s not set"%keyname)
r |= RET_ERR
if op == '=':
if str(value) != str(target):
if verbose:
perror("%s=%s, target: %s"%(keyname, str(value), str(target)))
r |= RET_ERR
elif verbose:
pinfo("%s=%s on target"%(keyname, str(value)))
else:
if type(value) != int:
if verbose:
perror("%s=%s value must be integer"%(keyname, str(value)))
r |= RET_ERR
elif op == '<=' and value > target:
if verbose:
perror("%s=%s target: <= %s"%(keyname, str(value), str(target)))
r |= RET_ERR
elif op == '>=' and value < target:
if verbose:
perror("%s=%s target: >= %s"%(keyname, str(value), str(target)))
r |= RET_ERR
elif verbose:
pinfo("%s=%s on target"%(keyname, str(value)))
return r
# Validate one rule dict and check its parameter against /etc/system.
# Returns RET_NA on malformed rules, RET_ERR when unset, failing or
# duplicated, RET_OK otherwise.
def check_key(self, key, verbose=True):
if 'key' not in key:
if verbose:
perror("'key' not set in rule %s"%str(key))
return RET_NA
if 'value' not in key:
if verbose:
perror("'value' not set in rule %s"%str(key))
return RET_NA
if 'op' not in key:
op = "="
else:
op = key['op']
target = key['value']
if op not in ('>=', '<=', '='):
if verbose:
perror("'value' list member 0 must be either '=', '>=' or '<=': %s"%str(key))
return RET_NA
keyname = key['key']
data = self.get_val(keyname)
if len(data) == 0:
perror("%s key is not set"%keyname)
return RET_ERR
r = RET_OK
ok = 0
# NOTE(review): r accumulates across lines, so once a line failed the
# remaining compliant lines are not counted in 'ok' -- confirm intent
for value, ref in data:
r |= self._check_key(keyname, target, op, value, ref, verbose)
if r == RET_OK:
ok += 1
if ok > 1:
perror("duplicate lines for key %s"%keyname)
r |= RET_ERR
return r
# Apply one rule to the in-memory line list.
def fix_key(self, key):
self.set_val(key['key'], key['value'], key['op'])
# OR of all rule checks.
def check(self):
r = 0
for key in self.keys:
r |= self.check_key(key, verbose=True)
return r
# Fix every failing rule in memory, then back up /etc/system (to a
# timestamp-suffixed copy) and rewrite it.
def fix(self):
for key in self.keys:
if self.check_key(key, verbose=False) == RET_ERR:
self.fix_key(key)
if len(self.keys) > 0:
import datetime
backup = self.cf+str(datetime.datetime.now())
try:
import shutil
shutil.copy(self.cf, backup)
except:
perror("failed to backup %s"%self.cf)
return RET_ERR
try:
with open(self.cf, 'w') as f:
f.write('\n'.join(self.lines))
except:
perror("failed to write %s"%self.cf)
return RET_ERR
return RET_OK
if __name__ == "__main__":
main(EtcSystem)
opensvc-1.8~20170412/var/compliance/com.opensvc/svcconf.py 0000755 0001750 0001750 00000023415 13073467726 023442 0 ustar jkelbert jkelbert #!/usr/bin/env python
# Compliance module metadata for the OpenSVC service configuration
# checker.
# NOTE(review): default_prefix "OSVC_COMP_GROUP_" looks copy-pasted
# from the group module; a svcconf-specific prefix was presumably
# intended -- confirm before changing, rules may depend on it.
data = {
"default_prefix": "OSVC_COMP_GROUP_",
"example_env": {
"OSVC_COMP_SERVICES_SVCNAME": "testsvc",
},
"example_value": """
[
{
"value": "fd5373b3d938",
"key": "container#1.run_image",
"op": "="
},
{
"value": "/bin/sh",
"key": "container#1.run_command",
"op": "="
},
{
"value": "/opt/%%ENV:SERVICES_SVCNAME%%",
"key": "DEFAULT.docker_data_dir",
"op": "="
},
{
"value": "no",
"key": "container(type=docker).disable",
"op": "="
},
{
"value": 123,
"key": "container(type=docker&&run_command=/bin/sh).newvar",
"op": "="
}
]
""",
"description": """* Setup and verify parameters in a opensvc service configuration.
""",
"form_definition": """
Desc: |
A rule to set a parameter in OpenSVC .conf configuration file. Used by the 'svcconf' compliance object.
Css: comp48
Outputs:
-
Dest: compliance variable
Type: json
Format: list of dict
Class: svcconf
Inputs:
-
Id: key
Label: Key
DisplayModeLabel: key
LabelCss: action16
Mandatory: Yes
Type: string
Help: The OpenSVC .conf parameter to check.
-
Id: op
Label: Comparison operator
DisplayModeLabel: op
LabelCss: action16
Mandatory: Yes
Type: string
Default: "="
Candidates:
- "="
- ">"
- ">="
- "<"
- "<="
Help: The comparison operator to use to check the parameter value.
-
Id: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string or integer
Help: The OpenSVC .conf parameter value to check.
""",
}
import os
import sys
import json
import re
import copy
from subprocess import *
sys.path.append(os.path.dirname(__file__))
from comp import *
# Compliance object checking and setting parameters in an OpenSVC
# service configuration, via svcmgr.
class SvcConf(CompObject):
def __init__(self, prefix=None):
CompObject.__init__(self, prefix=prefix, data=data)
def init(self):
self.keys = []
if "OSVC_COMP_SERVICES_SVCNAME" not in os.environ:
pinfo("SERVICES_SVCNAME is not set")
raise NotApplicable()
self.svcname = os.environ['OSVC_COMP_SERVICES_SVCNAME']
self.keys = self.get_rules()
try:
# prime the cached service configuration
self.get_config_file(refresh=True)
except Exception as e:
perror("unable to load service configuration:", str(e))
raise ComplianceError()
self.sanitize_keys()
self.expand_keys()
def get_config_file(self, refresh=False):
    """Return the service configuration as a dict, obtained from
    'svcmgr json_config'.

    The result is cached in self.svc_config; pass refresh=True to
    re-run svcmgr. Raises on svcmgr failure or invalid json.
    """
    if not refresh:
        return self.svc_config
    cmd = ['svcmgr', '-s', self.svcname, 'json_config']
    p = Popen(cmd, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    if p.returncode != 0:
        # fail early with the svcmgr error instead of an opaque json
        # decode error on an empty stdout
        raise Exception(bdecode(err))
    out = bdecode(out)
    self.svc_config = json.loads(out)
    return self.svc_config
def fixable(self):
return RET_NA
# Set a 'section.var' parameter through "svcmgr set"; returns the
# svcmgr exit code.
def set_val(self, keyname, target):
if type(target) == int:
target = str(target)
cmd = ['svcmgr', '-s', self.svcname, 'set', '--param', keyname, '--value', target]
pinfo(' '.join(cmd))
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
return p.returncode
def get_val(self, keyname):
    """Return the current value of a 'section.var' key from the cached
    service configuration, or None when absent."""
    # rsplit on the last dot: this matches expand_keys' greedy
    # '(.*)\.(.*)' split, and the bare split('.') raised ValueError
    # on keys containing more than one dot
    section, var = keyname.rsplit('.', 1)
    if section not in self.svc_config:
        return None
    return self.svc_config[section].get(var)
# Compare one current value against the target with operator op
# ('=', '<=' or '>='). '=' compares as strings; threshold operators
# require an integer current value. Returns RET_OK or RET_ERR.
def _check_key(self, keyname, target, op, value, verbose=True):
r = RET_OK
if value is None:
if verbose:
perror("%s not set"%keyname)
r |= RET_ERR
if op == '=':
if str(value) != str(target):
if verbose:
perror("%s=%s, target: %s"%(keyname, str(value), str(target)))
r |= RET_ERR
elif verbose:
pinfo("%s=%s on target"%(keyname, str(value)))
else:
if type(value) != int:
if verbose:
perror("%s=%s value must be integer"%(keyname, str(value)))
r |= RET_ERR
elif op == '<=' and value > target:
if verbose:
perror("%s=%s target: <= %s"%(keyname, str(value), str(target)))
r |= RET_ERR
elif op == '>=' and value < target:
if verbose:
perror("%s=%s target: >= %s"%(keyname, str(value), str(target)))
r |= RET_ERR
elif verbose:
pinfo("%s=%s on target"%(keyname, str(value)))
return r
# Recursively evaluate a filter expression like "a=b&&c~=/re/" against
# a config section: split off the leftmost term, evaluate it, then
# combine with the rest using and/or.
# NOTE(review): when both '&&' and '||' are present, the '||' index()
# overwrites i and op regardless of which operator comes first; also
# lstrip("&&")/lstrip("||") strips a *character set*, not a prefix --
# confirm intent before relying on mixed-operator filters.
def check_filter(self, section, filter):
op = None
i = 0
try:
i = filter.index("&&")
op = "and"
except ValueError:
pass
try:
i = filter.index("||")
op = "or"
except ValueError:
pass
if i == 0:
_filter = filter
_tail = ""
else:
_filter = filter[:i]
_tail = filter[i:].lstrip("&&").lstrip("||")
r = self._check_filter(section, _filter)
#pinfo(" _check_filter('%s', '%s') => %s" % (section, _filter, str(r)))
if op == "and":
r &= self.check_filter(section, _tail)
elif op == "or":
r |= self.check_filter(section, _tail)
return r
# Dispatch a single filter term: 'key~=/regex/' or 'key=value'.
def _check_filter(self, section, filter):
if "~=" in filter:
return self._check_filter_reg(section, filter)
elif "=" in filter:
return self._check_filter_eq(section, filter)
perror("invalid filter syntax: %s" % filter)
return False
def _check_filter_eq(self, section, filter):
    """Evaluate a 'key=value' filter term: True when the section's key
    stringifies equal to the given value."""
    parts = filter.split("=")
    if len(parts) != 2:
        perror("invalid filter syntax: %s" % filter)
        return False
    key, val = parts
    cur_val = self.svc_config[section].get(key)
    if cur_val is None:
        return False
    return str(cur_val) == str(val)
def _check_filter_reg(self, section, filter):
    """Evaluate a 'key~=/regex/' filter term: True when the section's
    key matches the regex (anchored at the start, re.match)."""
    parts = filter.split("~=")
    if len(parts) != 2:
        perror("invalid filter syntax: %s" % filter)
        return False
    key, val = parts
    val = val.strip("/")
    cur_val = self.svc_config[section].get(key)
    if cur_val is None:
        return False
    return re.compile(val).match(cur_val) is not None
def resolve_sections(self, s, filter):
"""
s is a resource section name (fs, container, app, sync, ...)
filter is a regexp like expression
container(type=docker)
fs(mnt~=/.*tools/)
container(type=docker&&run_image~=/opensvc\/collector_web:build.*/)
fs(mnt~=/.*tools/||mnt~=/.*moteurs/)
Returns the sorted list of section names ("s" or "s#N") whose
current configuration satisfies the filter.
"""
result = [];
eligiblesections = [];
for section in self.svc_config.keys():
if section.startswith(s+'#') or section == s:
eligiblesections.append(section)
for section in eligiblesections:
if self.check_filter(section, filter):
#pinfo(" =>", section, "matches filter")
result.append(section)
result.sort()
return result
def sanitize_keys(self, verbose=True):
    """Validate every rule dict (key/value present, op supported) and
    exit the module when any rule is malformed."""
    r = RET_OK
    for key in self.keys:
        if 'key' not in key:
            if verbose:
                perror("'key' not set in rule %s"%str(key))
            r |= RET_NA
        if 'value' not in key:
            if verbose:
                perror("'value' not set in rule %s"%str(key))
            r |= RET_NA
        if 'op' not in key:
            op = "="
        else:
            op = key['op']
        if op not in ('>=', '<=', '='):
            if verbose:
                perror("'value' list member 0 must be either '=', '>=' or '<=': %s"%str(key))
            r |= RET_NA
    # value comparison, not identity: "r is not RET_OK" only worked by
    # accident of CPython small-int caching
    if r != RET_OK:
        sys.exit(r)
# Rewrite self.keys, expanding filtered keys of the form
# "section(filter).var" into one plain "resolvedsection.var" rule per
# matching section. Plain "section.var" keys are kept as-is; anything
# else is dropped.
def expand_keys(self):
expanded_keys = []
for key in self.keys:
keyname = key['key']
target = key['value']
op = key['op']
sectionlist = [];
reg1 = re.compile(r'(.*)\((.*)\)\.(.*)')
reg2 = re.compile(r'(.*)\.(.*)')
m = reg1.search(keyname)
if m:
section = m.group(1)
filter = m.group(2)
var = m.group(3)
sectionlist = self.resolve_sections(section, filter)
for resolvedsection in sectionlist:
newdict = {
'key': '.'.join([resolvedsection, var]),
'op': op,
'value': target
}
expanded_keys.append(newdict)
continue
m = reg2.search(keyname)
if m:
section = m.group(1)
var = m.group(2)
expanded_keys.append(copy.copy(key))
continue
# drop key
self.keys = expanded_keys
# Check one expanded rule against the cached service config.
def check_key(self, key, verbose=True):
op = key['op']
target = key['value']
keyname = key['key']
value = self.get_val(keyname)
if value is None:
if verbose:
perror("%s key is not set"%keyname)
return RET_ERR
return self._check_key(keyname, target, op, value, verbose)
# Apply one rule through svcmgr; returns its exit code.
def fix_key(self, key):
return self.set_val(key['key'], key['value'])
# OR of all rule checks.
def check(self):
r = 0
for key in self.keys:
r |= self.check_key(key, verbose=True)
return r
# Fix every failing rule; sums the svcmgr exit codes.
def fix(self):
r = 0
for key in self.keys:
if self.check_key(key, verbose=False) == RET_ERR:
r += self.fix_key(key)
return r
if __name__ == "__main__":
main(SvcConf)
opensvc-1.8~20170412/var/compliance/com.opensvc/xinetd.py 0000755 0001750 0001750 00000012612 13073467726 023271 0 ustar jkelbert jkelbert #!/usr/bin/env python
# Compliance module metadata for the xinetd service checker: env
# prefix, example rule, description and the collector form definition.
data = {
"default_prefix": "OSVC_COMP_XINETD_",
"example_value": """
{
"gssftp": {
"disable": "no",
"server_args": "-l -a -u 022"
}
}""",
"description": """* Setup and verify a xinetd service configuration
""",
"form_definition": """
Desc: |
A rule defining how a xinetd service should be configured
Inputs:
-
Id: xinetdsvc
Label: Service Name
DisplayModeLabel: service
LabelCss: action16
Mandatory: Yes
Help: The xinetd service name, ie the service file name in /etc/xinetd.d/
Type: string
-
Id: disable
Label: Disable
DisplayModeLabel: Disable
LabelCss: action16
Help: Defines if the xinetd service target state is enabled or disabled
Type: string
Default: yes
Candidates:
- "yes"
- "no"
-
Id: server_args
Label: Server Args
DisplayModeLabel: args
LabelCss: action16
Help: Command line parameter to pass to the service's server executable
Type: string
""",
}
import os
import sys
import json
import pwd
sys.path.append(os.path.dirname(__file__))
from comp import *
# Compliance object checking and fixing properties of services
# configured under /etc/xinetd.d/.
class Xinetd(CompObject):
def __init__(self, prefix=None):
CompObject.__init__(self, prefix=prefix, data=data)
def init(self):
self.base = os.path.join(os.sep, "etc", "xinetd.d")
if not os.path.exists(self.base):
perror(self.base, 'does not exist')
raise NotApplicable()
# svcs: {service_name: {prop: target_value}} merged from all rules
self.svcs = {}
for d in self.get_rules():
self.svcs.update(d)
if len(self.svcs) == 0:
raise NotApplicable()
# cf_d: per-service parsed config cache, filled by get_svc()
self.cf_d = {}
# whitelist of properties the rules are allowed to manage
self.known_props = (
"flags",
"socket_type",
"wait",
"user",
"server",
"server_args",
"disable")
def fixable(self):
return RET_NA
def get_svc(self, svc):
    """Return {prop: value} parsed from /etc/xinetd.d/<svc>, cached in
    self.cf_d. A missing file yields (and caches) an empty dict."""
    if svc in self.cf_d:
        return self.cf_d[svc]
    p = os.path.join(self.base, svc)
    if not os.path.exists(p):
        self.cf_d[svc] = {}
        return self.cf_d[svc]
    if svc not in self.cf_d:
        self.cf_d[svc] = {}
    with open(p, 'r') as f:
        for line in f.read().split('\n'):
            if '=' not in line:
                continue
            # split on the first '=' only: values (e.g. server_args)
            # may contain '=' and such lines were silently dropped by
            # the previous len-2 check
            l = line.split('=', 1)
            var = l[0].strip()
            val = l[1].strip()
            self.cf_d[svc][var] = val
    return self.cf_d[svc]
def fix_item(self, svc, item, target):
if item not in self.known_props:
perror('xinetd service', svc, item+': unknown property in compliance rule')
return RET_ERR
cf = self.get_svc(svc)
if item in cf and cf[item] == target:
return RET_OK
p = os.path.join(self.base, svc)
if not os.path.exists(p):
perror(p, "does not exist")
return RET_ERR
done = False
with open(p, 'r') as f:
buff = f.read().split('\n')
for i, line in enumerate(buff):
if '=' not in line:
continue
l = line.split('=')
if len(l) != 2:
continue
var = l[0].strip()
if var != item:
continue
l[1] = target
buff[i] = "= ".join(l)
done = True
if not done:
with open(p, 'r') as f:
buff = f.read().split('\n')
for i, line in enumerate(buff):
if '=' not in line:
continue
l = line.split('=')
if len(l) != 2:
continue
buff.insert(i, item+" = "+target)
done = True
break
if not done:
perror("failed to set", item, "=", target, "in", p)
return RET_ERR
with open(p, 'w') as f:
f.write("\n".join(buff))
pinfo("set", item, "=", target, "in", p)
return RET_OK
def check_item(self, svc, item, target, verbose=False):
if item not in self.known_props:
perror('xinetd service', svc, item+': unknown property in compliance rule')
return RET_ERR
cf = self.get_svc(svc)
if item in cf and target == cf[item]:
if verbose:
pinfo('xinetd service', svc, item+':', cf[item])
return RET_OK
elif item in cf:
if verbose:
perror('xinetd service', svc, item+':', cf[item], 'target:', target)
else:
if verbose:
perror('xinetd service', svc, item+': unset', 'target:', target)
return RET_ERR
def check_svc(self, svc, props):
r = 0
for prop in props:
r |= self.check_item(svc, prop, props[prop], verbose=True)
return r
def fix_svc(self, svc, props):
r = 0
for prop in props:
r |= self.fix_item(svc, prop, props[prop])
return r
def check(self):
r = 0
for svc, props in self.svcs.items():
r |= self.check_svc(svc, props)
return r
def fix(self):
r = 0
for svc, props in self.svcs.items():
r |= self.fix_svc(svc, props)
return r
if __name__ == "__main__":
main(Xinetd)
opensvc-1.8~20170412/var/compliance/com.opensvc/fileprop.py 0000755 0001750 0001750 00000022637 13073467726 023626 0 ustar jkelbert jkelbert #!/usr/bin/env python
data = {
"default_prefix": "OSVC_COMP_FILEPROP_",
"example_value": """
{
"path": "/some/path/to/file",
"mode": "750",
"uid": 500,
"gid": 500,
}
""",
"description": """* Verify file existance, mode and ownership.
* The collector provides the format with wildcards.
* The module replace the wildcards with contextual values.
In fix() the file is created empty with the right mode & ownership.
""",
"form_definition": """
Desc: |
A fileprop rule, fed to the 'fileprop' compliance object to verify the target file ownership and permissions.
Css: comp48
Outputs:
-
Dest: compliance variable
Class: fileprop
Type: json
Format: dict
Inputs:
-
Id: path
Label: Path
DisplayModeLabel: path
LabelCss: action16
Mandatory: Yes
Help: File path to check the ownership and permissions for.
Type: string
-
Id: mode
Label: Permissions
DisplayModeLabel: perm
LabelCss: action16
Help: "In octal form. Example: 644"
Type: integer
-
Id: uid
Label: Owner
DisplayModeLabel: uid
LabelCss: guy16
Help: Either a user ID or a user name
Type: string or integer
-
Id: gid
Label: Owner group
DisplayModeLabel: gid
LabelCss: guy16
Help: Either a group ID or a group name
Type: string or integer
""",
}
import os
import sys
import json
import stat
import re
import pwd
import grp
sys.path.append(os.path.dirname(__file__))
from comp import *
class CompFileProp(CompObject):
    """Compliance object verifying and fixing a file's existence, mode
    and ownership, from rules like
    {"path": "/some/file", "mode": "750", "uid": 500, "gid": 500}.

    A path ending with '/' designates a directory; otherwise a regular
    file is expected.
    """

    def __init__(self, prefix=None):
        CompObject.__init__(self, prefix=prefix, data=data)

    def init(self):
        self._usr = {}  # user name -> uid resolution cache
        self._grp = {}  # group name -> gid resolution cache
        self.sysname, self.nodename, x, x, self.machine = os.uname()
        self.files = []
        for rule in self.get_rules():
            try:
                self.files += self.add_file(rule)
            except InitError:
                continue
            except ValueError:
                # fix: the original printed os.environ[k] with 'k'
                # undefined, raising NameError instead of reporting
                # the unparseable rule
                perror('fileprop: failed to parse variable', rule)
        if len(self.files) == 0:
            raise NotApplicable()

    def add_file(self, d):
        """Validate a rule dict; normalize uid/gid to int when numeric.

        Returns [d] when usable, [] when the mandatory 'path' key is
        missing.
        """
        if 'path' not in d:
            perror('fileprop: path should be in the dict:', d)
            return []
        try:
            d["uid"] = int(d["uid"])
        except:
            pass
        try:
            d["gid"] = int(d["gid"])
        except:
            pass
        return [d]

    def fixable(self):
        return RET_NA

    def check_file_type(self, f, verbose=False):
        """Check existence and file-vs-directory nature of the path."""
        r = RET_OK
        if not os.path.exists(f["path"].rstrip("/")):
            if verbose: perror("fileprop:", f["path"], "does not exist")
            r = RET_ERR
        elif f["path"].endswith("/") and not os.path.isdir(f["path"]):
            if verbose: perror("fileprop:", f["path"], "exists but is not a directory")
            r = RET_ERR
        elif not f["path"].endswith("/") and os.path.isdir(f["path"]):
            if verbose: perror("fileprop:", f["path"], "exists but is a directory")
            r = RET_ERR
        return r

    def check_file_mode(self, f, verbose=False):
        """Compare the file's octal permission bits with the rule's mode."""
        if 'mode' not in f:
            return RET_OK
        try:
            # "%o" is stable across python versions, unlike oct()
            # ("0750" on py2, "0o750" on py3), and never needs lstrip
            mode = "%o" % stat.S_IMODE(os.stat(f['path']).st_mode)
        except:
            if verbose: perror("fileprop:", f['path'], 'can not stat file')
            return RET_ERR
        if mode != str(f['mode']):
            if verbose: perror("fileprop:", f['path'], 'mode should be %s but is %s'%(f['mode'], mode))
            return RET_ERR
        return RET_OK

    def get_uid(self, uid):
        """Resolve a user name to its uid, with caching.

        Integer uids pass through unchanged. Raises ComplianceError
        when the user name is unknown to the passwd database.
        """
        if uid in self._usr:
            return self._usr[uid]
        tuid = uid
        if not isinstance(uid, int):
            # a name: resolve it (works on py2 str/unicode and py3 str)
            try:
                info = pwd.getpwnam(uid)
                tuid = info[2]
                self._usr[uid] = tuid
            except:
                perror("fileprop:", "user %s does not exist"%uid)
                raise ComplianceError()
        return tuid

    def get_gid(self, gid):
        """Resolve a group name to its gid, with caching.

        Integer gids pass through unchanged. Raises ComplianceError
        when the group name is unknown to the group database.
        """
        if gid in self._grp:
            return self._grp[gid]
        tgid = gid
        if not isinstance(gid, int):
            try:
                info = grp.getgrnam(gid)
                tgid = info[2]
                self._grp[gid] = tgid
            except:
                perror("fileprop:", "group %s does not exist"%gid)
                raise ComplianceError()
        return tgid

    def check_file_uid(self, f, verbose=False):
        if 'uid' not in f:
            return RET_OK
        tuid = self.get_uid(f['uid'])
        try:
            uid = os.stat(f['path']).st_uid
        except:
            if verbose: perror("fileprop:", f['path'], 'can not stat file')
            return RET_ERR
        if uid != tuid:
            if verbose: perror("fileprop:", f['path'], 'uid should be %s but is %s'%(tuid, str(uid)))
            return RET_ERR
        return RET_OK

    def check_file_gid(self, f, verbose=False):
        if 'gid' not in f:
            return RET_OK
        tgid = self.get_gid(f['gid'])
        try:
            gid = os.stat(f['path']).st_gid
        except:
            if verbose: perror("fileprop:", f['path'], 'can not stat file')
            return RET_ERR
        if gid != tgid:
            if verbose: perror("fileprop:", f['path'], 'gid should be %s but is %s'%(tgid, str(gid)))
            return RET_ERR
        return RET_OK

    def check_file_exists(self, f):
        if not os.path.exists(f['path']):
            return RET_ERR
        return RET_OK

    def check_file(self, f, verbose=False):
        """Aggregate type, mode, uid and gid checks for one rule."""
        if self.check_file_type(f, verbose) == RET_ERR:
            # no point checking properties of a missing/mistyped path
            return RET_ERR
        r = 0
        r |= self.check_file_mode(f, verbose)
        r |= self.check_file_uid(f, verbose)
        r |= self.check_file_gid(f, verbose)
        if r == 0 and verbose:
            pinfo("fileprop:", f['path'], "is ok")
        return r

    def fix_file_mode(self, f):
        if 'mode' not in f:
            return RET_OK
        if self.check_file_mode(f) == RET_OK:
            return RET_OK
        try:
            pinfo("fileprop:", "%s mode set to %s"%(f['path'], str(f['mode'])))
            os.chmod(f['path'], int(str(f['mode']), 8))
        except:
            return RET_ERR
        return RET_OK

    def fix_file_owner(self, f):
        """chown only the non-compliant dimension(s); -1 means 'keep'."""
        uid = -1
        gid = -1
        if 'uid' not in f and 'gid' not in f:
            return RET_OK
        if 'uid' in f and self.check_file_uid(f) != RET_OK:
            uid = self.get_uid(f['uid'])
        if 'gid' in f and self.check_file_gid(f) != RET_OK:
            gid = self.get_gid(f['gid'])
        if uid == -1 and gid == -1:
            return RET_OK
        try:
            os.chown(f['path'], uid, gid)
        except:
            perror("fileprop:", "failed to set %s ownership to %d:%d"%(f['path'], uid, gid))
            return RET_ERR
        pinfo("fileprop:", "%s ownership set to %d:%d"%(f['path'], uid, gid))
        return RET_OK

    def fix_file_notexists(self, f):
        """Create the target file or directory (and parents) if needed.

        Also converts a regular file into a directory when the rule ends
        with '/', but refuses to destroy a directory to create a file.
        """
        if not os.path.exists(f['path'].rstrip("/")):
            if f['path'].endswith("/"):
                # directory target: create the full tree
                try:
                    os.makedirs(f['path'])
                    pinfo("fileprop:", f['path'], "created")
                except:
                    perror("fileprop:", "failed to create", f['path'])
                    return RET_ERR
                return RET_OK
            else:
                # file target: ensure the parent dir, then touch
                dirname = os.path.dirname(f['path'])
                if not os.path.exists(dirname):
                    pinfo("fileprop:", "create", dirname)
                    try:
                        os.makedirs(dirname)
                    except Exception as e:
                        perror("fileprop:", "failed to create", dirname)
                        return RET_ERR
                pinfo("fileprop:", "touch", f['path'])
                open(f['path'], 'a').close()
        elif f['path'].endswith("/") and not os.path.isdir(f['path']):
            # a regular file sits where a directory is wanted: replace it
            pinfo("fileprop:", "delete file", f['path'].rstrip("/"))
            try:
                os.unlink(f['path'].rstrip("/"))
            except Exception as e:
                perror("fileprop:", e)
                return RET_ERR
            pinfo("fileprop:", "make directory", f['path'])
            try:
                os.makedirs(f['path'])
            except Exception as e:
                perror("fileprop:", e)
                return RET_ERR
        elif not f['path'].endswith("/") and os.path.isdir(f['path']):
            perror("fileprop:", "cowardly refusing to remove the existing", f['path'], "directory to create a regular file")
            return RET_ERR
        if self.check_file_exists(f) == RET_OK:
            return RET_OK
        d = os.path.dirname(f['path'])
        if not os.path.exists(d):
            os.makedirs(d)
            try:
                # best-effort: missing uid/gid keys raise and are ignored
                os.chown(d, f['uid'], f['gid'])
            except:
                pass
        try:
            with open(f['path'], 'w') as fi:
                fi.write('')
        except:
            return RET_ERR
        pinfo("fileprop:", f['path'], "created")
        return RET_OK

    def check(self):
        r = 0
        for f in self.files:
            r |= self.check_file(f, verbose=True)
        return r

    def fix(self):
        r = 0
        for f in self.files:
            r |= self.fix_file_notexists(f)
            r |= self.fix_file_mode(f)
            r |= self.fix_file_owner(f)
        return r
if __name__ == "__main__":
main(CompFileProp)
opensvc-1.8~20170412/var/compliance/com.opensvc/symlink.py 0000755 0001750 0001750 00000007351 13073467726 023470 0 ustar jkelbert jkelbert #!/usr/bin/env python
data = {
"default_prefix": "OSVC_COMP_FILE_",
"example_value": """
{
"symlink": "/tmp/foo",
"target": "/tmp/bar"
}
""",
"description": """* Verify symlink's existance.
* The collector provides the format with wildcards.
* The module replace the wildcards with contextual values.
* In the 'fix' the symlink is created (and intermediate dirs if required).
* There is no check or fix for target's existance.
* There is no check or fix for mode or ownership of either symlink or target.
""",
"form_definition": """
Desc: |
A symfile rule, fed to the 'symlink' compliance object to create a Unix symbolic link.
Css: comp48
Outputs:
-
Dest: compliance variable
Class: symlink
Type: json
Format: dict
Inputs:
-
Id: symlink
Label: Symlink path
DisplayModeLabel: symlink
LabelCss: hd16
Mandatory: Yes
Help: The full path of the symbolic link to check or create.
Type: string
-
Id: target
Label: Target path
DisplayModeLabel: target
LabelCss: hd16
Mandatory: Yes
Help: The full path of the target file pointed by the symlink.
Type: string
"""
}
import os
import errno
import sys
import stat
import re
import pwd
import grp
sys.path.append(os.path.dirname(__file__))
from comp import *
class InitError(Exception):
pass
class CompSymlink(CompObject):
    """Compliance object creating and verifying Unix symbolic links,
    from rules like {"symlink": "/tmp/foo", "target": "/tmp/bar"}.

    Only the link's existence and destination are managed: target
    existence, modes and ownership are out of scope.
    """

    def __init__(self, prefix='OSVC_COMP_SYMLINK_'):
        CompObject.__init__(self, prefix=prefix, data=data)

    def init(self):
        self.sysname, self.nodename, x, x, self.machine = os.uname()
        self.symlinks = []
        for rule in self.get_rules():
            try:
                self.symlinks += self.add_symlink(rule)
            except InitError:
                continue
            except ValueError:
                perror('symlink: failed to parse variable', rule)

    def add_symlink(self, v):
        """Validate a rule dict; return [v] when usable, [] otherwise.

        Fix: the original error paths printed the undefined name 'd'
        (the parameter is 'v'), raising NameError instead of emitting
        the diagnostic.
        """
        if 'symlink' not in v:
            perror('symlink should be in the dict:', v)
            return []
        if 'target' not in v:
            perror('target should be in the dict:', v)
            return []
        return [v]

    def fixable(self):
        return RET_NA

    def check_symlink_exists(self, f):
        if not os.path.islink(f['symlink']):
            return RET_ERR
        return RET_OK

    def check_symlink(self, f, verbose=False):
        """Check that the symlink exists and points to the rule target."""
        if not os.path.islink(f['symlink']):
            perror("symlink", f['symlink'], "does not exist")
            return RET_ERR
        if os.readlink(f['symlink']) != f['target']:
            perror("symlink", f['symlink'], "does not point to", f['target'])
            return RET_ERR
        if verbose:
            pinfo("symlink", f['symlink'], "->", f['target'], "is ok")
        return RET_OK

    def fix_symlink_notexists(self, f):
        """Create the symlink, making intermediate directories if needed."""
        if self.check_symlink_exists(f) == RET_OK:
            return RET_OK
        d = os.path.dirname(f['symlink'])
        if not os.path.exists(d):
            try:
                os.makedirs(d)
            except OSError as e:
                # symbolic constant instead of the magic number 20
                if e.errno == errno.ENOTDIR:
                    perror("symlink: can not create dir", d, "to host the symlink", f['symlink'], ": a parent is not a directory")
                    return RET_ERR
                raise
        try:
            os.symlink(f['target'], f['symlink'])
        except:
            return RET_ERR
        pinfo("symlink", f['symlink'], "->", f['target'], "created")
        return RET_OK

    def check(self):
        r = 0
        for f in self.symlinks:
            r |= self.check_symlink(f, verbose=True)
        return r

    def fix(self):
        r = 0
        for f in self.symlinks:
            r |= self.fix_symlink_notexists(f)
        return r
if __name__ == "__main__":
main(CompSymlink)
opensvc-1.8~20170412/osvcenv.cmd 0000644 0001750 0001750 00000000300 13073467726 016424 0 ustar jkelbert jkelbert @echo off
set OSVCROOT=C:\Program Files\opensvc
set OSVCPYTHONROOT=%OSVCROOT%\python
set PYTHONPATH=%OSVCROOT%\lib
set OSVCPYTHONEXEC=%OSVCPYTHONROOT%\python.exe
call inpath.cmd OSVCPYTHONROOT opensvc-1.8~20170412/.gitignore 0000644 0001750 0001750 00000000372 13073467726 016255 0 ustar jkelbert jkelbert *.pyc
*.class
*.swp
etc/
log/
python/
var/
test/
tmp/
bin/python
lib/version.py
bin/pkg/release_*
bin/pkg/winbuilder/wxs/opensvcfiles.wxs
.hg/
.hgignore
lib/rcLocalEnv.py
*.wixobj
*.wixpdb
*.msi
nohup.out
.project
.pydevproject
bin/init/opensvc.init
opensvc-1.8~20170412/cron.cmd 0000644 0001750 0001750 00000000112 13073467726 015703 0 ustar jkelbert jkelbert @echo off
call osvcenv.cmd
"%OSVCPYTHONEXEC%" "%OSVCROOT%\cron\opensvc" %* opensvc-1.8~20170412/lib/ 0000755 0001750 0001750 00000000000 13073467726 015031 5 ustar jkelbert jkelbert opensvc-1.8~20170412/lib/resIpGce.py 0000644 0001750 0001750 00000015041 13073467726 017105 0 ustar jkelbert jkelbert import resIp
import os
import rcStatus
from rcGlobalEnv import rcEnv
import rcExceptions as ex
from rcUtilities import getaddr, justcall
import json
import rcGce
rcIfconfig = __import__('rcIfconfig'+rcEnv.sysname)
class Ip(resIp.Ip, rcGce.Gce):
    """GCE ip resource.

    An address is considered up when two routes are installed:
    - a local /32 route on the node's ipdev
    - optionally, a GCE network route (routename) pointing the /32 at
      this instance, managed through the gcloud CLI.
    """

    def __init__(self,
                 rid=None,
                 ipname=None,
                 ipdev=None,
                 eip=None,
                 routename=None,
                 gce_zone=None,
                 **kwargs):
        resIp.Ip.__init__(self,
                          rid=rid,
                          ipname=ipname,
                          ipdev=ipdev,
                          **kwargs)
        self.label = "gce ip %s@%s" % (ipname, ipdev)
        if eip:
            self.label += ", eip %s" % eip

        self.eip = eip
        self.routename = routename
        self.gce_zone = gce_zone
        # cache for route data
        self.gce_route_data = None

    def start_local_route(self):
        # idempotent: skip when the /32 route is already present
        if self.has_local_route():
            self.log.info("ip route %s/32 dev %s is already installed" % (self.addr, self.ipdev))
            return
        self.add_local_route()

    def stop_local_route(self):
        if not self.has_local_route():
            self.log.info("ip route %s/32 dev %s is already uninstalled" % (self.addr, self.ipdev))
            return
        self.del_local_route()

    def add_local_route(self):
        # 'replace' is used instead of 'add' so a stale route is overwritten
        cmd = ["ip", "route", "replace", self.addr+"/32", "dev", self.ipdev]
        self.vcall(cmd)

    def del_local_route(self):
        cmd = ["ip", "route", "del", self.addr+"/32", "dev", self.ipdev]
        self.vcall(cmd)

    def has_local_route(self):
        """Return True when the /32 route on ipdev is installed locally."""
        cmd = ["ip", "route", "list", self.addr+"/32", "dev", self.ipdev]
        out, err, ret = justcall(cmd)
        if ret != 0:
            return False
        if out == "":
            return False
        return True

    def start_gce_route(self):
        """Point the GCE route at this instance, replacing a stale one."""
        if not self.routename:
            return
        if self.has_gce_route():
            self.log.info("gce route %s, %s to instance %s is already installed" % (self.routename, self.addr, rcEnv.nodename))
            return
        if self.exist_gce_route():
            # route exists but targets another instance or range: recreate
            self.del_gce_route()
        self.add_gce_route()
        # refresh the service-level cache with what we just created
        self.svc.gce_routes_cache[self.routename] = {
            "destRange": self.addr+"/32",
            "nextHopInstance": rcEnv.nodename,
        }

    def stop_gce_route(self):
        if not self.routename:
            return
        if not self.has_gce_route():
            self.log.info("gce route %s, %s to instance %s is already uninstalled" % (self.routename, self.addr, rcEnv.nodename))
            return
        self.del_gce_route()
        self.get_gce_routes_list(refresh=True)
        del(self.svc.gce_routes_cache[self.routename])

    def add_gce_route(self):
        cmd = ["gcloud", "compute", "routes", "-q", "create", self.routename,
               "--destination-range", self.addr+"/32",
               "--next-hop-instance", rcEnv.nodename,
               "--next-hop-instance-zone", self.gce_zone]
        self.vcall(cmd)

    def del_gce_route(self):
        cmd = ["gcloud", "compute", "routes", "-q", "delete", self.routename]
        self.vcall(cmd)

    def get_gce_route_data(self, refresh=False):
        """Return this resource's route dict from the cached list, or None."""
        data = self.get_gce_routes_list(refresh=refresh)
        if data is None:
            return
        if not self.routename in data:
            return
        return data[self.routename]

    def get_gce_routes_list(self, refresh=False):
        # the route list is cached on the service object, shared by all
        # ip resources, so gcloud is invoked once per action
        if not refresh and hasattr(self.svc, "gce_routes_cache"):
            return self.svc.gce_routes_cache
        self.svc.gce_routes_cache = self._get_gce_routes_list()
        return self.svc.gce_routes_cache

    def _get_gce_routes_list(self):
        """Fetch all routenames of the service's ip resources in one
        gcloud call. Return {route name: route dict}."""
        if not self.routename:
            return
        routenames = " ".join([r.routename for r in self.svc.get_resources("ip") if hasattr(r, "routename")])
        self.wait_gce_auth()
        cmd = ["gcloud", "compute", "routes", "list", "--format", "json", routenames]
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError("gcloud route describe returned with error: %s, %s" % (out, err))
        try:
            data = json.loads(out)
        except:
            raise ex.excError("unable to parse gce route data: %s" % out)
        h = {}
        for route in data:
            h[route["name"]] = route
        return h

    def exist_gce_route(self):
        """Return True when a GCE route with our name exists (any target)."""
        if not self.routename:
            return True
        data = self.get_gce_route_data()
        if not data:
            return False
        if data:
            return True
        return False

    def has_gce_route(self):
        """Return True when the GCE route exists AND targets our /32 on
        this instance."""
        if not self.routename:
            # no route wanted: vacuously satisfied
            return True
        data = self.get_gce_route_data()
        if not data:
            return False
        if data.get("destRange") != self.addr+"/32":
            return False
        # nextHopInstance is a url; the last path element is the name
        if data.get("nextHopInstance").split("/")[-1] != rcEnv.nodename:
            return False
        return True

    def is_up(self):
        """Returns True if ip is associated with this node
        """
        self.getaddr()
        if not self.has_local_route():
            return False
        if not self.has_gce_route():
            return False
        return True

    def _status(self, verbose=False):
        self.getaddr()
        try:
            local_status = self.has_local_route()
            if not local_status:
                self.status_log("local route is not installed")
        except ex.excError as e:
            self.status_log(str(e))
            local_status = False
        try:
            gce_status = self.has_gce_route()
            if not gce_status:
                self.status_log("gce route is not installed")
        except ex.excError as e:
            self.status_log(str(e))
            gce_status = False
        # bitwise and on booleans: both routes must be up
        s = local_status & gce_status
        if rcEnv.nodename in self.always_on:
            if s:
                return rcStatus.STDBY_UP
            else:
                return rcStatus.STDBY_DOWN
        else:
            if s:
                return rcStatus.UP
            else:
                return rcStatus.DOWN

    def check_ping(self, count=1, timeout=5):
        # ping check is irrelevant for routed /32 addresses
        pass

    def start(self):
        self.getaddr()
        self.start_local_route()
        self.start_gce_route()

    def stop(self):
        self.getaddr()
        self.stop_local_route()
        # don't unconfigure the gce route: too long. let the start replace it if necessary.

    #def provision(self):
    #    m = __import__("provIpGce")
    #    prov = getattr(m, "ProvisioningIp")(self)
    #    prov.provisioner()
opensvc-1.8~20170412/lib/rcUpdatePkgOSF1.py 0000644 0001750 0001750 00000001065 13073467726 020247 0 ustar jkelbert jkelbert from __future__ import print_function
import os
import sys
import tarfile
from rcGlobalEnv import rcEnv
repo_subdir = "tar"
def update(fpath):
    """Unpack the update tarball <fpath> over / then run the postinstall
    script.

    The tarball is removed in all cases. Returns 1 on unpack failure,
    else the os.system() status of the postinstall command.

    Fixes over the original:
    - the saved working directory is now actually restored (it was
      captured in 'oldpath' but never used)
    - the tar handle is closed even when extractall() raises
    - a corrupt/unopenable archive is reported instead of propagating
    """
    oldpath = os.getcwd()
    os.chdir("/")
    try:
        try:
            tar = tarfile.open(fpath)
            try:
                tar.extractall()
            finally:
                tar.close()
        except Exception:
            # best-effort removal of the broken archive
            try:
                os.unlink(fpath)
            except OSError:
                pass
            print("failed to unpack", file=sys.stderr)
            return 1
    finally:
        # restore the caller's working directory
        os.chdir(oldpath)
    try:
        os.unlink(fpath)
    except OSError:
        pass
    cmd = sys.executable + ' ' + rcEnv.postinstall
    return os.system(cmd)
opensvc-1.8~20170412/lib/resContainerVz.py 0000644 0001750 0001750 00000011334 13073467726 020361 0 ustar jkelbert jkelbert import os
import rcStatus
import resources as Res
from rcUtilities import which, qcall, justcall
import resContainer
import rcExceptions as ex
from rcGlobalEnv import rcEnv
class Vz(resContainer.Container):
    """OpenVZ/Virtuozzo container resource driver, driving the vzctl
    command and reading /etc/vz/conf/<name>.conf."""

    def files_to_sync(self):
        # only the container config file needs replication to peers
        return [self._cf]

    def get_cf_value(self, param):
        """Return the value of <param> from the container config file,
        or None when absent, commented out, or the config is missing."""
        value = None
        try:
            cf = self.cf()
        except:
            return value
        with open(cf, 'r') as f:
            for line in f.readlines():
                if param not in line:
                    continue
                if line.strip()[0] == '#':
                    continue
                l = line.replace('\n', '').split('=')
                if len(l) < 2:
                    continue
                if l[0].strip() != param:
                    continue
                value = ' '.join(l[1:]).strip().rstrip('/')
                break
        return value

    def get_rootfs(self):
        """Return the container private area (VE_PRIVATE, with $VEID
        expanded to the container name).

        Raises ex.excError when undeterminable. Fix: the original did
        'return ex.excError', handing the exception class back to
        callers instead of signalling the failure.
        """
        with open(self.cf(), 'r') as f:
            for line in f:
                if 'VE_PRIVATE' in line:
                    return line.strip('\n').split('=')[1].strip('"').replace('$VEID', self.name)
        self.log.error("could not determine lxc container rootfs")
        raise ex.excError

    def rcp_from(self, src, dst):
        """Copy <src> from inside the container to host path <dst>."""
        rootfs = self.get_rootfs()
        if len(rootfs) == 0:
            raise ex.excError()
        src = rootfs + src
        cmd = ['cp', src, dst]
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError("'%s' execution error:\n%s"%(' '.join(cmd), err))
        return out, err, ret

    def rcp(self, src, dst):
        """Copy host path <src> to <dst> inside the container."""
        rootfs = self.get_rootfs()
        if len(rootfs) == 0:
            raise ex.excError()
        dst = rootfs + dst
        cmd = ['cp', src, dst]
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError("'%s' execution error:\n%s"%(' '.join(cmd), err))
        return out, err, ret

    def install_drp_flag(self):
        """Create the .drp_flag marker file at the container rootfs."""
        rootfs = self.get_rootfs()
        flag = os.path.join(rootfs, ".drp_flag")
        self.log.info("install drp flag in container : %s"%flag)
        # the with statement closes the file; the original's extra
        # f.close() was redundant
        with open(flag, 'w') as f:
            f.write(' ')

    def vzctl(self, action, options=None):
        """Run 'vzctl <action> <name> [options]'; raise on failure.

        The mutable default argument of the original is replaced by
        None, a safer idiom with identical behavior.
        """
        if options is None:
            options = []
        cmd = ['vzctl', action, self.name] + options
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ex.excError
        return out

    def container_start(self):
        self.vzctl('start')

    def container_stop(self):
        self.vzctl('stop')

    def container_forcestop(self):
        # no force-stop equivalent with vzctl
        raise ex.excError

    def operational(self):
        """Return True when a command can be executed in the container."""
        cmd = self.runmethod + ['/sbin/ifconfig', '-a']
        ret = qcall(cmd)
        return ret == 0

    def is_up_on(self, nodename):
        return self.is_up(nodename)

    def is_up(self, nodename=None):
        """Return True when 'vzctl status' reports the container as
        'exist mounted running', locally or on <nodename> via rsh.

        Expected output: 'CTID 101 exist mounted running'
        """
        cmd = ['vzctl', 'status', self.name]
        if nodename is not None:
            cmd = rcEnv.rsh.split() + [nodename] + cmd
        ret, out, err = self.call(cmd)
        if ret != 0:
            return False
        l = out.split()
        if len(l) != 5:
            return False
        return l[2:] == ['exist', 'mounted', 'running']

    def get_container_info(self):
        # resource usage reporting is not implemented for vz
        return {'vcpus': '0', 'vmem': '0'}

    def check_manual_boot(self):
        """Return False when the container autostarts at boot (ONBOOT
        yes), True otherwise or when the config is unreadable."""
        try:
            cf = self.cf()
        except:
            return True
        with open(cf, 'r') as f:
            for line in f:
                if 'ONBOOT' in line and 'yes' in line:
                    return False
        return True

    def check_capabilities(self):
        if not which('vzctl'):
            self.log.debug("vzctl is not in PATH")
            return False
        return True

    def cf(self):
        """Return the container config path; raise when it is missing."""
        if not os.path.exists(self._cf):
            self.log.error("%s does not exist"%self._cf)
            raise ex.excError
        return self._cf

    def __init__(self,
                 rid,
                 name,
                 guestos="Linux",
                 osvc_root_path=None,
                 **kwargs):
        resContainer.Container.__init__(self,
                                        rid=rid,
                                        name=name,
                                        type="container.vz",
                                        guestos=guestos,
                                        osvc_root_path=osvc_root_path,
                                        **kwargs)
        self._cf = os.path.join(os.sep, 'etc', 'vz', 'conf', name+'.conf')
        self.runmethod = ['vzctl', 'exec', name]

    def __str__(self):
        return "%s name=%s" % (Res.Resource.__str__(self), self.name)
opensvc-1.8~20170412/lib/checkMpathPowerpathAIX.py 0000777 0001750 0001750 00000000000 13073467726 026216 2checkMpathPowerpath.py ustar jkelbert jkelbert opensvc-1.8~20170412/lib/checkFsUsageDarwin.py 0000644 0001750 0001750 00000002674 13073467726 021114 0 ustar jkelbert jkelbert import checks
from rcUtilities import justcall
class check(checks.check):
    """Collect local filesystem usage percentages on Darwin, by parsing
    'df -lP' output. Each kept mount yields a dict with the mount point,
    its use%, and the opensvc service owning it (if any)."""

    chk_type = "fs_u"

    def find_svc(self, mountpt):
        """Return the svcname of the service with an fs resource mounted
        on <mountpt>, or '' when none matches."""
        for svc in self.svcs:
            for res in svc.get_resources('fs'):
                if res.mount_point == mountpt:
                    return svc.svcname
        return ''

    def _skip_entry(self, dev, mnt):
        """Return True for df entries that must not be reported."""
        # bind mounts (device is a path, not /dev or //): metrics come
        # from the source filesystem anyway
        if dev.startswith('/') and not dev.startswith('/dev') and not dev.startswith('//'):
            return True
        if mnt.startswith('/Volumes') or mnt.startswith('/run') or mnt.startswith('/sys/'):
            return True
        if mnt == "/dev/shm":
            return True
        # do not report osvc sync snapshots fs usage
        if "osvc_sync_" in dev:
            return True
        return False

    def do_check(self):
        out, err, ret = justcall(['df', '-lP'])
        if ret != 0:
            return self.undef
        lines = out.split('\n')
        if len(lines) < 2:
            return self.undef
        data = []
        # first line is the df header
        for line in lines[1:]:
            fields = line.split()
            if len(fields) != 6:
                continue
            dev, mnt, pct = fields[0], fields[5], fields[4]
            if self._skip_entry(dev, mnt):
                continue
            data.append({
                'chk_instance': mnt,
                'chk_value': pct,
                'chk_svcname': self.find_svc(mnt),
            })
        return data
opensvc-1.8~20170412/lib/checkRaidSas2.py 0000644 0001750 0001750 00000007265 13073467726 020023 0 ustar jkelbert jkelbert import checks
import os
from rcUtilities import justcall, which
from rcGlobalEnv import rcEnv
class check(checks.check):
    """Check LSI SAS2 controller health by parsing sas2ircu output.

    Reports one metric per IR volume, physical disk and enclosure
    (0 = healthy, 1 = degraded), plus a global error counter.
    """
    # extra locations searched for the sas2ircu binary besides PATH
    prefixes = [os.path.join(os.sep, "usr", "local", "admin")]
    sas2ircu = "sas2ircu"
    chk_type = "raid"
    chk_name = "LSI SAS200"

    def find_sas2ircu(self):
        """Return the sas2ircu command path, or None when not installed."""
        if which(self.sas2ircu):
            return self.sas2ircu
        for prefix in self.prefixes:
            sas2ircu = os.path.join(prefix, self.sas2ircu)
            if os.path.exists(sas2ircu):
                return sas2ircu
        return

    def do_check(self):
        r = self.do_check_ldpdinfo()
        return r

    def do_check_ldpdinfo(self):
        """Enumerate SAS20xx controllers ('LIST') then parse each
        controller's 'DISPLAY' output with a small state machine:
        chk_dsk == 2 -> inside an IR volume section
        chk_dsk == 1 -> inside a hard disk section
        chk_dsk == 3 -> inside an enclosure services section
        """
        sas2ircu = self.find_sas2ircu()
        if sas2ircu is None:
            return self.undef
        # sas2ircu writes a log file in the current directory
        os.chdir(rcEnv.pathtmp)
        logs = [os.path.join(rcEnv.pathtmp, 'sas2ircu.log')]
        for log in logs:
            if not os.path.exists(log):
                continue
            os.unlink(log)
        cmd = [sas2ircu, 'LIST']
        out, err, ret = justcall(cmd)
        if ret != 0:
            return self.undef
        # collect controller indices from the LIST output
        idx = []
        lines = out.split('\n')
        for line in lines:
            if 'SAS20' in line:
                l = line.split()
                idx.append(l[0])
        r = []
        errs = 0
        for ix in idx:
            cmd = [sas2ircu, str(ix), 'DISPLAY']
            out, err, ret = justcall(cmd)
            lines = out.split('\n')
            ctrl = "ctrl:"+str(ix)
            slot=""
            chk_dsk = 0
            for line in lines:
                if line.startswith('IR volume'):
                    chk_dsk = 2
                if line.startswith(' Volume Name') and 'Virtual Disk' in line and (chk_dsk == 2):
                    l = line.split()
                    slot = 'LD'+str(l[-1])
                if line.startswith(' Status of volume') and (chk_dsk == 2):
                    if 'Okay (OKY)' not in line:
                        r.append({ 'chk_instance': ctrl+','+slot, 'chk_value': '1', 'chk_svcname': '', })
                        errs += 1
                    else :
                        r.append({ 'chk_instance': ctrl+','+slot, 'chk_value': '0', 'chk_svcname': '', })
                if line.startswith('Device is a Hard disk'):
                    chk_dsk = 1
                if line.startswith(' Enclosure #') and (chk_dsk == 1):
                    l = line.split()
                    enc = l[-1]
                if line.startswith(' Slot #') and (chk_dsk == 1):
                    l = line.split()
                    slot = 'PD'+str(enc)+':'+str(l[-1])
                if line.startswith(' State') and (chk_dsk == 1):
                    if 'Optimal (OPT)' not in line:
                        r.append({ 'chk_instance': ctrl+','+slot, 'chk_value': '1', 'chk_svcname': '', })
                        errs += 1
                    else :
                        r.append({ 'chk_instance': ctrl+','+slot, 'chk_value': '0', 'chk_svcname': '', })
                if line.startswith('Device is a Enclosure services device'):
                    chk_dsk = 3
                if line.startswith(' Enclosure #') and (chk_dsk == 3):
                    l = line.split()
                    slot = 'Enc'+str(l[-1])
                if line.startswith(' State') and (chk_dsk == 3):
                    if 'Standby (SBY)' not in line:
                        r.append({ 'chk_instance': ctrl+','+slot, 'chk_value': '1', 'chk_svcname': '', })
                        errs += 1
                    else :
                        r.append({ 'chk_instance': ctrl+','+slot, 'chk_value': '0', 'chk_svcname': '', })
        # global status: total number of degraded components
        r.append({ 'chk_instance': 'all SAS20*', 'chk_value': str(errs), 'chk_svcname': '', })
        return r
opensvc-1.8~20170412/lib/rcDcs.py 0000644 0001750 0001750 00000022223 13073467726 016442 0 ustar jkelbert jkelbert from rcUtilities import justcall, which
from xml.etree.ElementTree import XML, fromstring
import rcExceptions as ex
import os
import ConfigParser
import uuid
from rcGlobalEnv import rcEnv
if rcEnv.pathbin not in os.environ['PATH']:
os.environ['PATH'] += ":"+rcEnv.pathbin
def dcscmd(cmd, manager, username, password, dcs=None, conn=None):
    """Run a DataCore powershell command on <manager> through ssh.

    When <dcs> is given, the command is wrapped between
    connect-dcsserver / disconnect-dcsserver calls using <conn> as the
    session identifier (a random one is generated when not provided).

    Returns (out, err, ret) from justcall. Raises ex.excError when the
    remote output reports an ErrorId. Returns None for an empty command.
    """
    if conn is None:
        conn = uuid.uuid1().hex
    if len(cmd) == 0:
        return
    _cmd = ['ssh', manager]
    if dcs is not None:
        _cmd += ["connect-dcsserver -server %s -username %s -password %s -connection %s ; "%(dcs, username, password, conn)+\
                 cmd+ " ; disconnect-dcsserver -connection %s"%conn]
    else:
        _cmd += [cmd]
    out, err, ret = justcall(_cmd)
    if "ErrorId" in err:
        # dump the failing command and its output for diagnosis
        print(_cmd)
        print(out)
        raise ex.excError("dcs command execution error")
    try:
        # python2: re-encode latin1 output as utf8; best-effort
        out = out.decode("latin1").encode("utf8")
    except:
        pass
    return out, err, ret
class Dcss(object):
    """Collection of DataCore servers declared in the opensvc auth
    config file (sections with type=datacore).

    Iterating over a Dcss yields Dcs instances. When <objects> is a
    non-empty list, only the named dcs servers are instantiated.
    """
    # NOTE(review): class-level list shared by all instances — repeated
    # instantiation appends to the same list
    arrays = []

    def __init__(self, objects=[]):
        self.objects = objects
        if len(objects) > 0:
            self.filtering = True
        else:
            self.filtering = False
        cf = rcEnv.authconf
        if not os.path.exists(cf):
            return
        conf = ConfigParser.RawConfigParser()
        conf.read(cf)
        m = []
        for s in conf.sections():
            try:
                stype = conf.get(s, 'type')
            except:
                continue
            if stype != "datacore":
                continue
            try:
                # section name is the ssh management host
                manager = s
                dcs = conf.get(s, 'dcs').split()
                username = conf.get(s, 'username')
                password = conf.get(s, 'password')
                m += [(manager, dcs, username, password)]
            except:
                print("error parsing section", s)
                pass
        del(conf)
        done = []
        for manager, dcs, username, password in m:
            for name in dcs:
                if self.filtering and name not in self.objects:
                    continue
                if name in done:
                    # a dcs server may be listed by several managers
                    continue
                self.arrays.append(Dcs(name, manager, username, password))
                done.append(name)

    def __iter__(self):
        for array in self.arrays:
            yield(array)

    def get_dcs(self, domain):
        """Return the Dcs whose server group alias matches <domain>,
        or None."""
        for dcs in self.arrays:
            _domain = dcs.get_domain()
            if _domain == domain:
                return dcs
        return None
class Dcs(object):
    """One DataCore SANsymphony server, driven through powershell
    cmdlets run over ssh on the manager host (see dcscmd)."""

    def __init__(self, name, manager, username, password, conn=None):
        self.name = name
        self.manager = manager
        self.username = username
        self.password = password
        self.conn = conn
        if conn is None:
            # session identifier reused across all commands of this object
            self.conn = uuid.uuid1().hex
        # inventory datasets this driver can dump (get_<key> methods)
        self.keys = ['dcsservergroup',
                     'dcsserver',
                     'dcspool',
                     'dcspoolperf',
                     'dcslogicaldisk',
                     'dcslogicaldiskperf',
                     'dcsvirtualdisk',
                     'dcsphysicaldisk',
                     'dcsdiskpath',
                     'dcsport',
                     'dcspoolmember']

    def get_domain(self):
        """Return the server group alias, cached after first lookup.
        Returns 'unknown' when it cannot be parsed."""
        if hasattr(self, 'domain'):
            return self.domain
        buff = self.get_dcsservergroup()
        for line in buff.split('\n'):
            if not line.startswith('Alias'):
                continue
            self.domain = line.split(': ')[-1].strip()
            break
        if hasattr(self, 'domain'):
            return self.domain
        return "unknown"

    def dcscmd(self, cmd):
        # run a cmdlet within this object's dcs session
        return dcscmd(cmd, self.manager, self.username, self.password, dcs=self.name, conn=self.conn)

    def get_dcsservergroup(self):
        cmd = 'get-dcsservergroup -connection %s'%self.conn
        print("%s: %s"%(self.name, cmd))
        buff = self.dcscmd(cmd)[0]
        return buff

    def get_dcsserver(self):
        cmd = 'get-dcsserver -connection %s'%self.conn
        print("%s: %s"%(self.name, cmd))
        buff = self.dcscmd(cmd)[0]
        return buff

    def get_dcspool(self):
        cmd = 'get-dcspool -connection %s'%self.conn
        print("%s: %s"%(self.name, cmd))
        buff = self.dcscmd(cmd)[0]
        return buff

    def get_dcslogicaldisk(self):
        cmd = 'get-dcslogicaldisk -connection %s'%self.conn
        print("%s: %s"%(self.name, cmd))
        buff = self.dcscmd(cmd)[0]
        return buff

    def get_dcsvirtualdisk(self):
        cmd = 'get-dcsvirtualdisk -connection %s'%self.conn
        print("%s: %s"%(self.name, cmd))
        buff = self.dcscmd(cmd)[0]
        return buff

    def get_dcsphysicaldisk(self):
        cmd = 'get-dcsphysicaldisk -connection %s'%self.conn
        print("%s: %s"%(self.name, cmd))
        buff = self.dcscmd(cmd)[0]
        return buff

    def get_dcsdiskpath(self):
        cmd = 'get-dcsdiskpath -connection %s'%self.conn
        print("%s: %s"%(self.name, cmd))
        buff = self.dcscmd(cmd)[0]
        return buff

    def get_dcspoolmember(self):
        cmd = 'get-dcspoolmember -connection %s'%self.conn
        print("%s: %s"%(self.name, cmd))
        buff = self.dcscmd(cmd)[0]
        return buff

    def get_dcspoolperf(self):
        # pool objects piped into the performance counter cmdlet
        cmd = 'get-dcspool -connection %s | get-dcsperformancecounter -connection %s'%(self.conn, self.conn)
        print("%s: %s"%(self.name, cmd))
        buff = self.dcscmd(cmd)[0]
        return buff

    def get_dcslogicaldiskperf(self):
        cmd = 'get-dcslogicaldisk -connection %s | get-dcsperformancecounter -connection %s'%(self.conn, self.conn)
        print("%s: %s"%(self.name, cmd))
        buff = self.dcscmd(cmd)[0]
        return buff

    def get_dcsport(self):
        cmd = 'get-dcsport -connection %s'%self.conn
        print("%s: %s"%(self.name, cmd))
        buff = self.dcscmd(cmd)[0]
        return buff

    def add_vdisk(self, data):
        """Create a virtual disk and serve it to initiators.

        Expected keys in <data>:
        - disk_name: base name (a '.1' suffix is appended)
        - size: size in GB (integer)
        - paths: comma-separated '<initiator>-<target>' couples
          (fc wwpns or iscsi iqn targets)
        - dg_name: '<sds>:<pool>' or '<sds1>:<pool1>,<sds2>:<pool2>'
          for a mirrored disk
        """
        if 'disk_name' not in data:
            raise ex.excError("'disk_name' key is mandatory")
        if 'size' not in data:
            raise ex.excError("'size' key is mandatory")
        if 'paths' not in data:
            raise ex.excError("'paths' key is mandatory")
        data['disk_name'] = data['disk_name'] + '.1'
        l = data['paths'].split(',')
        paths = []
        for path in l:
            if 'iqn' in path:
                # iscsi path: re-attach the split 'iqn...' target part
                c, s = path.split('-iqn')
                s = 'iqn' + s
                paths.append((c, s))
            elif '-' in path:
                c, s = path.split('-')
                paths.append((c, s))
        if len(paths) == 0:
            raise ex.excError("no initiator to present to")
        pools = data['dg_name'].split(',')
        if len(pools) == 2:
            # two pools: create a redundant (mirrored) virtual disk
            _pool1 = pools[0].split(':')
            _pool2 = pools[1].split(':')
            if len(_pool1) != 2 or len(_pool2) != 2:
                raise ex.excError("'dg_name' value is misformatted")
            d = {
                'disk_name': data['disk_name'],
                'size': data['size'],
                'sds1': _pool1[0],
                'sds2': _pool2[0],
                'pool1': _pool1[1],
                'pool2': _pool2[1],
                'conn': self.conn,
            }
            cmd = """$v = Add-DcsVirtualDisk -connection %(conn)s -Name "%(disk_name)s" -Size %(size)dGB -EnableRedundancy -FirstServer %(sds1)s -FirstPool "%(pool1)s" -SecondServer %(sds2)s -SecondPool "%(pool2)s" ;""" % d
        elif len(pools) == 1:
            _pool1 = pools[0].split(':')
            if len(_pool1) != 2:
                raise ex.excError("'dg_name' value is misformatted")
            d = {
                'disk_name': data['disk_name'],
                'size': data['size'],
                'sds1': _pool1[0],
                'pool1': _pool1[1],
                'conn': self.conn,
            }
            cmd = """$v = Add-DcsVirtualDisk -connection %(conn)s -Name "%(disk_name)s" -Size %(size)dGB -Server %(sds1)s -Pool "%(pool1)s" ;""" % d
        else:
            raise ex.excError("'dg_name' value is misformatted")
        # serve the new vdisk to every machine hosting one of the initiators
        # NOTE(review): map() returns an iterator on py3 while
        # get_machines mutates ids by index — py2 semantics assumed
        for machine in self.get_machines(map(lambda x: x[0], paths)):
            cmd += " $v | Serve-DcsVirtualDisk -connection %s -Machine %s -EnableRedundancy ;"""%(self.conn, machine)
        print(cmd)
        out, err, ret = self.dcscmd(cmd)

    def get_machines(self, ids):
        """Map initiator port ids to DataCore machine (host) ids.

        Fibre channel wwpns are normalized to the dcs portname format
        (uppercase, dash-separated byte pairs); iqns and already
        formatted ids are kept as is. The port list output is cached.
        """
        for i, id in enumerate(ids):
            if 'iqn' in id or ('-' in id and len(id) == 16):
                # iscsi or already in correct format
                continue
            # convert to dcs portname format
            id = list(id.upper())
            for j in (14, 12, 10, 8, 6, 4, 2):
                id.insert(j, '-')
            id = ''.join(id)
            ids[i] = id
        if not hasattr(self, "buff_dcsport"):
            self.buff_dcsport = self.get_dcsport()
        machines = set([])
        for line in self.buff_dcsport.split('\n'):
            if line.startswith('HostId'):
                # remember the owner of the ports listed below
                hostid = line.split(': ')[-1].strip()
            elif line.startswith('Id'):
                id = line.split(': ')[-1].strip()
                if id in ids:
                    machines.add(hostid)
        return machines
if __name__ == "__main__":
    # ad-hoc test: enumerate the registered DataCore servers
    o = Dcss()
    for dcs in o:
        print(dcs.get_dcsserver())
opensvc-1.8~20170412/lib/resShareNfsLinux.py 0000644 0001750 0001750 00000010667 13073467726 020660 0 ustar jkelbert jkelbert import os
from rcGlobalEnv import rcEnv
from rcUtilities import justcall, which
import rcStatus
import rcExceptions as ex
from resources import Resource
class Share(Resource):
    """NFS share resource driver for Linux, driven through exportfs(8).

    self.opts maps each client pattern to the minimum set of export
    options that must be present for the share to be considered up.
    """

    def get_exports(self):
        """Parse 'exportfs -v' into {path: {client: set(opts)}} and cache
        it in self.data. Raises ex.excError if exportfs fails."""
        self.data = {}
        cmd = [ 'exportfs', '-v' ]
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError(err)
        # exportfs folds long lines: unwrap the continuations before parsing
        out = out.replace('\n ', '').replace('\n\t', '')
        for line in out.split('\n'):
            words = line.split()
            if len(words) != 2:
                continue
            path = words[0]
            e = words[1]
            if path not in self.data:
                self.data[path] = {}
            try:
                client, opts = self.parse_entry(e)
            except ex.excError as e:
                # skip malformed entries, keep parsing the rest
                continue
            if client == '':
                # exportfs prints '<world>' exports with an empty client
                client = '*'
            self.data[path][client] = opts
        return self.data

    def is_up(self):
        """Return True if self.path is currently exported.

        Side effect: records per-client issues in self.issues,
        self.issues_missing_client, self.issues_wrong_opts and
        self.issues_none for use by start() and _status()."""
        self.issues = {}
        self.issues_missing_client = []
        self.issues_wrong_opts = []
        self.issues_none = []
        exports = self.get_exports()
        if self.path not in exports:
            return False
        for client in self.opts:
            if client not in exports[self.path]:
                self.issues[client] = "%s not exported to client %s"%(self.path, client)
                self.issues_missing_client.append(client)
            elif self.opts[client] > exports[self.path][client]:
                # required opts are a strict superset of the current opts:
                # at least one required option is missing
                self.issues[client] = "%s is exported to client %s with missing options: current '%s', minimum required '%s'"%(self.path, client, ','.join(exports[self.path][client]), ','.join(self.opts[client]))
                self.issues_wrong_opts.append(client)
            else:
                self.issues_none.append(client)
        return True

    def start(self):
        """Export self.path to every configured client, fixing exports
        with missing options by unexport/re-export."""
        try:
            up = self.is_up()
        except ex.excError as e:
            self.log.error("skip start because the share is in unknown state")
            return
        if up and len(self.issues) == 0:
            self.log.info("%s is already up" % self.path)
            return
        self.can_rollback = True
        for client, opts in self.opts.items():
            if client in self.issues_none:
                continue
            if client in self.issues_wrong_opts:
                # drop the current export before re-exporting with the
                # configured options
                cmd = [ 'exportfs', '-u', ':'.join((client, self.path)) ]
                ret, out, err = self.vcall(cmd)
            cmd = [ 'exportfs', '-o', ','.join(opts), ':'.join((client, self.path)) ]
            ret, out, err = self.vcall(cmd)
            if ret != 0:
                raise ex.excError

    def stop(self):
        """Unexport self.path from every configured client."""
        try:
            up = self.is_up()
        except ex.excError as e:
            self.log.error("continue with stop even if the share is in unknown state")
            # unknown state: assume the share may be up so the unexport
            # commands below are issued anyway. The previous code left
            # 'up' unset here, raising NameError on the next statement.
            up = True
        if not up:
            self.log.info("%s is already down" % self.path)
            return 0
        for client in self.opts:
            cmd = [ 'exportfs', '-u', ':'.join((client, self.path)) ]
            ret, out, err = self.vcall(cmd)
            if ret != 0:
                raise ex.excError

    def _status(self, verbose=False):
        """Resource status: WARN on unknown state or per-client issues,
        else UP/DOWN (STDBY_* flavors on always_on nodes)."""
        try:
            up = self.is_up()
        except ex.excError as e:
            self.status_log(str(e))
            return rcStatus.WARN
        if len(self.issues) > 0:
            self.status_log('\n'.join(self.issues.values()))
            return rcStatus.WARN
        if rcEnv.nodename in self.always_on:
            if up: return rcStatus.STDBY_UP
            else: return rcStatus.STDBY_DOWN
        else:
            if up: return rcStatus.UP
            else: return rcStatus.DOWN

    def parse_entry(self, e):
        """Parse one 'client(opt1,opt2)' export entry into
        (client, set(opts)). Raises ex.excError on malformed input."""
        if '(' not in e or ')' not in e:
            raise ex.excError("malformed share opts: '%s'. must be in client(opts) client(opts) format"%e)
        _l = e.split('(')
        client = _l[0]
        opts = _l[1].strip(')')
        return client, set(opts.split(','))

    def __init__(self, rid, path, opts, **kwargs):
        Resource.__init__(self, rid, type="share.nfs", **kwargs)
        if not which("exportfs"):
            raise ex.excInitError("exportfs is not installed")
        self.label = "nfs:"+path
        self.path = path
        # opts is a whitespace-separated list of client(opts) entries;
        # backslashes are stripped (line continuations in the config file)
        l = opts.replace('\\', '').split()
        self.opts = {}
        for e in l:
            try:
                client, opts = self.parse_entry(e)
            except ex.excError as e:
                raise ex.excInitError(str(e))
            self.opts[client] = opts
opensvc-1.8~20170412/lib/resIpAIX.py 0000644 0001750 0001750 00000001610 13073467726 017025 0 ustar jkelbert jkelbert import resIp as Res
import rcExceptions as ex
from rcUtilitiesAIX import check_ping
from rcUtilities import to_cidr, to_dotted
class Ip(Res.Ip):
    """AIX ip resource driver: plumb/unplumb address aliases with ifconfig."""

    def check_ping(self, count=1, timeout=5):
        """Return True when the address answers icmp echo."""
        self.log.info("checking %s availability"%self.addr)
        return check_ping(self.addr, count=count, timeout=timeout)

    def arp_announce(self):
        # no gratuitous arp announce on this platform
        return

    def startip_cmd(self):
        """Plumb the address as an alias on self.ipdev."""
        if ':' in self.addr:
            cidr_addr = '/'.join([self.addr, to_cidr(self.mask)])
            cmd = ['ifconfig', self.ipdev, 'inet6', 'alias', cidr_addr]
        else:
            cmd = ['ifconfig', self.ipdev, self.addr,
                   'netmask', to_dotted(self.mask), 'alias']
        return self.vcall(cmd)

    def stopip_cmd(self):
        """Unplumb the address from self.ipdev."""
        if ':' in self.addr:
            cmd = ['ifconfig', self.ipdev, 'inet6', self.addr, 'delete']
        else:
            cmd = ['ifconfig', self.ipdev, self.addr, 'delete']
        return self.vcall(cmd)
opensvc-1.8~20170412/lib/resContainerOpenstack.py 0000644 0001750 0001750 00000025733 13073467726 021721 0 ustar jkelbert jkelbert import rcStatus
import resources as Res
import time
import os
import rcExceptions as ex
from rcGlobalEnv import rcEnv
from rcUtilities import justcall
from rcUtilitiesLinux import check_ping
import resContainer
class CloudVm(resContainer.Container):
    """Container resource driver for an instance hosted in an openstack
    cloud, driven through the libcloud driver returned by the node's
    cloud registry (self.svc.node.cloud_get).

    The stop sequence saves an image of the instance then destroys it;
    the start sequence restores the instance from the latest save image.
    """
    # timeouts, in seconds
    startup_timeout = 240
    shutdown_timeout = 120
    save_timeout = 240

    def __init__(self,
                 rid,
                 name,
                 guestos=None,
                 cloud_id=None,
                 size="tiny",
                 key_name=None,
                 shared_ip_group=None,
                 osvc_root_path=None,
                 **kwargs):
        resContainer.Container.__init__(self,
                                        rid=rid,
                                        name=name,
                                        type="container.openstack",
                                        guestos=guestos,
                                        osvc_root_path=osvc_root_path,
                                        **kwargs)
        self.cloud_id = cloud_id            # key of the cloud section in node conf
        self.save_name = name + '.save'     # prefix of the save images
        self.size_name = size               # flavor name
        self.key_name = key_name            # ssh keypair name
        self.shared_ip_group = shared_ip_group
        self.addr = None                    # lazily resolved by getaddr()

    def keyfile(self):
        """Return the path of the ssh key file for self.key_name,
        searching .pem then .pub in pathetc then pathvar."""
        kf = [os.path.join(rcEnv.pathetc, self.key_name+'.pem'),
              os.path.join(rcEnv.pathetc, self.key_name+'.pub'),
              os.path.join(rcEnv.pathvar, self.key_name+'.pem'),
              os.path.join(rcEnv.pathvar, self.key_name+'.pub')]
        for k in kf:
            if os.path.exists(k):
                return k
        raise ex.excError("key file for key name '%s' not found"%self.key_name)

    def rcp_from(self, src, dst):
        """Copy <src> from the instance to local <dst> with scp."""
        if self.guestos == "Windows":
            """ Windows has no sshd.
            """
            raise ex.excNotSupported("remote copy not supported on Windows")
        self.getaddr()
        if self.addr is None:
            raise ex.excError('no usable public ip to send files to')
        timeout = 5
        cmd = [ 'scp', '-o', 'StrictHostKeyChecking=no',
                       '-o', 'ConnectTimeout='+str(timeout),
                       '-i', self.keyfile(),
                self.addr+':'+src, dst]
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError("'%s' execution error:\n%s"%(' '.join(cmd), err))
        return out, err, ret

    def rcp(self, src, dst):
        """Copy local <src> to <dst> on the instance with scp."""
        if self.guestos == "Windows":
            """ Windows has no sshd.
            """
            raise ex.excNotSupported("remote copy not supported on Windows")
        self.getaddr()
        if self.addr is None:
            raise ex.excError('no usable public ip to send files to')
        timeout = 5
        cmd = [ 'scp', '-o', 'StrictHostKeyChecking=no',
                       '-o', 'ConnectTimeout='+str(timeout),
                       '-i', self.keyfile(),
                src, self.addr+':'+dst]
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError("'%s' execution error:\n%s"%(' '.join(cmd), err))
        return out, err, ret

    def rcmd(self, cmd):
        """Run <cmd> (str or list) on the instance through ssh."""
        if self.guestos == "Windows":
            """ Windows has no sshd.
            """
            raise ex.excNotSupported("remote commands not supported on Windows")
        self.getaddr()
        if self.addr is None:
            raise ex.excError('no usable public ip to send command to')
        if type(cmd) == str:
            cmd = cmd.split(" ")
        timeout = 5
        cmd = [ 'ssh', '-o', 'StrictHostKeyChecking=no',
                       '-o', 'ForwardX11=no',
                       '-o', 'BatchMode=yes',
                       '-n',
                       '-o', 'ConnectTimeout='+str(timeout),
                       '-i', self.keyfile(),
                self.addr] + cmd
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError("'%s' execution error:\n%s"%(' '.join(cmd), err))
        return out, err, ret

    def get_size(self):
        """Return the libcloud size (flavor) object matching self.size_name."""
        c = self.get_cloud()
        for size in c.driver.list_sizes():
            if size.name == self.size_name:
                return size
        raise ex.excError("%s size not found"%self.size_name)

    def get_cloud(self):
        """Return the cloud handle, caching it on first use."""
        if hasattr(self, 'cloud'):
            return self.cloud
        c = self.svc.node.cloud_get(self.cloud_id)
        self.cloud = c
        return self.cloud

    def get_node(self):
        """Return the libcloud node named self.name, or None."""
        c = self.get_cloud()
        l = c.list_nodes()
        for n in l:
            if n.name == self.name:
                return n
        return

    def get_save_name(self):
        """Return a timestamped name for a new save image."""
        import datetime
        now = datetime.datetime.now()
        save_name = self.save_name + now.strftime(".%Y-%m-%d.%H:%M:%S")
        return save_name

    def purge_saves(self):
        """Delete all save images but the most recent one."""
        c = self.get_cloud()
        l = c.driver.list_images()
        d = {}
        for image in l:
            if image.name.startswith(self.save_name):
                d[image.name] = image
        if len(d) == 0:
            raise ex.excError("no save image found")
        elif len(d) == 1:
            # nothing to purge; the loop below is a no-op in this case
            self.log.info("no previous save image to delete")
        for k in sorted(d.keys())[:-1]:
            self.log.info("delete previous save image %s"%d[k].name)
            c.driver.ex_delete_image(d[k])

    def get_last_save(self):
        """Return the most recent save image."""
        return self.get_image(self.save_name)

    def get_template(self):
        """Return the image named by the 'template' keyword of the
        service configuration defaults."""
        template = self.svc.config.defaults()['template']
        return self.get_image(template)

    def get_image(self, name):
        """Return the image exactly named <name>, or the last image
        (by name sort order) whose name starts with <name>."""
        c = self.get_cloud()
        l = c.driver.list_images()
        d = {}
        for image in l:
            if image.name == name:
                # exact match
                return image
            elif image.name.startswith(name):
                d[image.name] = image
        if len(d) == 0:
            raise ex.excError("image %s not found"%name)
        # keep the last entry in sorted order (most recent timestamp suffix)
        for k in sorted(d.keys()):
            last = d[k]
        return last

    def has_image(self, name):
        """Return True if an image exactly named <name> exists."""
        c = self.get_cloud()
        l = c.driver.list_images()
        for image in l:
            if image.name == name:
                return True
        return False

    def __str__(self):
        return "%s name=%s" % (Res.Resource.__str__(self), self.name)

    def getaddr(self):
        """Resolve self.addr to the first pinging ip of the instance.
        No-op if already resolved."""
        if self.addr is not None:
            return
        n = self.get_node()
        if n is None:
            raise ex.excError("could not get node details")
        ips = set(n.public_ips+n.private_ips)
        if len(ips) == 0:
            return 0
        # find first pinging ip
        for ip in ips:
            if check_ping(ip, timeout=1, count=1):
                self.addr = ip
                break
        return 0

    def files_to_sync(self):
        return []

    def check_capabilities(self):
        return True

    def ping(self):
        """Ping the resolved instance address (0 if unresolved)."""
        if self.addr is None:
            return 0
        return check_ping(self.addr, timeout=1, count=1)

    def start(self):
        """Start the container: reboot or restore, then wait for up."""
        if self.is_up():
            self.log.info("container %s already started" % self.name)
            return
        if rcEnv.nodename in self.svc.drpnodes:
            self.install_drp_flag()
        self.container_start()
        self.can_rollback = True
        self.wait_for_startup()

    def container_start(self):
        """Reboot the instance if it exists (and is in the expected
        state), else restore it from the last save image."""
        n = self.get_node()
        if n is not None:
            # NOTE(review): state 4 is presumably the provider's
            # stopped/shutoff state -- confirm against the libcloud driver
            if n.state == 4:
                self.log.info("reboot %s"%self.name)
                self.container_reboot()
            else:
                raise ex.excError("abort reboot because vm is in state %d (!=4)"%n.state)
        else:
            self.container_restore()

    def container_reboot(self):
        c = self.get_cloud()
        n = self.get_node()
        try:
            c.driver.reboot_node(n)
        except Exception as e:
            raise ex.excError(str(e))

    def container_restore(self):
        """Recreate the instance from the latest save image."""
        c = self.get_cloud()
        image = self.get_last_save()
        size = self.get_size()
        self.log.info("create instance %s, size %s, image %s, key %s"%(self.name, size.name, image.name, self.key_name))
        n = c.driver.create_node(name=self.name, size=size, image=image, ex_keyname=self.key_name, ex_shared_ip_group_id=self.shared_ip_group)
        self.log.info("wait for container up status")
        self.wait_for_fn(self.is_up, self.startup_timeout, 5)
        #n = c.driver.ex_update_node(n, accessIPv4='46.231.128.84')

    def wait_for_startup(self):
        pass

    def stop(self):
        """Stop the container: clean shutdown, then forced stop on timeout."""
        if self.is_down():
            self.log.info("container %s already stopped" % self.name)
            return
        self.container_stop()
        try:
            self.wait_for_shutdown()
        except ex.excError:
            self.container_forcestop()
            self.wait_for_shutdown()

    def container_stop(self):
        """Clean shutdown, through a remote command in the guest."""
        cmd = "shutdown -h now"
        self.log.info("remote command: %s"%cmd)
        self.rcmd(cmd)

    def container_forcestop(self):
        """Save an image of the instance, destroy it, purge old saves."""
        c = self.get_cloud()
        n = self.get_node()
        self.container_save()
        c.driver.destroy_node(n)
        self.purge_saves()

    def print_obj(self, n):
        # debug helper: dump all public attributes of a libcloud object
        for k in dir(n):
            if '__' in k:
                continue
            print(k, "=", getattr(n, k))

    def container_save(self):
        """Snapshot the instance to a new timestamped save image and
        wait (up to save_timeout) for the save to complete."""
        c = self.get_cloud()
        n = self.get_node()
        save_name = self.get_save_name()
        if self.has_image(save_name):
            return
        #self.print_obj(n)
        # NOTE(review): state 9999 presumably flags an in-progress task
        # on this provider -- confirm against the libcloud driver
        if n.state == 9999:
            self.log.info("a save is already in progress")
            return
        self.log.info("save new image %s"%save_name)
        try:
            image = c.driver.ex_save_image(n, save_name)
        except Exception as e:
            raise ex.excError(str(e))
        import time
        delay = 5
        for i in range(self.save_timeout//delay):
            img = c.driver.ex_get_image(image.id)
            if img.extra['status'] != 'SAVING':
                break
            time.sleep(delay)
        if img.extra['status'] != 'ACTIVE':
            raise ex.excError("save failed, image status %s"%img.extra['status'])

    def is_up(self):
        """True when the instance exists and is in state 0.
        NOTE(review): state 0 presumably means RUNNING -- confirm."""
        n = self.get_node()
        if n is not None and n.state == 0:
            return True
        return False

    def get_container_info(self):
        """Return {'vcpus', 'vmem'} strings; vmem from the flavor ram."""
        self.info = {'vcpus': '0', 'vmem': '0'}
        c = self.get_cloud()
        n = self.get_node()
        try:
            size = c.driver.ex_get_size(n.extra['flavorId'])
            self.info['vmem'] = str(size.ram)
        except:
            # best effort: keep the '0' defaults on any failure
            pass
        return self.info

    def check_manual_boot(self):
        return True

    def install_drp_flag(self):
        pass

    def provision(self):
        """Create a brand new instance from the template image."""
        c = self.get_cloud()
        image = self.get_template()
        size = self.get_size()
        self.log.info("create instance %s, size %s, image %s, key %s"%(self.name, size.name, image.name, self.key_name))
        c.driver.create_node(name=self.name, size=size, image=image, ex_keyname=self.key_name, ex_shared_ip_group_id=self.shared_ip_group)
        #self.wait_for_startup()
        self.wait_for_fn(self.is_up, self.startup_timeout, 5)
opensvc-1.8~20170412/lib/rcStatsAIX.py 0000644 0001750 0001750 00000007766 13073467726 017410 0 ustar jkelbert jkelbert import os
import datetime
from rcUtilities import call, which
from rcGlobalEnv import rcEnv
# reference dates for the "last two days" sar collections
today = datetime.datetime.today()
yesterday = today - datetime.timedelta(days=1)
def sarfile(day):
    """Return the path of the sadc binary datafile for <day> (a
    day-of-month string, e.g. '07'), or None when it does not exist."""
    path = os.path.join(os.sep, 'var', 'adm', 'sa', 'sa' + day)
    return path if os.path.exists(path) else None
def twodays(fn):
    """Concatenate the samples returned by <fn> for yesterday and
    today. Empty when the sar tool is not installed."""
    if which('sar') is None:
        return []
    return fn(yesterday) + fn(today)
def stats_cpu():
    # cpu stats for the last two days
    return twodays(stats_cpu_day)
def stats_cpu_day(t):
    """Parse 'sar -u -P ALL' output for the day of datetime <t> into
    rows matching the collector xmlrpc cpu schema."""
    d = t.strftime("%Y-%m-%d")
    day = t.strftime("%d")
    f = sarfile(day)
    if f is None:
        return []
    cmd = ['sar', '-u', '-P', 'ALL', '-f', f]
    (ret, buff, err) = call(cmd, errlog=False)
    lines = []
    for line in buff.split('\n'):
        l = line.split()
        if len(l) != 6:
            continue
        if l[1] == '%usr':
            # header line
            continue
        if l[0] == 'Average':
            continue
        # SunOS: date %usr %sys %wio %idle
        # xmlrpc: date cpu usr nice sys iowait steal irq soft guest idle nodename
        # start from an all-zero row and fill in the columns sar provides
        x = ['%s %s'%(d, l[0]), 'all', '0', '0', '0', '0', '0', '0', '0', '0', '0', rcEnv.nodename]
        x[1] = l[1].replace('-', 'all')
        x[2] = l[2]
        x[4] = l[3]
        x[5] = l[4]
        x[10] = l[5]
        lines.append(x)
    return lines
def stats_mem_u(file, collect_date=None):
    # memory usage stats for the last two days (not collected on AIX)
    return twodays(stats_mem_u_day)
def stats_mem_u_day(t):
    """Memory usage is not collected from sar on AIX: always empty."""
    return []
def stats_proc(file, collect_date=None):
    # run-queue/process stats for the last two days
    return twodays(stats_proc_day)
def stats_proc_day(t):
    """Parse 'sar -q' output for the day of datetime <t> into rows
    matching the collector xmlrpc proc schema."""
    d = t.strftime("%Y-%m-%d")
    day = t.strftime("%d")
    f = sarfile(day)
    if f is None:
        return []
    cmd = ['sar', '-q', '-f', f]
    (ret, buff, err) = call(cmd)
    lines = []
    for line in buff.split('\n'):
        l = line.split()
        if len(l) < 3:
            continue
        if ':' not in l[0]:
            # data lines start with a hh:mm:ss timestamp
            continue
        """ xmlrpc: date runq_sz plist_sz ldavg_1 ldavg_5 ldavg_15 nodename
        """
        x = ['%s %s'%(d, l[0]), l[1], '0', '0', '0', '0', rcEnv.nodename]
        lines.append(x)
    return lines
def stats_swap(file, collect_date=None):
    # swap stats for the last two days (not collected on AIX)
    return twodays(stats_swap_day)
def stats_swap_day(t):
    """Swap usage is not collected from sar on AIX: always empty."""
    return []
def stats_block(file, collect_date=None):
    # block io stats for the last two days
    return twodays(stats_block_day)
def stats_block_day(t):
    """Parse 'sar -b' output for the day of datetime <t> into rows
    matching the collector xmlrpc block schema."""
    d = t.strftime("%Y-%m-%d")
    day = t.strftime("%d")
    f = sarfile(day)
    if f is None:
        return []
    cmd = ['sar', '-b', '-f', f]
    (ret, buff, err) = call(cmd)
    lines = []
    for line in buff.split('\n'):
        l = line.split()
        if len(l) != 9:
            continue
        # data lines start with a hh:mm:ss timestamp in field 0.
        # The previous code tested l[1], which is numeric on data
        # lines, so every sample was silently discarded.
        if ':' not in l[0]:
            continue
        """ xmlrpc: date tps rtps wtps rbps wbps nodename
        """
        x = ['%s %s'%(d, l[0]), '0', '0', '0', l[1], l[4], rcEnv.nodename]
        lines.append(x)
    return lines
def stats_blockdev(file, collect_date=None):
    # per-device io stats for the last two days
    return twodays(stats_blockdev_day)
def stats_blockdev_day(t):
    """Parse 'sar -d' output for the day of datetime <t> into rows
    matching the collector xmlrpc blockdev schema.

    sar -d prints the timestamp only on the first device line of each
    sample; continuation lines (7 fields) inherit the last seen
    timestamp.
    """
    d = t.strftime("%Y-%m-%d")
    day = t.strftime("%d")
    f = sarfile(day)
    if f is None:
        return []
    cmd = ['sar', '-d', '-f', f]
    (ret, buff, err) = call(cmd, errlog=False)
    lines = []
    date = None
    for line in buff.split('\n'):
        l = line.split()
        if len(l) == 8:
            # timestamped device line: remember the sample timestamp
            date = l[0]
        if len(l) == 7:
            if date is None:
                # continuation line before any timestamped line: the
                # previous code raised NameError here; skip it instead
                continue
            l = [date] + l
        if len(l) != 8:
            continue
        if l[1] == 'device':
            continue
        if l[0] == 'Average':
            continue
        """ xmlrpc: 22:05:01 DEV tps rd_sec/s wr_sec/s avgrq-sz avgqu-sz await svctm %util
                    00:00:00 device %busy avque r+w/s blks/s avwait avserv
        """
        x = ['%s %s'%(d, l[0]), l[1], l[4], '0', '0', '0', l[3], l[6], l[7], l[2], rcEnv.nodename]
        lines.append(x)
    return lines
def stats_netdev(file, collect_date=None):
    # per-interface network stats for the last two days (not collected on AIX)
    return twodays(stats_netdev_day)
def stats_netdev_day(t):
    """Network interface stats are not collected from sar on AIX."""
    return []
def stats_netdev_err(file, collect_date=None):
    # per-interface error stats for the last two days (not collected on AIX)
    return twodays(stats_netdev_err_day)
def stats_netdev_err_day(t):
    """Network error stats are not collected from sar on AIX."""
    return []
opensvc-1.8~20170412/lib/rcUtilitiesOSF1.py 0000644 0001750 0001750 00000000406 13073467726 020334 0 ustar jkelbert jkelbert from rcUtilities import call
def check_ping(addr, timeout=5, count=1):
    """Return True when <addr> answers <count> icmp echo requests
    within <timeout> seconds (OSF1/Tru64 ping syntax)."""
    cmd = ['ping', '-c', repr(count),
                   '-t', repr(timeout),
           addr]
    (ret, out, err) = call(cmd)
    return ret == 0
opensvc-1.8~20170412/lib/hostidDarwin.py 0000644 0001750 0001750 00000000102 13073467726 020033 0 ustar jkelbert jkelbert from uuid import getnode
def hostid():
    """Return the node hardware identifier (the mac-derived integer
    from uuid.getnode) as a decimal string."""
    return "%d" % getnode()
opensvc-1.8~20170412/lib/rcUtilitiesWindows.py 0000644 0001750 0001750 00000001404 13073467726 021255 0 ustar jkelbert jkelbert import os
import re
from rcUtilities import justcall
import string
from ctypes import windll
def check_ping(addr, timeout=5, count=1):
    """Return True when <addr> answers <count> icmp echo requests
    within <timeout> (Windows ping.exe syntax)."""
    cmd = ['ping.exe',
           '-n', repr(count),
           '-w', repr(timeout),
           addr]
    out, err, ret = justcall(cmd)
    return ret == 0
def get_registry_value(key, subkey, value):
    """Read a Windows registry value.

    key    -- name of a _winreg hive constant, e.g. 'HKEY_LOCAL_MACHINE'
    subkey -- path of the key under the hive
    value  -- name of the value to read
    Returns the value data (python2 only: uses the _winreg module).
    """
    import _winreg
    key = getattr(_winreg, key)
    handle = _winreg.OpenKey(key, subkey)
    (value, type) = _winreg.QueryValueEx(handle, value)
    return value
def get_drives():
    """Return the list of assigned drive letters, decoded from the
    GetLogicalDrives bitmask (bit i set => letter i is in use)."""
    bitmask = windll.kernel32.GetLogicalDrives()
    return [letter
            for i, letter in enumerate(string.uppercase)
            if bitmask & (1 << i)]
opensvc-1.8~20170412/lib/rcStatsHP-UX.py 0000644 0001750 0001750 00000015216 13073467726 017615 0 ustar jkelbert jkelbert import os
from rcUtilities import call
from rcGlobalEnv import rcEnv
import rcStats
class StatsProvider(rcStats.StatsProvider):
    """HP-UX stats provider, parsing the 'glance<day>' files produced
    by the opensvc glance collector in rcEnv.pathvar.

    Each data line carries 24 space-separated fields; the field-to-
    metric mapping is documented in each method. Timestamps may embed
    NUL bytes which are stripped before use.
    """

    def glancefile(self, day):
        """Return the glance datafile path for <day> (a '%d' day-of-
        month string), or None when absent."""
        f = os.path.join(rcEnv.pathvar, 'glance'+day)
        if os.path.exists(f):
            return f
        return None

    def cpu(self, d, day, start, end):
        """Return (cols, rows) of cpu usage samples for <day>."""
        f = self.glancefile(day)
        if f is None:
            return [], []
        cols = ['date',
                'cpu',
                'usr',
                'nice',
                'sys',
                'iowait',
                'steal',
                'irq',
                'soft',
                'guest',
                'idle',
                'nodename']
        lines = []
        with open(f, 'r') as file:
            for line in file:
                l = line.split()
                if len(l) != 24:
                    continue
                """ hpux: usr nice sys irq wait idle
                          1   2    3   4   5    6
                    xmlrpc: date cpu usr nice sys iowait steal irq soft guest idle nodename
                """
                ts = '%s %s'%(d, l[0])
                ts = ts.replace('\0','')
                x = [ts,
                     'all',
                     l[1],
                     l[2],
                     l[3],
                     l[5],
                     '0',
                     l[4],
                     '0',
                     '0',
                     l[6],
                     self.nodename]
                lines.append(x)
        return cols, lines

    def mem_u(self, d, day, start, end):
        """Return (cols, rows) of memory usage samples for <day>.
        Derives used/commit percentages from the raw glance fields."""
        f = self.glancefile(day)
        if f is None:
            return [], []
        cols = ['date',
                'kbmemfree',
                'kbmemused',
                'pct_memused',
                'kbbuffers',
                'kbcached',
                'kbcommit',
                'pct_commit',
                'kbmemsys',
                'nodename']
        lines = []
        with open(f, 'r') as file:
            for line in file:
                l = line.split()
                if len(l) != 24:
                    continue
                """ hpux: phys kbmemfree kbcached kbfilecached kbsys kbuser kbswapused kbswap
                          7    8         9        10           11    12     13        14
                    xmlrpc: date kbmemfree kbmemused pct_memused kbbuffers kbcached kbcommit pct_commit kbmemsys nodename
                """
                phys = int(l[7])
                free = int(l[8])
                swapused = int(l[13])
                swap = int(l[14])
                used = phys - free
                commit = used + swapused
                vm = phys + swap
                # skip degenerate samples to avoid division by zero
                if vm == 0 or phys == 0:
                    continue
                pct_commit = 100 * commit / vm
                pct_used = 100 * used / phys
                ts = '%s %s'%(d, l[0])
                ts = ts.replace('\0','')
                x = [ts,
                     l[8],
                     str(used),
                     str(pct_used),
                     l[9],
                     l[10],
                     str(commit),
                     str(pct_commit),
                     l[11],
                     self.nodename]
                lines.append(x)
        return cols, lines

    def proc(self, d, day, start, end):
        """Return (cols, rows) of run-queue/load samples for <day>."""
        f = self.glancefile(day)
        if f is None:
            return [], []
        cols = ['date',
                'runq_sz',
                'plist_sz',
                'ldavg_1',
                'ldavg_5',
                'ldavg_15',
                'nodename']
        lines = []
        with open(f, 'r') as file:
            for line in file.readlines():
                l = line.split()
                if len(l) != 24:
                    continue
                """ hpux: GBL_LOADAVG GBL_LOADAVG5 GBL_LOADAVG15 GBL_CPU_QUEUE TBL_PROC_TABLE_USED
                          15          16           17            18            19
                    xmlrpc: date runq_sz plist_sz ldavg_1 ldavg_5 ldavg_15 nodename
                """
                ts = '%s %s'%(d, l[0])
                ts = ts.replace('\0','')
                x = [ts,
                     l[18],
                     l[19],
                     l[15],
                     l[16],
                     l[17],
                     self.nodename]
                lines.append(x)
        return cols, lines

    def swap(self, d, day, start, end):
        """Return (cols, rows) of swap usage samples for <day>.
        NOTE(review): a sample with kbswap == 0 would raise
        ZeroDivisionError here (no guard, unlike mem_u) -- confirm
        glance never reports 0."""
        f = self.glancefile(day)
        if f is None:
            return [], []
        lines = []
        cols = ['date',
                'kbswpfree',
                'kbswpused',
                'pct_swpused',
                'kbswpcad',
                'pct_swpcad',
                'nodename']
        with open(f, 'r') as file:
            for line in file.readlines():
                l = line.split()
                if len(l) != 24:
                    continue
                """ hpux: kbswapused kbswap
                          13         14
                    xmlrpc: date kbswpfree kbswpused pct_swpused kbswpcad pct_swpcad nodename
                """
                swapused = int(l[13])
                swap = int(l[14])
                swapfree = swap - swapused
                ts = '%s %s'%(d, l[0])
                ts = ts.replace('\0','')
                x = [ts,
                     str(swapfree),
                     l[13],
                     str(100 * swapused / swap),
                     '0',
                     '0',
                     self.nodename]
                lines.append(x)
        return cols, lines

    def block(self, d, day, start, end):
        """Return (cols, rows) of block io samples for <day>."""
        f = self.glancefile(day)
        if f is None:
            return [], []
        cols = ['date',
                'tps',
                'rtps',
                'wtps',
                'rbps',
                'wbps',
                'nodename']
        lines = []
        with open(f, 'r') as file:
            for line in file.readlines():
                l = line.split()
                if len(l) != 24:
                    continue
                """ hpux: rio wio rkb wkb
                          20  21  22  23
                    xmlrpc: date tps rtps wtps rbps wbps nodename
                """
                tps = float(l[20]) + float(l[21])
                ts = '%s %s'%(d, l[0])
                ts = ts.replace('\0','')
                x = [ts,
                     str(tps),
                     l[20],
                     l[21],
                     l[22],
                     l[23],
                     self.nodename]
                lines.append(x)
        return cols, lines
opensvc-1.8~20170412/lib/provFsExt3.py 0000644 0001750 0001750 00000000176 13073467726 017432 0 ustar jkelbert jkelbert import provFs
class ProvisioningFs(provFs.ProvisioningFs):
    """ext3 filesystem provisioner: only overrides the commands used
    by the generic provFs workflow."""
    # -F: force, -q: quiet
    mkfs = ['mkfs.ext3', '-F', '-q']
    info = ['tune2fs', '-l']
opensvc-1.8~20170412/lib/resIpFreeBSD.py 0000644 0001750 0001750 00000001630 13073467726 017620 0 ustar jkelbert jkelbert import resIp as Res
import rcExceptions as ex
from rcUtilitiesFreeBSD import check_ping
from rcUtilities import to_cidr, to_dotted
class Ip(Res.Ip):
    """FreeBSD ip resource driver: plumb/unplumb addresses with ifconfig."""

    def check_ping(self, count=1, timeout=5):
        """Return True when the address answers icmp echo."""
        self.log.info("checking %s availability"%self.addr)
        return check_ping(self.addr, count=count, timeout=timeout)

    def arp_announce(self):
        # no gratuitous arp announce on this platform
        return

    def startip_cmd(self):
        """Plumb the address on self.ipdev."""
        if ':' in self.addr:
            args = ['inet6', '/'.join([self.addr, to_cidr(self.mask)])]
        else:
            args = ['inet', self.addr, 'netmask', to_dotted(self.mask)]
        return self.vcall(['ifconfig', self.ipdev] + args + ['add'])

    def stopip_cmd(self):
        """Unplumb the address from self.ipdev."""
        if ':' in self.addr:
            args = ['inet6', self.addr]
        else:
            args = ['inet', self.addr]
        return self.vcall(['ifconfig', self.ipdev] + args + ['delete'])
opensvc-1.8~20170412/lib/tabulate.py 0000644 0001750 0001750 00000063646 13073467726 017223 0 ustar jkelbert jkelbert # -*- coding: utf-8 -*-
"""Pretty-print tabular data."""
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
from platform import python_version_tuple
import re
# python2/python3 compatibility aliases: the rest of the module only
# refers to _text_type/_binary_type/izip_longest, never to the
# version-specific names directly.
if python_version_tuple()[0] < "3":
    # python2: text is unicode, binary is str
    from itertools import izip_longest
    _none_type = type(None)
    _int_type = int
    _float_type = float
    _text_type = unicode
    _binary_type = str
else:
    # python3: izip_longest was renamed, reduce moved to functools
    from itertools import zip_longest as izip_longest
    from functools import reduce
    _none_type = type(None)
    _int_type = int
    _float_type = float
    _text_type = str
    _binary_type = bytes
__all__ = ["tabulate"]
__version__ = "0.6"
def __text_type(s):
    """Best-effort conversion of <s> to the text type, ignoring decode
    errors; return <s> unchanged when conversion is impossible (the
    bare except is deliberate: _text_type(s, errors=...) raises
    TypeError for non-bytes input)."""
    try:
        return _text_type(s, errors="ignore")
    except:
        return s
# table layout descriptors:
#  Line      -- a horizontal rule (begin/fill/column-separator/end)
#  DataRow   -- a data or header row (begin/column-separator/end)
#  TableFormat -- the full description of an output format
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
                                         "linebetweenrows", "linebelow",
                                         "headerrow", "datarow",
                                         "padding", "usecolons", "usehtmlattrs",
                                         "with_header_hide",
                                         "without_header_hide"])
# default values for the optional TableFormat fields
_format_defaults = {"padding": 0,
                    "usecolons": False,
                    "usehtmlattrs": False,
                    "with_header_hide": [],
                    "without_header_hide": []}
_table_formats = {"simple":
TableFormat(lineabove=None,
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
usecolons=False,
usehtmlattrs=False,
with_header_hide=["linebelow"],
without_header_hide=[]),
"plain":
TableFormat(lineabove=None,
linebelowheader=None,
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=None,
headerrow=DataRow(" ", " ", " "),
datarow=DataRow("|", "|", "|"),
padding=0,
usecolons=_format_defaults["usecolons"],
usehtmlattrs=_format_defaults["usehtmlattrs"],
with_header_hide=_format_defaults["with_header_hide"],
without_header_hide=_format_defaults["without_header_hide"]),
"grid":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("+", "=", "+", "+"),
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
usecolons=False,
usehtmlattrs=False,
with_header_hide=[],
without_header_hide=["linebelowheader"]),
"pipe":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "|", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
usecolons=True,
usehtmlattrs=False,
with_header_hide=[],
without_header_hide=[]),
"orgtbl":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
usecolons=False,
usehtmlattrs=False,
with_header_hide=[],
without_header_hide=["linebelowheader"]),
"rst":
TableFormat(lineabove=Line("", "=", " ", ""),
linebelowheader=Line("", "=", " ", ""),
linebetweenrows=None,
linebelow=Line("", "=", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
usecolons=False,
usehtmlattrs=False,
with_header_hide=[],
without_header_hide=["linebelowheader"]),
"mediawiki":
TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"",
"", "", "\n|+ \n|-"),
linebelowheader=Line("|-", "", "", ""),
linebetweenrows=Line("|-", "", "", ""),
linebelow=Line("|}", "", "", ""),
headerrow=DataRow("!", "!!", ""),
datarow=DataRow("|", "||", ""),
padding=1,
usecolons=False,
usehtmlattrs=True,
with_header_hide=[],
without_header_hide=["linebelowheader"])}
_invisible_codes = re.compile("\x1b\[\d*m") # ANSI color codes
def simple_separated_format(separator):
    """Construct a simple TableFormat with columns separated by a separator.
    >>> tsv = simple_separated_format("\t") ; \
        tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == u'foo \\t 1\\nspam\\t23'
    True
    """
    # the previous code hardcoded '\t' here, silently ignoring the
    # separator argument (the doctest passed only because it uses a tab)
    return TableFormat(None, None, None, None,
                       headerrow=None, datarow=DataRow('', separator, ''),
                       padding=_format_defaults["padding"],
                       usecolons=_format_defaults["usecolons"],
                       usehtmlattrs=_format_defaults["usehtmlattrs"],
                       with_header_hide=_format_defaults["with_header_hide"],
                       without_header_hide=_format_defaults["without_header_hide"])
def _isconvertible(conv, string):
try:
n = conv(string)
return True
except ValueError:
return False
def _isnumber(string):
    """True when the value parses as a float.
    >>> _isnumber("123.45")
    True
    >>> _isnumber("spam")
    False
    """
    return _isconvertible(float, string)
def _isint(string):
    """True when the value is an int, or a text/bytes value that
    parses as an int.
    >>> _isint("123")
    True
    >>> _isint("123.45")
    False
    """
    if type(string) is int:
        return True
    stringlike = isinstance(string, (_binary_type, _text_type))
    return stringlike and _isconvertible(int, string)
def _type(string, has_invisible=True):
    """The least generic type (type(None), int, float, str, unicode).
    ANSI color codes are stripped before classification when
    has_invisible is true.
    >>> _type(None) is type(None)
    True
    >>> _type("foo") is type("")
    True
    >>> _type("1") is type(1)
    True
    >>> _type(u'\x1b[31m42\x1b[0m') is type(42)
    True
    >>> _type('\x1b[31m42\x1b[0m') is type(42)
    True
    """
    if has_invisible and \
       (isinstance(string, _text_type) or isinstance(string, _binary_type)):
        string = _strip_invisible(string)
    # ordered from most to least specific: None, int, float, bytes, text
    if string is None:
        return _none_type
    elif _isint(string):
        return int
    elif _isnumber(string):
        return float
    elif isinstance(string, _binary_type):
        return _binary_type
    else:
        return _text_type
def _afterpoint(string):
    """Number of symbols after the decimal point (or the exponent
    marker); -1 when the string is not a number or has neither.
    >>> _afterpoint("123.45")
    2
    >>> _afterpoint("1001")
    -1
    >>> _afterpoint("eggs")
    -1
    >>> _afterpoint("123e45")
    2
    """
    if not _isnumber(string):
        return -1  # not a number
    if _isint(string):
        return -1
    pos = string.rfind(".")
    if pos < 0:
        # no decimal point: fall back to the exponent marker
        pos = string.lower().rfind("e")
    if pos < 0:
        return -1  # no point
    return len(string) - pos - 1
def _padleft(width, s, has_invisible=True):
    """Flush right.
    >>> _padleft(6, u'\u044f\u0439\u0446\u0430') == u'  \u044f\u0439\u0446\u0430'
    True
    """
    effective = width
    if has_invisible:
        # widen by the length of the invisible ANSI codes
        effective += len(s) - len(_strip_invisible(s))
    return (u"{0:>%ds}" % effective).format(s)
def _padright(width, s, has_invisible=True):
    """Flush left.
    >>> _padright(6, u'\u044f\u0439\u0446\u0430') == u'\u044f\u0439\u0446\u0430  '
    True
    """
    effective = width
    if has_invisible:
        # widen by the length of the invisible ANSI codes
        effective += len(s) - len(_strip_invisible(s))
    return (u"{0:<%ds}" % effective).format(s)
def _padboth(width, s, has_invisible=True):
    """Center string.
    >>> _padboth(6, u'\u044f\u0439\u0446\u0430') == u' \u044f\u0439\u0446\u0430 '
    True
    """
    effective = width
    if has_invisible:
        # widen by the length of the invisible ANSI codes
        effective += len(s) - len(_strip_invisible(s))
    return (u"{0:^%ds}" % effective).format(s)
def _strip_invisible(s):
"Remove invisible ANSI color codes."
return re.sub(_invisible_codes, "", s)
def _visible_width(s):
    """Visible width of a printed string. ANSI color codes are removed.
    >>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
    (5, 5)
    """
    if isinstance(s, (_text_type, _binary_type)):
        return len(_strip_invisible(s))
    return len(_text_type(s))
def _align_column(strings, alignment, minwidth=0, has_invisible=True):
    """[string] -> [padded_string]
    Pad every cell of a column to the column width, according to the
    alignment ('right', 'center', 'decimal', anything else => left).
    >>> list(map(str,_align_column(["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"], "decimal")))
    ['   12.345  ', '-1234.5    ', '    1.23   ', ' 1234.5    ', '    1e+234 ', '    1.0e234']
    """
    if alignment == "right":
        strings = [s.strip() for s in strings]
        padfn = _padleft
    elif alignment in "center":
        # NOTE(review): substring membership, not equality -- any
        # substring of "center" (e.g. "c") also matches; preserved as-is
        strings = [s.strip() for s in strings]
        padfn = _padboth
    elif alignment in "decimal":
        # align on the decimal point: right-pad so all points line up
        decimals = [_afterpoint(s) for s in strings]
        maxdecimals = max(decimals)
        strings = [s + (maxdecimals - decs) * " "
                   for s, decs in zip(strings, decimals)]
        padfn = _padleft
    else:
        strings = [s.strip() for s in strings]
        padfn = _padright
    if has_invisible:
        width_fn = _visible_width
    else:
        width_fn = len
    maxwidth = max(max(map(width_fn, strings)), minwidth)
    padded_strings = [padfn(maxwidth, s, has_invisible) for s in strings]
    return padded_strings
def _more_generic(type1, type2):
    """Return the more generic of two types, on the
    none < int < float < text generality scale (unknown types count
    as text)."""
    ranking = { _none_type: 0, int: 1, float: 2, _text_type: 4 }
    by_rank = { 0: _none_type, 1: int, 2: float, 4: _text_type }
    winner = max(ranking.get(type1, 4), ranking.get(type2, 4))
    return by_rank[winner]
def _column_type(strings, has_invisible=True):
    """The least generic type all column values are convertible to.
    >>> _column_type(["1", "2"]) is _int_type
    True
    >>> _column_type(["1", "2.3"]) is _float_type
    True
    >>> _column_type(["1", "2.3", "four"]) is _text_type
    True
    >>> _column_type(["four", u'\u043f\u044f\u0442\u044c']) is _text_type
    True
    >>> _column_type([None, "brux"]) is _text_type
    True
    >>> _column_type([1, 2, None]) is _int_type
    True
    """
    # fold the per-cell types together, starting from the narrowest (int)
    return reduce(_more_generic,
                  (_type(s, has_invisible) for s in strings),
                  int)
def _format(val, valtype, floatfmt, missingval=u""):
    """Format a value according to its type.
    Unicode is supported:
    >>> hrow = [u'\u0431\u0443\u043a\u0432\u0430', u'\u0446\u0438\u0444\u0440\u0430'] ; \
    tbl = [[u'\u0430\u0437', 2], [u'\u0431\u0443\u043a\u0438', 4]] ; \
    good_result = u'\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
    tabulate(tbl, headers=hrow) == good_result
    True
    """
    if val is None:
        return missingval
    if valtype in [int, _binary_type, _text_type]:
        # BUG FIX: the original called `__text_type(val)` (double
        # underscore) which is undefined and raised NameError for every
        # int/str cell; the module helper used everywhere else in this
        # file is `_text_type` (see _more_generic, _normalize_tabular_data).
        return u"{0}".format(_text_type(val))
    elif valtype is float:
        return format(float(val), floatfmt)
    else:
        return u"{0}".format(val)
def _align_header(header, alignment, width):
    """Pad a header cell to `width` according to `alignment`.

    "left" pads on the right, "center" pads both sides, anything else
    (including "right" and "decimal") pads on the left.
    """
    if alignment == "left":
        return _padright(width, header)
    if alignment == "center":
        return _padboth(width, header)
    return _padleft(width, header)
def _normalize_tabular_data(tabular_data, headers):
    """Transform a supported data type to a list of lists, and a list of headers.
    Supported tabular data types:
    * list-of-lists or another iterable of iterables
    * 2D NumPy arrays
    * dict of iterables (usually used with headers="keys")
    * pandas.DataFrame (usually used with headers="keys")
    The first row can be used as headers if headers="firstrow",
    column indices can be used as headers if headers="keys".
    """
    if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
        # dict-like and pandas.DataFrame?
        if hasattr(tabular_data.values, "__call__"):
            # likely a conventional dict: .values is a bound method
            keys = tabular_data.keys()
            # izip_longest pads short columns with None
            rows = list(izip_longest(*tabular_data.values()))  # columns have to be transposed
        elif hasattr(tabular_data, "index"):
            # values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
            keys = tabular_data.keys()
            vals = tabular_data.values  # values matrix doesn't need to be transposed
            names = tabular_data.index
            # prepend the index value to each row
            rows = [[v]+list(row) for v,row in zip(names, vals)]
        else:
            raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
        if headers == "keys":
            headers = list(map(_text_type,keys))  # headers should be strings
    else:  # it's a usual an iterable of iterables, or a NumPy array
        rows = list(tabular_data)
        if headers == "keys" and len(rows) > 0:  # keys are column indices
            headers = list(map(_text_type, range(len(rows[0]))))
    # take headers from the first row if necessary
    if headers == "firstrow" and len(rows) > 0:
        headers = list(map(_text_type, rows[0]))  # headers should be strings
        rows = rows[1:]
    headers = list(headers)
    rows = list(map(list,rows))
    # pad with empty headers for initial columns if necessary
    if headers and len(rows) > 0:
        nhs = len(headers)
        ncols = len(rows[0])
        if nhs < ncols:
            # missing headers name the FIRST columns as empty: the given
            # headers label the last columns (R / pandas convention)
            headers = [u""]*(ncols - nhs) + headers
    return rows, headers
def tabulate(tabular_data, headers=[], tablefmt="simple",
             floatfmt="g", numalign="decimal", stralign="left",
             missingval=u""):
    """Format a fixed width table for pretty printing.
    >>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
    --- ---------
    1 2.34
    -56 8.999
    2 10001
    --- ---------
    The first required argument (`tabular_data`) can be a
    list-of-lists (or another iterable or iterables), a dictionary of
    iterables, a two-dimensional NumPy array, or a Pandas' dataframe.
    Table headers
    -------------
    To print nice column headers, supply the second argument (`headers`):
    - `headers` can be an explicit list of column headers
    - if `headers="firstrow"`, then the first row of data is used
    - if `headers="keys"`, then dictionary keys or column indices are used
    Otherwise a headerless table is produced.
    If the number of headers is less than the number of columns, they
    are supposed to be names of the last columns. This is consistent
    with the plain-text format of R and Pandas' dataframes.
    >>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
    ... headers="firstrow"))
    sex age
    ----- ----- -----
    Alice F 24
    Bob M 19
    Column alignment
    ----------------
    `tabulate` tries to detect column types automatically, and aligns
    the values properly. By default it aligns decimal points of the
    numbers (or flushes integer numbers to the right), and flushes
    everything else to the left. Possible column alignments
    (`numalign`, `stralign`) are: right, center, left, decimal (only
    for `numalign`).
    Table formats
    -------------
    `floatfmt` is a format specification used for columns which
    contain numeric data with a decimal point.
    `None` values are replaced with a `missingval` string:
    >>> print(tabulate([["spam", 1, None],
    ... ["eggs", 42, 3.14],
    ... ["other", None, 2.7]], missingval="?"))
    ----- -- ----
    spam 1 ?
    eggs 42 3.14
    other ? 2.7
    ----- -- ----
    Various plain-text table formats (`tablefmt`) are supported:
    'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', and 'mediawiki'.
    "plain" format doesn't use any pseudographics to draw tables,
    it separates columns with a double space:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ... ["strings", "numbers"], "plain"))
    strings numbers
    spam 41.9999
    eggs 451
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
    spam 41.9999
    eggs 451
    "simple" format is like Pandoc simple_tables:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ... ["strings", "numbers"], "simple"))
    strings numbers
    --------- ---------
    spam 41.9999
    eggs 451
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
    ---- --------
    spam 41.9999
    eggs 451
    ---- --------
    "grid" is similar to tables produced by Emacs table.el package or
    Pandoc grid_tables:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ... ["strings", "numbers"], "grid"))
    +-----------+-----------+
    | strings | numbers |
    +===========+===========+
    | spam | 41.9999 |
    +-----------+-----------+
    | eggs | 451 |
    +-----------+-----------+
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
    +------+----------+
    | spam | 41.9999 |
    +------+----------+
    | eggs | 451 |
    +------+----------+
    "pipe" is like tables in PHP Markdown Extra extension or Pandoc
    pipe_tables:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ... ["strings", "numbers"], "pipe"))
    | strings | numbers |
    |:----------|----------:|
    | spam | 41.9999 |
    | eggs | 451 |
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
    |:-----|---------:|
    | spam | 41.9999 |
    | eggs | 451 |
    "orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
    are slightly different from "pipe" format by not using colons to
    define column alignment, and using a "+" sign to indicate line
    intersections:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ... ["strings", "numbers"], "orgtbl"))
    | strings | numbers |
    |-----------+-----------|
    | spam | 41.9999 |
    | eggs | 451 |
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
    | spam | 41.9999 |
    | eggs | 451 |
    "rst" is like a simple table format from reStructuredText; please
    note that reStructuredText accepts also "grid" tables:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ... ["strings", "numbers"], "rst"))
    ========= =========
    strings numbers
    ========= =========
    spam 41.9999
    eggs 451
    ========= =========
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
    ==== ========
    spam 41.9999
    eggs 451
    ==== ========
    "mediawiki" produces a table markup used in Wikipedia and on other
    MediaWiki-based sites:
    >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
    ... headers="firstrow", tablefmt="mediawiki"))
    {| class="wikitable" style="text-align: left;"
    |+
    |-
    ! strings !! align="right"| numbers
    |-
    | spam || align="right"| 41.9999
    |-
    | eggs || align="right"| 451
    |}
    >>> print(tabulate([["eggs", 42], ["spam", 23]], tablefmt="mediawiki", stralign="left"))
    {| class="wikitable" style="text-align: left;"
    |+
    |-
    | eggs || align="right"| 42
    |-
    | spam || align="right"| 23
    |}
    """
    list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)
    # optimization: look for ANSI control codes once,
    # enable smart width functions only if a control code is found
    plain_text = u'\n'.join(['\t'.join(map(_text_type, headers))] + \
                            [u'\t'.join(map(_text_type, row)) for row in list_of_lists])
    # NOTE: has_invisible is a re.Match object (or None) used as a boolean
    # here and passed down to _align_column
    has_invisible = re.search(_invisible_codes, plain_text)
    if has_invisible:
        width_fn = _visible_width
    else:
        width_fn = len
    # format rows and columns, convert numeric values to strings
    cols = list(zip(*list_of_lists))
    coltypes = list(map(_column_type, cols))
    cols = [[_format(v, ct, floatfmt, missingval) for v in c]
            for c,ct in zip(cols, coltypes)]
    # align columns: numeric columns follow numalign, the rest stralign
    aligns = [numalign if ct in [int,float] else stralign for ct in coltypes]
    # reserve 2 extra chars so a header is never flush against its column
    minwidths = [width_fn(h)+2 for h in headers] if headers else [0]*len(cols)
    cols = [_align_column(c, a, minw, has_invisible)
            for c, a, minw in zip(cols, aligns, minwidths)]
    if headers:
        # align headers and add headers; cell widths are uniform per
        # column after _align_column, so c[0] gives the column width
        minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, cols)]
        headers = [_align_header(h, a, minw)
                   for h, a, minw in zip(headers, aligns, minwidths)]
        rows = list(zip(*cols))
    else:
        minwidths = [width_fn(c[0]) for c in cols]
        rows = list(zip(*cols))
    # resolve the format name; unknown names silently fall back to "simple"
    if not isinstance(tablefmt, TableFormat):
        tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])
    return _format_table(tablefmt, headers, rows, minwidths, aligns)
def _build_row(cells, padding, begin, sep, end):
"Return a string which represents a row of data cells."
pad = u" "*padding
padded_cells = [pad + cell + pad for cell in cells]
return (begin + sep.join(padded_cells) + end).rstrip()
def _build_line(colwidths, padding, begin, fill, sep, end):
    "Return a string which represents a horizontal line."
    # each segment spans the cell width plus the padding on both sides
    segments = [fill * (width + 2 * padding) for width in colwidths]
    return _build_row(segments, 0, begin, sep, end)
def _mediawiki_cell_attrs(row, colaligns):
"Prefix every cell in a row with an HTML alignment attribute."
alignment = { "left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| ' }
row2 = [alignment[a] + c for c, a in zip(row, colaligns)]
return row2
def _line_segment_with_colons(linefmt, align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
fill = linefmt.hline
w = colwidth
if align in ["right", "decimal"]:
return (fill[0] * (w - 1)) + ":"
elif align == "center":
return ":" + (fill[0] * (w - 2)) + ":"
elif align == "left":
return ":" + (fill[0] * (w - 1))
else:
return fill[0] * w
def _format_table(fmt, headers, rows, colwidths, colaligns):
    """Produce a plain-text representation of the table.

    `fmt` is a TableFormat describing the decorations; `headers` may be
    empty for a headerless table; `rows` are pre-formatted cell strings.
    """
    lines = []
    # line kinds suppressed for this format, depending on header presence
    hidden = fmt.with_header_hide if headers else fmt.without_header_hide
    pad = fmt.padding
    headerrow = fmt.headerrow if fmt.headerrow else fmt.datarow
    if fmt.usehtmlattrs:
        # mediawiki-style formats embed alignment as HTML cell attributes
        headers = _mediawiki_cell_attrs(headers, colaligns)
        rows = [_mediawiki_cell_attrs(row, colaligns) for row in rows]
    if fmt.lineabove and "lineabove" not in hidden:
        lines.append(_build_line(colwidths, pad, *fmt.lineabove))
    if headers:
        lines.append(_build_row(headers, pad, *headerrow))
    if fmt.linebelowheader and "linebelowheader" not in hidden:
        begin, fill, sep, end = fmt.linebelowheader
        if fmt.usecolons:
            # pipe-style formats mark the alignment with colons in the
            # separator line instead of a plain rule
            segs = [_line_segment_with_colons(fmt.linebelowheader, a, w + 2*pad)
                    for w,a in zip(colwidths, colaligns)]
            lines.append(_build_row(segs, 0, begin, sep, end))
        else:
            lines.append(_build_line(colwidths, pad, *fmt.linebelowheader))
    if rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
        # initial rows with a line below
        for row in rows[:-1]:
            lines.append(_build_row(row, pad, *fmt.datarow))
            lines.append(_build_line(colwidths, pad, *fmt.linebetweenrows))
        # the last row without a line below
        lines.append(_build_row(rows[-1], pad, *fmt.datarow))
    else:
        for row in rows:
            lines.append(_build_row(row, pad, *fmt.datarow))
    if fmt.linebelow and "linebelow" not in hidden:
        lines.append(_build_line(colwidths, pad, *fmt.linebelow))
    return "\n".join(lines)
opensvc-1.8~20170412/lib/hostidFreeBSD.py 0000777 0001750 0001750 00000000000 13073467726 022711 2hostidLinux.py ustar jkelbert jkelbert opensvc-1.8~20170412/lib/resIpDarwin.py 0000644 0001750 0001750 00000001605 13073467726 017634 0 ustar jkelbert jkelbert import resIp as Res
import rcExceptions as ex
from rcUtilitiesFreeBSD import check_ping
from rcUtilities import to_cidr
class Ip(Res.Ip):
    """ip resource driver: plumbs/unplumbs an address with ifconfig."""

    def check_ping(self, count=1, timeout=5):
        """Ping self.addr; return the check_ping() verdict."""
        self.log.info("checking %s availability" % self.addr)
        return check_ping(self.addr, count=count, timeout=timeout)

    def arp_announce(self):
        """No gratuitous ARP announce is done on this platform."""
        return

    def startip_cmd(self):
        """Build and run the ifconfig command adding self.addr to self.ipdev."""
        if ':' in self.addr:
            # ipv6: address carries its prefix length
            args = ['inet6', '/'.join([self.addr, to_cidr(self.mask)]), 'add']
        else:
            args = ['inet', self.addr, 'netmask', '0xffffffff', 'add']
        return self.vcall(['ifconfig', self.ipdev] + args)

    def stopip_cmd(self):
        """Build and run the ifconfig command removing self.addr from self.ipdev."""
        family = 'inet6' if ':' in self.addr else 'inet'
        return self.vcall(['ifconfig', self.ipdev, family, self.addr, 'delete'])
opensvc-1.8~20170412/lib/rcIfconfigSunOS.py 0000644 0001750 0001750 00000011342 13073467726 020405 0 ustar jkelbert jkelbert from subprocess import *
import rcIfconfig
class ifconfig(rcIfconfig.ifconfig):
    """Parse `ifconfig -a` (and optionally `netstat -gn`) output into
    rcIfconfig.interface objects stored in self.intf.
    """
    def __init__(self, ifconfig=None, mcast=False):
        # `ifconfig` may carry pre-captured command output (used by tests
        # and remote collection); otherwise the command is run locally.
        self.intf = []
        if mcast:
            self.mcast_data = self.get_mcast()
        else:
            self.mcast_data = {}
        if ifconfig is not None:
            out = ifconfig
        else:
            out = Popen(['/usr/sbin/ifconfig', '-a'], stdin=None, stdout=PIPE,stderr=PIPE,close_fds=True).communicate()[0]
        self.parse(out)

    def get_mcast(self):
        """Run netstat -gn and return its parsed multicast membership data."""
        cmd = ['netstat', '-gn']
        out = Popen(cmd, stdout=PIPE).communicate()[0]
        return self.parse_mcast(out)

    def parse_mcast(self, out):
        """Parse `netstat -gn` output into {ifname: [group_addr, ...]}.

        The output contains two sections (presumably ipv4 then ipv6 --
        TODO confirm on a live system), each introduced by a '--' ruler
        line; both are folded into the same dict.
        """
        lines = out.split('\n')
        found = False
        data = {}
        # skip to the first '--' ruler
        for i, line in enumerate(lines):
            if line.startswith('--'):
                found = True
                break
        if not found:
            return data
        if len(lines) == i+1:
            return data
        lines = lines[i+1:]
        # first section: "<intf> <addr> <refcnt>" lines until a blank line
        for i, line in enumerate(lines):
            if len(line) == 0:
                break
            try:
                intf, addr, refcnt = line.split()
            except:
                continue
            if intf not in data:
                data[intf] = [addr]
            else:
                data[intf] += [addr]
        if len(lines) <= i + 1:
            return data
        lines = lines[i+1:]
        # skip to the second section's ruler
        # NOTE(review): `found` is still True from the first loop, so the
        # `if not found` guard below can never trigger here
        for i, line in enumerate(lines):
            if line.startswith('--'):
                found = True
                break
        if not found:
            return data
        if len(lines) == i+1:
            return data
        lines = lines[i+1:]
        # second section, same format
        for i, line in enumerate(lines):
            if len(line) == 0:
                break
            try:
                intf, addr, refcnt = line.split()
            except:
                continue
            if intf not in data:
                data[intf] = [addr]
            else:
                data[intf] += [addr]
        return data

    def set_hwaddr(self, i):
        """Backfill the hwaddr of a logical interface (name with ':') from
        its base interface, falling back to the arp table."""
        if i is None or i.hwaddr != '' or ':' not in i.name:
            return i
        base_ifname, index = i.name.split(':')
        base_intf = self.interface(base_ifname)
        if base_intf is not None and len(base_intf.hwaddr) > 0:
            i.hwaddr = base_intf.hwaddr
        else:
            i.hwaddr = self.mac_from_arp(i.ipaddr)
        return i

    def mac_from_arp(self, ipaddr):
        """Return the mac address of ipaddr found in the arp table, or ''."""
        cmd = ['/usr/sbin/arp', ipaddr]
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        if p.returncode != 0:
            return ''
        # the only colon-bearing word of the arp output is the mac
        for word in out.split():
            if ':' not in word:
                continue
            return word
        return ''

    def parse(self, out):
        """Parse `ifconfig -a` output: interface header lines start at
        column 0, attribute lines are tab-indented."""
        i = None
        for l in out.split("\n"):
            if l == '' : break
            if l[0]!='\t' :
                # new interface header: finalize the previous one first
                i = self.set_hwaddr(i)
                (ifname,ifstatus)=l.split(': ')
                i=rcIfconfig.interface(ifname)
                self.intf.append(i)
                # defaults
                i.link_encap = ''
                i.scope = ''
                i.bcast = ''
                i.mask = ''
                i.mtu = ''
                i.ipaddr = ''
                i.ip6addr = []
                i.ip6mask = []
                i.hwaddr = ''
                i.groupname = ''
                i.flag_up = False
                i.flag_broadcast = False
                i.flag_running = False
                i.flag_multicast = False
                i.flag_ipv4 = False
                i.flag_ipv6 = False
                i.flag_loopback = False
                # flags are carried in the header's flags=<...> field
                if 'UP' in ifstatus : i.flag_up = True
                if 'DEPRECATED' in ifstatus : i.flag_deprecated = True
                if 'BROADCAST' in ifstatus : i.flag_broadcast = True
                if 'RUNNING' in ifstatus : i.flag_running = True
                if 'MULTICAST' in ifstatus : i.flag_multicast = True
                if 'IPv4' in ifstatus : i.flag_ipv4 = True
                if 'IPv6' in ifstatus : i.flag_ipv6 = True
            else:
                # attribute line: alternating "keyword value" pairs
                n=0
                w=l.split()
                while n < len(w) :
                    [p,v]=w[n:n+2]
                    if p == 'inet' : i.ipaddr=v
                    elif p == 'netmask' : i.mask=v
                    elif p == 'broadcast' : i.bcast=v
                    elif p == 'ether' : i.hwaddr=v
                    elif p == 'groupname' : i.groupname=v
                    elif p == 'inet6' :
                        # inet6 values are "addr/prefixlen"
                        (a, m) = v.split('/')
                        i.ip6addr += [a]
                        i.ip6mask += [m]
                    n+=2
        # finalize the last parsed interface
        i = self.set_hwaddr(i)
# Ad-hoc manual test: when run directly, dump the parsed interface list
# (including multicast group membership) of the local host.
if __name__ == "__main__":
    ifaces = ifconfig(mcast=True)
    print(ifaces)
opensvc-1.8~20170412/lib/rcFreenas.py 0000644 0001750 0001750 00000076224 13073467726 017326 0 ustar jkelbert jkelbert from __future__ import print_function
import sys
import os
import json
from optparse import Option
import requests
from rcConfigParser import RawConfigParser
import rcExceptions as ex
from rcGlobalEnv import rcEnv, Storage
from rcUtilities import convert_size, bdecode
from rcOptParser import OptParser
# Freenas heads commonly run with self-signed certificates: silence the
# urllib3 insecure-request warning when the installed requests version
# bundles urllib3 (older versions don't, hence the blanket except).
try:
    requests.packages.urllib3.disable_warnings()
except:
    pass
# Disable TLS certificate verification on all API calls (see above).
VERIFY = False
# Program name displayed in the option parser usage strings.
PROG = "nodemgr array"
# Command line options usable by the array actions below, keyed by a
# short name referenced from ACTIONS[...]["options"].
OPT = Storage({
    "help": Option(
        "-h", "--help", default=None, action="store_true", dest="parm_help",
        help="show this help message and exit"),
    "array": Option(
        "-a", "--array", default=None, action="store", dest="array_name",
        help="The name of the array, as defined in auth.conf"),
    "name": Option(
        "--name", default=None, action="store", dest="name",
        help="The object name"),
    "volume": Option(
        "--volume", default=None, action="store", dest="volume",
        help="The volume to create the disk into"),
    "size": Option(
        "--size", default="0", action="store", dest="size",
        help="The disk size, expressed as a size expression like 1g, 100mib, ..."),
    "target": Option(
        "--target", action="append", dest="targets",
        help="A target name to export the disk through. Can be set multiple times."),
    "blocksize": Option(
        "--blocksize", default=512, type=int, action="store", dest="blocksize",
        help="The exported disk blocksize in B"),
    "secure_tpc": Option(
        "--secure-tpc", default=True, action="store_false", dest="insecure_tpc",
        help="Set the insecure_tpc flag to False"),
    "compression": Option(
        "--compression", default="on", action="store", dest="compression",
        choices=["on", "off", "inherit", "lzjb", "lz4", "gzip", "gzip-9", "zle"],
        help="Toggle compression"),
    "dedup": Option(
        "--dedup", default="off", action="store", dest="dedup", choices=["on", "off"],
        help="Toggle dedup"),
    "naa": Option(
        "--naa", default=None, action="store", dest="naa",
        help="The disk naa identifier"),
    "initiator": Option(
        "--initiator", action="append", dest="initiators",
        help="An initiator iqn. Can be specified multiple times."),
    "auth_network": Option(
        "--auth-network", default="ALL", action="store", dest="auth_network",
        help="Network authorized to access to the iSCSI target. ip or cidr addresses or 'ALL' for any ips"),
    "comment": Option(
        "--comment", action="store", dest="comment",
        help="Description for your reference"),
    "id": Option(
        "--id", action="store", type=int, dest="id",
        help="An object id, as reported by a list action"),
    "alias": Option(
        "--alias", action="store", dest="alias",
        help="An object name alias"),
    "target_id": Option(
        "--target-id", action="store", type=int, dest="target_id",
        help="The target object id"),
    "authgroup_id": Option(
        "--auth-group-id", action="store", type=int, dest="authgroup_id",
        help="The auth group object id"),
    "authtype": Option(
        "--auth-type", action="store", default="None", dest="authtype",
        choices=["None", "CHAP", "CHAP Mutual"],
        help="None, CHAP, CHAP Mutual"),
    "portal_id": Option(
        "--portal-id", action="store", type=int, dest="portal_id",
        help="The portal object id"),
    "initiatorgroup_id": Option(
        "--initiatorgroup-id", action="store", type=int, dest="initiatorgroup_id",
        help="The initiator group object id"),
    "mappings": Option(
        "--mappings", action="append", dest="mappings",
        help="A :,,... mapping used in add map in replacement of --targetgroup and --initiatorgroup. Can be specified multiple times."),
})
# Options valid for every action.
GLOBAL_OPTS = [
    OPT.array,
]
# No deprecated action names for this driver.
DEPRECATED_ACTIONS = []
# Supported actions, grouped by section for the usage display. Each
# action maps to a Freenas method of the same name; "options" lists the
# OPT entries the action accepts.
ACTIONS = {
    "Add actions": {
        "add_iscsi_file": {
            "msg": "Add and present a file-backed iscsi disk",
            "options": [
                OPT.name,
                OPT.volume,
                OPT.size,
                OPT.target,
                OPT.blocksize,
                OPT.secure_tpc,
                OPT.mappings,
            ],
        },
        "add_iscsi_zvol": {
            "msg": "Add and present a zvol-backed iscsi disk",
            "options": [
                OPT.name,
                OPT.volume,
                OPT.size,
                OPT.target,
                OPT.blocksize,
                OPT.secure_tpc,
                OPT.compression,
                OPT.dedup,
                OPT.mappings,
            ],
        },
        "add_iscsi_initiatorgroup": {
            "msg": "Declare a group of iscsi initiator iqn, for use in targetgroups which are portal-target-initiator relations",
            "options": [
                OPT.initiator,
                OPT.comment,
                OPT.auth_network,
            ],
        },
        "add_iscsi_target": {
            "msg": "Declare a iscsi target, for use in targetgroups which are portal-target-initiator relations",
            "options": [
                OPT.name,
                OPT.alias,
            ],
        },
        "add_iscsi_targetgroup": {
            "msg": "Declare a iscsi target group, which is a portal-target-initiator relation",
            "options": [
                OPT.portal_id,
                OPT.target_id,
                OPT.initiatorgroup_id,
                OPT.authgroup_id,
                OPT.authtype,
            ],
        },
    },
    "Delete actions": {
        "del_iscsi_file": {
            "msg": "Delete and unpresent a file-backed iscsi disk",
            "options": [
                OPT.name,
                OPT.naa,
            ],
        },
        "del_iscsi_zvol": {
            "msg": "Delete and unpresent a zvol-backed iscsi disk",
            "options": [
                OPT.name,
                OPT.naa,
            ],
        },
        "del_iscsi_initiatorgroup": {
            "msg": "Delete a group of iscsi initiator iqn, used in targets which are portal-target-initiator relations",
            "options": [
                OPT.id,
            ],
        },
        "del_iscsi_target": {
            "msg": "Delete a iscsi target, used in targets which are portal-target-initiator relations",
            "options": [
                OPT.id,
            ],
        },
        "del_iscsi_targetgroup": {
            "msg": "Delete a iscsi target group, which is a portal-target-initiator relation",
            "options": [
                OPT.id,
            ],
        },
    },
    "Modify actions": {
        "resize_zvol": {
            "msg": "Resize a zvol",
            "options": [
                OPT.name,
                OPT.naa,
                OPT.size,
            ],
        },
    },
    "List actions": {
        "list_mappings": {
            "msg": "List configured volumes",
            "options": [
                OPT.name,
                OPT.naa,
            ],
        },
        "list_volume": {
            "msg": "List configured volumes",
        },
        "list_iscsi_portal": {
            "msg": "List configured portals",
        },
        "list_iscsi_target": {
            "msg": "List configured targets",
        },
        "list_iscsi_targetgroup": {
            "msg": "List configured target groups",
        },
        "list_iscsi_targettoextent": {
            "msg": "List configured target-to-extent relations",
        },
        "list_iscsi_extent": {
            "msg": "List configured extents",
        },
        "list_iscsi_initiatorgroup": {
            "msg": "List configured initiator groups",
        },
    },
}
class Freenass(object):
    """Collection of the freenas arrays declared in auth.conf.

    Scans auth.conf for sections with type=freenas and instanciates one
    Freenas object per section. When `objects` is non-empty, only the
    sections named in it are instanciated.
    """
    # class attribute: the array list is shared by all instances
    arrays = []

    def __init__(self, objects=[]):
        # `objects` is only read (len and membership tests), never
        # mutated, so the mutable default argument is harmless here
        self.objects = objects
        self.filtering = len(objects) > 0
        cf = rcEnv.authconf
        if not os.path.exists(cf):
            # no auth.conf: leave the array list empty
            return
        conf = RawConfigParser()
        conf.read(cf)
        m = []
        for s in conf.sections():
            try:
                stype = conf.get(s, 'type')
            except:
                # section without a type keyword: not an array section
                continue
            if stype != "freenas":
                continue
            try:
                name = s
                api = conf.get(s, 'api')
                username = conf.get(s, 'username')
                password = conf.get(s, 'password')
                m += [(name, api, username, password)]
            except:
                # incomplete section: report and keep scanning
                print("error parsing section", s)
        del conf
        done = []
        for name, api, username, password in m:
            if self.filtering and name not in self.objects:
                continue
            if name in done:
                # don't register the same array twice
                continue
            self.arrays.append(Freenas(name, api, username, password))
            done.append(name)

    def __iter__(self):
        # iterate over the registered arrays
        for array in self.arrays:
            yield(array)

    def get_freenas(self, name):
        """Return the registered array named `name`, or None."""
        for array in self.arrays:
            if array.name == name:
                return array
        return None
class Freenas(object):
    def __init__(self, name, api, username, password):
        """Store the REST endpoint and credentials of one freenas array.

        `api` is the base url (ending in /api/v1.0 — see post2()).
        `keys` lists the dataset names pulled by the collector.
        """
        self.node = None
        self.name = name
        self.api = api
        self.username = username
        self.password = password
        # basic auth tuple passed to every requests call
        self.auth = (username, password)
        self.keys = ['version',
                     'volumes',
                     'iscsi_targets',
                     'iscsi_targettoextents',
                     'iscsi_extents']
    # Thin HTTP helpers around the freenas REST API.
    # NOTE: delete() returns the raw requests.Response (callers inspect
    # .status_code), while put()/post()/get() return the decoded body text.

    def delete(self, uri, data=None):
        """DELETE <api><uri>/ with an optional json payload."""
        api = self.api+uri+"/"
        headers = {'Content-Type': 'application/json'}
        r = requests.delete(api, data=json.dumps(data), auth=self.auth, verify=VERIFY, headers=headers)
        return r

    def put(self, uri, data=None):
        """PUT <api><uri>/ with a json payload; return the decoded body."""
        api = self.api+uri+"/"
        headers = {'Content-Type': 'application/json'}
        r = requests.put(api, data=json.dumps(data), auth=self.auth, verify=VERIFY, headers=headers)
        return bdecode(r.content)

    def post(self, uri, data=None):
        """POST <api><uri>/ with a json payload; return the decoded body."""
        api = self.api+uri+"/"
        headers = {'Content-Type': 'application/json'}
        r = requests.post(api, data=json.dumps(data), auth=self.auth, verify=VERIFY, headers=headers)
        return bdecode(r.content)

    def post2(self, uri, data=None):
        """Form-POST against the web ui (not the REST API).

        The page is fetched first to obtain the csrf token that must be
        sent back in the form data.
        """
        api = self.api.replace("api/v1.0", "")+uri
        s = requests.Session()
        r = s.get(api)
        csrf_token = r.cookies['csrftoken']
        data["csrfmiddlewaretoken"] = csrf_token
        r = requests.post(api, data=data, auth=self.auth, verify=VERIFY)
        return bdecode(r.content)

    def get(self, uri, params=None):
        """GET <api><uri>/?format=json; return the decoded body."""
        r = requests.get(self.api+uri+"/?format=json", params=params, auth=self.auth, verify=VERIFY)
        return bdecode(r.content)
    # REST GET wrappers, one per API collection. All return the raw json
    # text (callers json.loads() it). {"limit": 0} disables the API's
    # default pagination so the whole collection is returned.

    def get_version(self):
        buff = self.get("/system/version")
        return buff

    def get_volume(self, name):
        buff = self.get("/storage/volume/%s" % name, {"limit": 0})
        return buff

    def get_volume_datasets(self, name):
        buff = self.get("/storage/volume/%s/datasets" % name, {"limit": 0})
        return buff

    def get_volumes(self):
        buff = self.get("/storage/volume", {"limit": 0})
        return buff

    def get_iscsi_target_id(self, tgt_id):
        buff = self.get("/services/iscsi/target/%d" % tgt_id)
        return buff

    def get_iscsi_targets(self):
        buff = self.get("/services/iscsi/target", {"limit": 0})
        return buff

    def get_iscsi_targettoextents(self):
        buff = self.get("/services/iscsi/targettoextent", {"limit": 0})
        return buff

    def get_iscsi_extents(self):
        buff = self.get("/services/iscsi/extent", {"limit": 0})
        return buff

    def get_iscsi_portal(self):
        buff = self.get("/services/iscsi/portal", {"limit": 0})
        return buff

    def get_iscsi_targetgroup(self):
        buff = self.get("/services/iscsi/targetgroup", {"limit": 0})
        return buff

    def get_iscsi_targetgroup_id(self, tg_id):
        buff = self.get("/services/iscsi/targetgroup/%d" % tg_id)
        return buff

    def get_iscsi_authorizedinitiator(self):
        buff = self.get("/services/iscsi/authorizedinitiator", {"limit": 0})
        return buff

    def get_iscsi_authorizedinitiator_id(self, initiator_id):
        buff = self.get("/services/iscsi/authorizedinitiator/%d" % initiator_id)
        return buff
    def get_iscsi_target_ids(self, target_names):
        """Return the ids of the targets whose name is in `target_names`."""
        buff = self.get_iscsi_targets()
        data = json.loads(buff)
        l = []
        for target in data:
            if target["iscsi_target_name"] in target_names:
                l.append(target["id"])
        return l

    def get_iscsi_extents_data(self):
        """Return the extents collection decoded from json."""
        buff = self.get_iscsi_extents()
        data = json.loads(buff)
        return data

    def get_iscsi_extent(self, naa=None, name=None):
        """Find one extent by `name` or `naa`; return None when not found."""
        data = self.get_iscsi_extents_data()
        # the API reports naa values with a 0x prefix: normalize the input
        if naa and not naa.startswith("0x"):
            naa = "0x" + naa
        for extent in data:
            if name and name == extent["iscsi_target_extent_name"]:
                return extent
            if naa and naa == extent["iscsi_target_extent_naa"]:
                return extent
    def del_iscsi_extent(self, extent_id):
        """Delete an extent by id; raise excError unless the API answers 204."""
        path = "/services/iscsi/extent/%d" % extent_id
        response = self.delete(path)
        if response.status_code != 204:
            raise ex.excError("delete error: %s (%d)" % (path, response.status_code))
    def add_iscsi_zvol_extent(self, name=None, size=None, volume=None,
                              insecure_tpc=True, blocksize=512, **kwargs):
        """Create a zvol then declare it as an iscsi Disk extent.

        Returns the extent creation API response, decoded from json.
        """
        for key in ["name", "size", "volume"]:
            if locals()[key] is None:
                raise ex.excError("'%s' key is mandatory" % key)
        data = self.add_zvol(name=name, size=size, volume=volume, **kwargs)
        d = {
            "iscsi_target_extent_type": "Disk",
            "iscsi_target_extent_name": name,
            "iscsi_target_extent_insecure_tpc": insecure_tpc,
            "iscsi_target_extent_blocksize": blocksize,
            "iscsi_target_extent_disk": "zvol/%s/%s" % (volume, name),
        }
        buff = self.post("/services/iscsi/extent", d)
        data = json.loads(buff)
        return data

    def add_iscsi_file_extent(self, name=None, size=None, volume=None,
                              insecure_tpc=True, blocksize=512, **kwargs):
        """Declare a file-backed iscsi extent under /mnt/<volume>/<name>."""
        for key in ["name", "size", "volume"]:
            if locals()[key] is None:
                raise ex.excError("'%s' key is mandatory" % key)
        size = convert_size(size, _to="MB")
        d = {
            "iscsi_target_extent_type": "File",
            "iscsi_target_extent_name": name,
            "iscsi_target_extent_insecure_tpc": insecure_tpc,
            "iscsi_target_extent_blocksize": blocksize,
            "iscsi_target_extent_filesize": str(size)+"MB",
            "iscsi_target_extent_path": "/mnt/%s/%s" % (volume, name),
        }
        buff = self.post("/services/iscsi/extent", d)
        data = json.loads(buff)
        return data

    def add_iscsi_targets_to_extent(self, extent_id=None, targets=None,
                                    **kwargs):
        """Map one extent to every target named in `targets`."""
        for key in ["extent_id", "targets"]:
            if locals()[key] is None:
                raise ex.excError("'%s' key is mandatory" % key)
        target_ids = self.get_iscsi_target_ids(targets)
        data = []
        for target_id in target_ids:
            data.append(self.add_iscsi_target_to_extent(target_id, extent_id))
        return data

    def add_iscsi_target_to_extent(self, target_id, extent_id):
        """Create one target-to-extent relation."""
        d = {
            "iscsi_target": target_id,
            "iscsi_extent": extent_id,
        }
        buff = self.post("/services/iscsi/targettoextent", d)
        data = json.loads(buff)
        return data
    def del_zvol(self, name=None, volume=None, **kwargs):
        """Delete a zvol; raise excError unless the API answers 204."""
        for key in ["name", "volume"]:
            if locals()[key] is None:
                raise ex.excError("'%s' key is mandatory" % key)
        path = '/storage/volume/%s/zvols/%s' % (volume, name)
        response = self.delete(path)
        if response.status_code != 204:
            raise ex.excError("delete error: %s (%d)" % (path, response.status_code))

    def add_zvol(self, name=None, size=None, volume=None,
                 compression="inherit", dedup="off",
                 **kwargs):
        """Create a zvol of `size` (any size expression) in `volume`."""
        for key in ["name", "size", "volume"]:
            if locals()[key] is None:
                raise ex.excError("'%s' key is mandatory" % key)
        size = convert_size(size, _to="MB")
        d = {
            "name": name,
            "volsize": str(size)+"MB",
            "compression": compression,
            "dedup": dedup,
        }
        buff = self.post('/storage/volume/%s/zvols/' % volume, d)
        try:
            return json.loads(buff)
        except ValueError:
            # non-json body carries the API error text
            raise ex.excError(buff)

    def get_zvol(self, volume=None, name=None):
        """Return one zvol's properties decoded from json."""
        buff = self.get('/storage/volume/%s/zvols/%s' % (volume, name))
        try:
            return json.loads(buff)
        except ValueError:
            raise ex.excError(buff)
    def list_mappings(self, name=None, naa=None, **kwargs):
        """Build the {hba_id:tgt_id: {disk_id, tgt_id, hba_id}} mapping dict.

        When `name` or `naa` is given, restrict the result to the
        matching extent, raising excError when it does not exist.
        """
        tte_data = json.loads(self.get_iscsi_targettoextents())
        if name is not None or naa is not None:
            data = self.get_iscsi_extent(name=name, naa=naa)
            if data is None:
                raise ex.excError("extent not found")
            extent_id = data["id"]
            tte_data = [d for d in tte_data if d["iscsi_extent"] == extent_id]
        # index the collections by id to resolve the relations below
        extent_data = {}
        for d in json.loads(self.get_iscsi_extents()):
            extent_data[d["id"]] = d
        target_data = {}
        for d in json.loads(self.get_iscsi_targets()):
            target_data[d["id"]] = d
        tg_by_target = {}
        for d in json.loads(self.get_iscsi_targetgroup()):
            if d["iscsi_target"] in tg_by_target:
                tg_by_target[d["iscsi_target"]].append(d)
            else:
                tg_by_target[d["iscsi_target"]] = [d]
        ig_data = {}
        for d in json.loads(self.get_iscsi_authorizedinitiator()):
            ig_data[d["id"]] = d
        mappings = {}
        for d in tte_data:
            for tg in tg_by_target[d["iscsi_target"]]:
                ig_id = tg["iscsi_target_initiatorgroup"]
                ig = ig_data[ig_id]
                # initiator iqns are stored newline-separated by the API
                for hba_id in ig["iscsi_target_initiator_initiators"].split("\n"):
                    tgt_id = target_data[tg["iscsi_target"]]["iscsi_target_name"]
                    mappings[hba_id+":"+tgt_id] = {
                        # strip the API's 0x prefix from the naa
                        "disk_id": extent_data[d["iscsi_extent"]]["iscsi_target_extent_naa"].replace("0x", ""),
                        "tgt_id": tgt_id,
                        "hba_id": hba_id,
                    }
        return mappings
    def resize_zvol(self, name=None, naa=None, size=None, **kwargs):
        """Resize the zvol backing the extent named `name` or identified
        by `naa`.

        A `size` starting with '+' is relative: the increment is added
        to the current volsize. Otherwise `size` is the new absolute size.
        """
        if size is None:
            raise ex.excError("'size' key is mandatory")
        if name is None and naa is None:
            raise ex.excError("'name' or 'naa' must be specified")
        data = self.get_iscsi_extent(name=name, naa=naa)
        if data is None:
            raise ex.excError("extent not found")
        volume = self.extent_volume(data)
        if volume is None:
            raise ex.excError("volume not found")
        if size.startswith("+"):
            # relative grow: fetch the current size and add the increment
            incr = convert_size(size.lstrip("+"), _to="MB")
            zvol_data = self.get_zvol(volume=volume, name=data["iscsi_target_extent_name"])
            current_size = convert_size(int(zvol_data["volsize"]), _to="MB")
            size = str(current_size + incr) + "MB"
        else:
            size = str(convert_size(size, _to="MB")) + "MB"
        d = {
            "volsize": size,
        }
        buff = self.put('/storage/volume/%s/zvols/%s' % (volume, data["iscsi_target_extent_name"]), d)
        try:
            return json.loads(buff)
        except ValueError:
            raise ex.excError(buff)
    def del_iscsi_initiatorgroup(self, ig_id=None, **kwargs):
        """Delete an initiator group and print/return its last known state.

        The group is fetched before deletion so its definition can be
        reported to the caller.
        """
        content = self.get_iscsi_authorizedinitiator_id(ig_id)
        try:
            data = json.loads(content)
        except ValueError:
            raise ex.excError("initiator group not found")
        self._del_iscsi_initiatorgroup(ig_id=ig_id, **kwargs)
        print(json.dumps(data, indent=8))
        return data
def _del_iscsi_initiatorgroup(self, ig_id=None, **kwargs):
if id is None:
raise ex.excError("'id' in mandatory")
response = self.delete('/services/iscsi/authorizedinitiator/%d' % ig_id)
if response.status_code != 204:
raise ex.excError(str(response))
    def add_iscsi_initiatorgroup(self, **kwargs):
        """Create an initiator group. Print and return the created data."""
        data = self._add_iscsi_initiatorgroup(**kwargs)
        print(json.dumps(data, indent=8))
        return data

    def _add_iscsi_initiatorgroup(self, initiators=None, auth_network="ALL", comment=None,
                                  **kwargs):
        """
        Issue the initiator group creation REST call. <initiators> is a
        list of initiator names, comma-joined into the payload.
        """
        # locals() lookup keeps the error message generic per key name
        for key in ["initiators"]:
            if locals()[key] is None:
                raise ex.excError("'%s' key is mandatory" % key)
        d = {
            "iscsi_target_initiator_initiators": ",".join(initiators),
            "iscsi_target_initiator_auth_network": auth_network,
        }
        if comment:
            d["iscsi_target_initiator_comment"] = comment
        buff = self.post('/services/iscsi/authorizedinitiator/', d)
        try:
            return json.loads(buff)
        except ValueError:
            raise ex.excError(buff)
    # targetgroup
    def del_iscsi_targetgroup(self, tg_id=None, **kwargs):
        """
        Delete the target group <tg_id>. Print and return the data of
        the deleted group, or raise excError if it does not exist.
        """
        content = self.get_iscsi_targetgroup_id(tg_id)
        try:
            data = json.loads(content)
        except ValueError:
            raise ex.excError("target group not found")
        self._del_iscsi_targetgroup(tg_id=tg_id, **kwargs)
        print(json.dumps(data, indent=8))
        return data

    def _del_iscsi_targetgroup(self, tg_id=None, **kwargs):
        """Issue the target group deletion REST call."""
        if tg_id is None:
            raise ex.excError("'tg_id' is mandatory")
        response = self.delete('/services/iscsi/targetgroup/%d' % tg_id)
        if response.status_code != 204:
            raise ex.excError(str(response))
def add_iscsi_targetgroup(self, **kwargs):
data = self._add_iscsi_targetgroup(**kwargs)
print(json.dumps(data, indent=8))
return data
def _add_iscsi_targetgroup(self, portal_id=None, initiatorgroup_id=None,
target_id=None, authtype="None",
authgroup_id=None, **kwargs):
for key in ["portal_id", "initiatorgroup_id", "target_id"]:
if locals()[key] is None:
raise ex.excError("'%s' key is mandatory" % key)
d = {
"iscsi_target": target_id,
"iscsi_target_initiatorgroup": initiatorgroup_id,
"iscsi_target_portalgroup": portal_id,
"iscsi_target_authtype": authtype,
"iscsi_target_authgroup": -1,
"iscsi_target_initialdigest": "Auto",
}
if authgroup_id:
d["iscsi_target_authgroup"] = authgroup_id
print(d)
buff = self.post('/services/iscsi/targetgroup/', d)
try:
return json.loads(buff)
except ValueError:
raise ex.excError(buff)
    # target
    def del_iscsi_target(self, target_id=None, **kwargs):
        """
        Delete the target <target_id>. Print and return the data of the
        deleted target, or raise excError if it does not exist.
        """
        content = self.get_iscsi_target_id(target_id)
        try:
            data = json.loads(content)
        except ValueError:
            raise ex.excError("target not found")
        self._del_iscsi_target(target_id=target_id, **kwargs)
        print(json.dumps(data, indent=8))
        return data

    def _del_iscsi_target(self, target_id=None, **kwargs):
        """Issue the target deletion REST call."""
        if target_id is None:
            raise ex.excError("'target_id' is mandatory")
        response = self.delete('/services/iscsi/target/%d' % target_id)
        if response.status_code != 204:
            raise ex.excError(str(response))

    def add_iscsi_target(self, **kwargs):
        """Create a target. Print and return the created data."""
        data = self._add_iscsi_target(**kwargs)
        print(json.dumps(data, indent=8))
        return data
def _add_iscsi_target(self, name=None, alias=None, **kwargs):
for key in ["name"]:
if locals()[key] is None:
raise ex.excError("'%s' key is mandatory" % key)
d = {
"iscsi_target_name": name,
}
if alias:
d["iscsi_target_alias"] = alias
buff = self.post('/services/iscsi/target/', d)
try:
return json.loads(buff)
except ValueError:
raise ex.excError(buff)
    def add_iscsi_file(self, name=None, size=None, volume=None, targets=None,
                       mappings=None, insecure_tpc=True, blocksize=512, **kwargs):
        """
        Create a file-backed iscsi extent and attach it to targets.
        Targets are either given directly or derived from <mappings>.
        Returns a dict with the driver data, disk id and mappings.
        """
        for key in ["name", "size", "volume"]:
            if locals()[key] is None:
                raise ex.excError("'%s' key is mandatory" % key)
        if targets is None and mappings is None:
            raise ex.excError("'targets' or 'mappings' must be specified")
        if mappings is not None and targets is None:
            targets = self.translate_mappings(mappings)
        data = self.add_iscsi_file_extent(name=name, size=size, volume=volume, **kwargs)
        if "id" not in data:
            # creation failed: surface the API validation message(s)
            if "iscsi_target_extent_name" in data:
                if isinstance(data["iscsi_target_extent_name"], list):
                    raise ex.excError("\n".join(data["iscsi_target_extent_name"]))
                raise ex.excError(data["iscsi_target_extent_name"])
            raise ex.excError(str(data))
        self.add_iscsi_targets_to_extent(extent_id=data["id"], targets=targets, **kwargs)
        # NOTE(review): unlike add_iscsi_zvol, this path does not call
        # add_diskinfo() to push the disk to the collector — confirm intended
        disk_id = data["iscsi_target_extent_naa"].replace("0x", "")
        results = {
            "driver_data": data,
            "disk_id": disk_id,
            "disk_devid": data["id"],
            "mappings": self.list_mappings(naa=disk_id),
        }
        return results
def del_iscsi_file(self, name=None, naa=None, **kwargs):
if name is None and naa is None:
raise ex.excError("'name' or 'naa' must be specified")
data = self.get_iscsi_extent(name=name, naa=naa)
if data is None:
return
self.del_iscsi_extent(data["id"])
print(json.dumps(data, indent=8))
def translate_mappings(self, mappings):
targets = set()
for mapping in mappings:
elements = mapping.split(":")
targets |= set(elements[-1].split(","))
targets = list(targets)
return targets
    def add_iscsi_zvol(self, name=None, size=None, volume=None, targets=None,
                       mappings=None, insecure_tpc=True, blocksize=512, **kwargs):
        """
        Create a zvol-backed iscsi extent, attach it to targets and push
        the disk information to the collector.
        Returns a dict with the driver data, disk id and mappings.
        """
        for key in ["name", "size", "volume"]:
            if locals()[key] is None:
                raise ex.excError("'%s' key is mandatory" % key)
        if targets is None and mappings is None:
            raise ex.excError("'targets' or 'mappings' must be specified")
        if mappings is not None and targets is None:
            targets = self.translate_mappings(mappings)
        data = self.add_iscsi_zvol_extent(name=name, size=size, volume=volume, **kwargs)
        if "id" not in data:
            # creation failed: surface the API validation message(s)
            if "iscsi_target_extent_name" in data:
                if isinstance(data["iscsi_target_extent_name"], list):
                    raise ex.excError("\n".join(data["iscsi_target_extent_name"]))
                raise ex.excError(data["iscsi_target_extent_name"])
            raise ex.excError(str(data))
        self.add_iscsi_targets_to_extent(extent_id=data["id"], targets=targets, **kwargs)
        self.add_diskinfo(data, size, volume)
        disk_id = data["iscsi_target_extent_naa"].replace("0x", "")
        results = {
            "driver_data": data,
            "disk_id": disk_id,
            "disk_devid": data["id"],
            "mappings": self.list_mappings(naa=disk_id),
        }
        return results
    def del_iscsi_zvol(self, name=None, naa=None, **kwargs):
        """
        Delete a zvol-backed extent identified by <name> or <naa>:
        remove the extent, its underlying zvol, and its collector disk
        info entry. Silently returns when the extent does not exist.
        """
        if name is None and naa is None:
            raise ex.excError("'name' or 'naa' must be specified")
        data = self.get_iscsi_extent(name=name, naa=naa)
        if data is None:
            # already gone: nothing to do
            return
        volume = self.extent_volume(data)
        self.del_iscsi_extent(data["id"])
        self.del_zvol(name=name, volume=volume)
        self.del_diskinfo(data["iscsi_target_extent_naa"].replace("0x", ""))
        print(json.dumps(data, indent=8))
def extent_volume(self, data):
path = data["iscsi_target_extent_path"].split("/")
volume = path[path.index("zvol")+1]
return volume
def list_volume(self, **kwargs):
data = json.loads(self.get_volumes())
print(json.dumps(data, indent=8))
def list_iscsi_target(self, **kwargs):
data = json.loads(self.get_iscsi_targets())
print(json.dumps(data, indent=8))
def list_iscsi_targettoextent(self, **kwargs):
data = json.loads(self.get_iscsi_targettoextents())
print(json.dumps(data, indent=8))
def list_iscsi_portal(self, **kwargs):
data = json.loads(self.get_iscsi_portal())
print(json.dumps(data, indent=8))
def list_iscsi_targetgroup(self, **kwargs):
data = json.loads(self.get_iscsi_targetgroup())
print(json.dumps(data, indent=8))
def list_iscsi_extent(self, **kwargs):
data = json.loads(self.get_iscsi_extents())
print(json.dumps(data, indent=8))
def list_iscsi_initiatorgroup(self, **kwargs):
data = json.loads(self.get_iscsi_authorizedinitiator())
print(json.dumps(data, indent=8))
    def del_diskinfo(self, disk_id):
        """
        Remove the disk entry from the collector. No-op when disk_id is
        empty or no collector node is attached.
        """
        if disk_id in (None, ""):
            return
        if self.node is None:
            return
        try:
            result = self.node.collector_rest_delete("/disks/%s" % disk_id)
        except Exception as exc:
            raise ex.excError(str(exc))
        if "error" in result:
            raise ex.excError(result["error"])
        return result
def add_diskinfo(self, data, size=None, volume=None):
if self.node is None:
return
try:
result = self.node.collector_rest_post("/disks", {
"disk_id": data["iscsi_target_extent_naa"].replace("0x", ""),
"disk_devid": data["id"],
"disk_name": data["iscsi_target_extent_name"],
"disk_size": convert_size(size, _to="MB"),
"disk_alloc": 0,
"disk_arrayid": self.name,
"disk_group": volume,
})
except Exception as exc:
raise ex.excError(str(exc))
if "error" in data:
raise ex.excError(result["error"])
return result
def do_action(action, array_name=None, node=None, **kwargs):
    """
    Look up the freenas array <array_name> in the registry, attach the
    collector <node>, run <action> on it with the remaining keyword
    arguments, and print the json result when there is one.
    """
    o = Freenass()
    array = o.get_freenas(array_name)
    if array is None:
        raise ex.excError("array %s not found" % array_name)
    array.node = node
    if not hasattr(array, action):
        raise ex.excError("not implemented")
    result = getattr(array, action)(**kwargs)
    if result is not None:
        print(json.dumps(result, indent=4))
def main(argv, node=None):
    """Parse the command line and dispatch to do_action()."""
    parser = OptParser(prog=PROG, options=OPT, actions=ACTIONS,
                       deprecated_actions=DEPRECATED_ACTIONS,
                       global_options=GLOBAL_OPTS)
    options, action = parser.parse_args(argv)
    # pass all parsed options through to the action as keyword args
    kwargs = vars(options)
    do_action(action, node=node, **kwargs)
if __name__ == "__main__":
    try:
        main(sys.argv)
        ret = 0
    except ex.excError as exc:
        # report driver errors on stderr and exit non-zero
        print(exc, file=sys.stderr)
        ret = 1
    sys.exit(ret)
opensvc-1.8~20170412/lib/resContainerLdom.py 0000644 0001750 0001750 00000010405 13073467726 020653 0 ustar jkelbert jkelbert import resources as Res
import rcExceptions as ex
from rcUtilities import qcall
from rcUtilitiesSunOS import check_ping
import resContainer
from rcGlobalEnv import rcEnv
import os
class Ldom(resContainer.Container):
    """
    Solaris LDOM (logical domain) container resource driver, piloted
    through the /usr/sbin/ldm command.
    """
    def __init__(self,
                 rid,
                 name,
                 guestos="SunOS",
                 osvc_root_path=None,
                 **kwargs):
        resContainer.Container.__init__(self,
                                        rid=rid,
                                        name=name,
                                        type="container.ldom",
                                        guestos=guestos,
                                        osvc_root_path=osvc_root_path,
                                        **kwargs)
        # seconds to wait for the guest OS shutdown before forcing a stop
        self.shutdown_timeout = 240
        self.sshbin = '/usr/local/bin/ssh'

    def __str__(self):
        return "%s name=%s" % (Res.Resource.__str__(self), self.name)

    def files_to_sync(self):
        """Return the ldom variable files to replicate to peer nodes."""
        import glob
        a = []
        ldomf = os.path.join(rcEnv.pathvar, 'ldom_'+self.name+'.*')
        files = glob.glob(ldomf)
        if len(files) > 0:
            a += files
        return a

    def check_capabilities(self):
        """Return True if the ldm command is functional on this node."""
        cmd = ['/usr/sbin/ldm', 'list']
        (ret, out, err) = self.call(cmd)
        if ret != 0:
            return False
        return True

    def state(self):
        """ ldm state : None/inactive/bound/active
            ldm list -p domainname outputs:
              VERSION
              DOMAIN|[varname=varvalue]*
        """
        cmd = ['/usr/sbin/ldm', 'list', '-p', self.name]
        (ret, out, err) = self.call(cmd)
        if ret != 0:
            return None
        for word in out.split("|"):
            a = word.split('=')
            if len(a) == 2:
                if a[0] == 'state':
                    return a[1]
        return None

    def ping(self):
        return check_ping(self.addr)

    def container_action(self, action):
        """Run 'ldm <action> <domain>', raising excError on failure."""
        cmd = ['/usr/sbin/ldm', action, self.name]
        (ret, buff, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError
        return None

    def container_start(self):
        """ ldm bind domain
            ldm start domain
        """
        state = self.state()
        # was: state == 'None' — state() returns the None object, not the
        # string 'None', so unknown domains slipped through unreported
        if state is None:
            raise ex.excError
        if state == 'inactive':
            self.container_action('bind')
            self.container_action('start')
        if state == 'bound':
            self.container_action('start')

    def container_forcestop(self):
        """ ldm stop domain (ignoring errors)
            ldm unbind domain
        """
        # was: self.state == 'active' — compared the bound method object to
        # a string, so the stop step was unconditionally skipped
        if self.state() == 'active':
            try:
                self.container_action('stop')
            except ex.excError:
                pass
        self.container_action('unbind')

    def container_stop(self):
        """ launch init 5 into container
            wait_for_shutdown
            ldm stop domain
            ldm unbind domain
        """
        state = self.state()
        # see container_start: None object, not the string 'None'
        if state is None:
            raise ex.excError
        if state == 'inactive':
            return None
        if state == 'bound':
            self.container_action('unbind')
        if state == 'active':
            cmd = rcEnv.rsh.split() + [self.name, '/usr/sbin/init', '5']
            (ret, buff, err) = self.vcall(cmd)
            if ret == 0:
                try:
                    self.log.info("wait for container shutdown")
                    self.wait_for_fn(self.is_shutdown, self.shutdown_timeout, 2)
                except ex.excError:
                    pass
            self.container_forcestop()

    def check_manual_boot(self):
        """Return True unless the domain is set to auto-boot."""
        cmd = ['/usr/sbin/ldm', 'list-variable', 'auto-boot?', self.name]
        (ret, out, err) = self.call(cmd)
        if ret != 0:
            return False
        # NOTE(review): assumes self.call() returns stripped output — a
        # trailing newline would defeat this comparison. TODO confirm.
        if out != 'auto-boot?=False':
            return True
        self.log.info("Auto boot should be turned off")
        return False

    def is_shutdown(self):
        state = self.state()
        if state == 'inactive' or state == 'bound':
            return True
        return False

    def is_down(self):
        if self.state() == 'inactive':
            return True
        return False

    def is_up(self):
        if self.state() == 'active':
            return True
        return False
opensvc-1.8~20170412/lib/checkFsInodeDarwin.py 0000644 0001750 0001750 00000001677 13073467726 021110 0 ustar jkelbert jkelbert import checks
from rcUtilities import justcall
class check(checks.check):
    """
    Filesystem inode usage checker for Darwin, based on 'df -lPi'.
    """
    chk_type = "fs_i"

    def find_svc(self, mountpt):
        """Return the name of the service mounting <mountpt>, or ''."""
        for svc in self.svcs:
            for resource in svc.get_resources('fs'):
                if resource.mount_point == mountpt:
                    return svc.svcname
        return ''

    def do_check(self):
        """Return a list of inode usage instances, one per local fs."""
        cmd = ['df', '-lPi']
        (out, err, ret) = justcall(cmd)
        if ret != 0:
            return self.undef
        lines = out.split('\n')
        if len(lines) < 2:
            return self.undef
        r = []
        for line in lines[1:]:
            l = line.split()
            # df -lPi prints 9 columns:
            # fs blocks used avail capacity iused ifree %iused mounted_on
            if len(l) != 9:
                continue
            # was: l[5].startswith('/Volumes') — l[5] is the iused count,
            # so the intended /Volumes exclusion never matched; the mount
            # point is in l[8]
            if l[8].startswith('/Volumes'):
                continue
            r.append({
                'chk_instance': l[8],
                'chk_value': l[7],
                'chk_svcname': self.find_svc(l[8]),
            })
        return r
opensvc-1.8~20170412/lib/resDiskLoopSunOS.py 0000644 0001750 0001750 00000001047 13073467726 020573 0 ustar jkelbert jkelbert import resources
from rcGlobalEnv import *
import rcStatus
class Loop(resources.Resource):
    """
    No-op loop disk resource for SunOS: always reported up, start and
    stop do nothing.
    """
    def __init__(self, file, **kwargs):
        resources.Resource.__init__(self, **kwargs)
        self.file = file

    def is_up(self):
        """
        Returns True if the volume group is present and activated
        """
        return True

    def start(self):
        pass

    def stop(self):
        pass

    def status(self, verbose=False):
        return rcStatus.UP if self.is_up() else rcStatus.DOWN
opensvc-1.8~20170412/lib/resDiskVgAdvfs.py 0000644 0001750 0001750 00000005637 13073467726 020303 0 ustar jkelbert jkelbert from rcGlobalEnv import rcEnv
import resDisk
from rcUtilities import justcall
import os
import rcExceptions as ex
import re
class Disk(resDisk.Disk):
    """
    Tru64 AdvFS file domain (fdmn) disk resource driver. The backing
    disk list is cached to a json file synced to peer nodes, so a
    passive node can evaluate it without the fdmn being active.
    """
    def __init__(self,
                 rid=None,
                 name=None,
                 **kwargs):
        self.label = 'fdmn ' + name
        resDisk.Disk.__init__(self,
                              rid=rid,
                              name=name,
                              type='disk.vg',
                              **kwargs)

    def disklist_name(self):
        """Path of the json disklist cache file."""
        return os.path.join(rcEnv.pathvar, 'vg_' + self.svc.svcname + '_' + self.name + '.disklist')

    def files_to_sync(self):
        return [self.disklist_name()]

    def presync(self):
        """ this one is exported as a service command line arg
        """
        dl = self._disklist()
        import json
        with open(self.disklist_name(), 'w') as f:
            f.write(json.dumps(list(dl)))

    def has_it(self):
        """Returns True if the pool is present
        """
        if os.path.exists("/etc/fdmns/"+self.name):
            return True
        return False

    def is_up(self):
        """Returns True if the fdmn is present and activated
        """
        if not self.has_it():
            return False
        cmd = ['showfdmn', self.name]
        out, err, ret = justcall(cmd)
        if ret != 0:
            if len(err) > 0:
                self.status_log(err)
            return False
        if 'not active' in out:
            return False
        return True

    def do_start(self):
        pass

    def do_stop(self):
        pass

    def disklist(self):
        """Return the cached disk set, refreshing the cache if possible."""
        if not os.path.exists(self.disklist_name()):
            s = self.svc.group_status(excluded_groups=set(["sync", "hb"]))
            import rcStatus
            if s['overall'].status == rcStatus.UP:
                self.log.debug("no disklist cache file and service up ... refresh disklist cache")
                self.presync()
            else:
                self.log.debug("no disklist cache file and service not up ... unable to evaluate disklist")
                return set([])
        with open(self.disklist_name(), 'r') as f:
            buff = f.read()
        import json
        try:
            dl = set(json.loads(buff))
        except:
            self.log.error("corrupted disklist cache file %s"%self.disklist_name())
            raise ex.excError
        return dl

    def _disklist(self):
        """Evaluate the set of disks backing the fdmn from /etc/fdmns."""
        # return cache if initialized
        if len(self.disks) > 0:
            return self.disks
        disks = set([])
        if not os.path.exists("/etc/fdmns/"+self.name):
            return disks
        import glob
        dl = glob.glob("/etc/fdmns/"+self.name+"/*")
        dl = map(lambda x: os.readlink(x), dl)
        self.disks = set(dl)
        # was: logged the empty local 'disks' instead of the real result
        self.log.debug("found disks %s held by pool %s" % (self.disks, self.name))
        return self.disks
if __name__ == "__main__":
    # quick manual test of the disklist evaluation
    p = Disk(name="dom1")
    # was: 'print p._disklist()' — a python2-only print statement; the
    # function-call form works under both python 2 and 3
    print(p._disklist())
opensvc-1.8~20170412/lib/svcDict.py 0000644 0001750 0001750 00000510470 13073467726 017011 0 ustar jkelbert jkelbert import sys
import os
from rcGlobalEnv import rcEnv
from textwrap import TextWrapper
from rcNode import node_get_node_env
# Map "<section>[.<rtype>].<keyword>" to its replacement keyword name.
# Keywords listed here are hidden from templates and help output.
deprecated_keywords = {
    "DEFAULT.service_type": "env",
    "disk.lvm.vgname": "name",
    "disk.pool.poolname": "name",
    "disk.vg.vgname": "name",
    "sync.rsync.exclude": "options",
    "disk.zpool.poolname": "name",
}
# Map a deprecated section (or section.rtype) name to its replacement
# [section, rtype] pair. Sections listed here get no template rendered.
deprecated_sections = {
    "disk.pool": ["disk", "zpool"],
    "drbd": ["disk", "drbd"],
    "loop": ["disk", "loop"],
    "pool": ["disk", "zpool"],
    "vdisk": ["disk", "vdisk"],
    "vmdg": ["disk", "vmdg"],
    "vg": ["disk", "vg"],
}
class MissKeyNoDefault(Exception):
    """Raised when a required keyword has no value and no default."""
    pass

class KeyInvalidValue(Exception):
    """Raised when a keyword value is not among its strict candidates."""
    pass
class Keyword(object):
    """
    Describes one configuration keyword of a service section: its
    constraints (required, candidates, depends), default value and help
    text, plus template rendering and interactive form input.
    """
    def __init__(self, section, keyword,
                 rtype=None,
                 order=100,
                 required=False,
                 generic=False,
                 at=False,
                 default=None,
                 default_text=None,
                 validator=None,
                 candidates=None,
                 strict_candidates=True,
                 depends=[],
                 text="",
                 example="foo",
                 provisioning=False):
        self.section = section
        self.keyword = keyword
        # normalize rtype to a list (None means generic keyword)
        if rtype is None or type(rtype) == list:
            self.rtype = rtype
        else:
            self.rtype = [rtype]
        self.order = order
        self.generic = generic
        self.at = at
        self.required = required
        self.default = default
        self.default_text = default_text
        self.candidates = candidates
        self.strict_candidates = strict_candidates
        self.depends = depends
        self.text = text
        self.example = example
        self.provisioning = provisioning
        if self.default_text is None:
            self.default_text = self.default

    def __lt__(self, o):
        # keywords sort by their declared display order
        return self.order < o.order

    def deprecated(self):
        """Return True if this keyword appears in deprecated_keywords."""
        if self.rtype is None:
            if self.section+"."+self.keyword in deprecated_keywords:
                return True
            else:
                return False
        for rtype in self.rtype:
            if self.section+"."+rtype+"."+self.keyword in deprecated_keywords:
                return True
        return False

    def template(self):
        """Return the commented template snippet for this keyword."""
        if self.deprecated():
            return ''
        wrapper = TextWrapper(subsequent_indent="#%15s"%"", width=78)
        depends = " && ".join(map(lambda d: "%s in %s"%(d[0], d[1]), self.depends))
        if depends == "":
            depends = None
        if type(self.candidates) in (list, tuple, set):
            candidates = " | ".join(map(lambda x: str(x), self.candidates))
        else:
            candidates = str(self.candidates)
        if not self.strict_candidates:
            candidates += " ..."
        s = '#\n'
        s += "# keyword: %s\n"%self.keyword
        s += "# ----------------------------------------------------------------------------\n"
        s += "# required: %s\n"%str(self.required)
        s += "# provisioning: %s\n"%str(self.provisioning)
        s += "# default: %s\n"%str(self.default_text)
        s += "# candidates: %s\n"%candidates
        s += "# depends: %s\n"%depends
        s += "# scopable: %s\n"%str(self.at)
        s += '#\n'
        if self.text:
            wrapper = TextWrapper(subsequent_indent="#%9s"%"", width=78)
            s += wrapper.fill("# desc: "+self.text) + "\n"
            s += '#\n'
        # pick the example value: default, first candidate, or example
        if self.default_text is not None:
            val = self.default_text
        elif self.candidates and len(self.candidates) > 0:
            val = self.candidates[0]
        else:
            val = self.example
        s += ";" + self.keyword + " = " + str(val) + "\n\n"
        return s

    def __str__(self):
        """Return the on-screen help text for this keyword."""
        if self.deprecated():
            return ''
        wrapper = TextWrapper(subsequent_indent="%15s"%"", width=78)
        depends = ""
        for d in self.depends:
            depends += "%s in %s\n"%(d[0], d[1])
        if depends == "":
            depends = None
        if type(self.candidates) in (list, tuple, set):
            candidates = " | ".join(map(lambda x: str(x), self.candidates))
        else:
            candidates = str(self.candidates)
        if not self.strict_candidates:
            candidates += " ..."
        s = ''
        s += "------------------------------------------------------------------------------\n"
        s += "section: %s\n"%self.section
        s += "keyword: %s\n"%self.keyword
        s += "------------------------------------------------------------------------------\n"
        s += " required: %s\n"%str(self.required)
        s += " provisioning: %s\n"%str(self.provisioning)
        s += " default: %s\n"%str(self.default)
        s += " candidates: %s\n"%candidates
        s += " depends: %s\n"%depends
        s += " scopable: %s\n"%str(self.at)
        if self.text:
            s += wrapper.fill(" help: "+self.text)
        if self.at:
            s += "\n\nPrefix the value with '@ ', '@nodes ', '@drpnodes ', '@flex_primary', '@drp_flex_primary' or '@encapnodes '\n"
            s += "to specify a scope-specific value.\n"
            s += "You will be prompted for new values until you submit an empty value.\n"
        s += "\n"
        return s

    def form(self, d):
        """
        Interactively prompt for this keyword's value(s) and store them
        in dict <d>. Returns the (possibly updated) dict, or None for
        deprecated keywords.
        """
        if self.deprecated():
            return
        # skip this form if dependencies are not met
        for d_keyword, d_value in self.depends:
            if d is None:
                return d
            if d_keyword not in d:
                return d
            if d[d_keyword] not in d_value:
                return d
        # print() the form
        print(self)
        # if we got a json seed, use its values as default
        # else use the Keyword object default
        if d and self.keyword in d:
            default = d[self.keyword]
        elif self.default is not None:
            default = self.default
        else:
            default = None
        if default is not None:
            default_prompt = " [%s] "%str(default)
        else:
            default_prompt = ""
        req_satisfied = False
        while True:
            try:
                val = raw_input(self.keyword+default_prompt+"> ")
            except EOFError:
                break
            if len(val) == 0:
                if req_satisfied:
                    return d
                if default is None and self.required:
                    print("value required")
                    continue
                # keyword is optional, leave dictionary untouched
                return d
            elif self.at and val[0] == '@':
                # '@scope value' syntax: store under 'keyword@scope'
                l = val.split()
                if len(l) < 2:
                    print("invalid value")
                    continue
                val = ' '.join(l[1:])
                d[self.keyword+l[0]] = val
                req_satisfied = True
            else:
                d[self.keyword] = val
                req_satisfied = True
            if self.at:
                # loop for more key@ = values
                print("More '%s' ? to step to the next parameter."%self.keyword)
                continue
            else:
                return d
class Section(object):
    """
    The ordered set of Keyword objects declared for one configuration
    section (DEFAULT, env, or a resource section like disk, fs, ip...).
    """
    def __init__(self, section):
        self.section = section
        self.keywords = []

    def __iadd__(self, o):
        # only Keyword objects can be added; anything else is ignored
        if not isinstance(o, Keyword):
            return self
        self.keywords.append(o)
        return self

    def __str__(self):
        s = ''
        for keyword in sorted(self.keywords):
            s += str(keyword)
        return s

    def template(self):
        """Return the template text, expanded once per 'type' candidate."""
        k = self.getkey("type")
        if k is None:
            return self._template()
        if k.candidates is None:
            return self._template()
        s = ""
        if not k.strict_candidates:
            s += self._template()
        for t in k.candidates:
            s += self._template(t)
        return s

    def _template(self, rtype=None):
        """Build, write to disk and return one (section, rtype) template."""
        section = self.section
        if self.section in deprecated_sections:
            return ""
        if rtype and self.section+"."+rtype in deprecated_sections:
            return ""
        dpath = rcEnv.pathdoc
        fpath = os.path.join(dpath, "template."+section+".conf")
        if rtype:
            section += ", type "+rtype
            fpath = os.path.join(dpath, "template."+self.section+"."+rtype+".conf")
        s = "#"*78 + "\n"
        s += "# %-74s #\n" % " "
        s += "# %-74s #\n" % section
        s += "# %-74s #\n" % " "
        s += "#"*78 + "\n\n"
        if section in ("DEFAULT", "env"):
            s += "[%s]\n" % self.section
        else:
            s += "[%s#0]\n" % self.section
        if rtype is not None:
            s += ";type = " + rtype + "\n\n"
        for keyword in sorted(self.getkeys(rtype)):
            s += keyword.template()
        for keyword in sorted(self.getprovkeys(rtype)):
            s += keyword.template()
        if rtype is not None:
            # also render the generic keywords, except 'type' itself
            for keyword in sorted(self.getkeys()):
                if keyword.keyword == "type":
                    continue
                s += keyword.template()
        with open(fpath, "w") as f:
            f.write(s)
        return s

    def getkeys(self, rtype=None):
        """Return the non-provisioning keywords matching <rtype>."""
        if rtype is None:
            return [k for k in self.keywords if k.rtype is None and not k.provisioning]
        else:
            return [k for k in self.keywords if k.rtype and rtype in k.rtype and not k.provisioning]

    def getprovkeys(self, rtype=None):
        """Return the provisioning keywords matching <rtype>."""
        if rtype is None:
            return [k for k in self.keywords if k.rtype is None and k.provisioning]
        else:
            return [k for k in self.keywords if k.rtype and rtype in k.rtype and k.provisioning]

    def getkey(self, keyword, rtype=None):
        """
        Return the Keyword object named <keyword>, or None.
        A '@node' scope suffix on <keyword> is stripped first.
        """
        if '@' in keyword:
            l = keyword.split('@')
            if len(l) != 2:
                return None
            keyword, node = l
        if rtype:
            for k in self.keywords:
                if k.keyword == keyword and k.rtype and rtype in k.rtype:
                    return k
        else:
            for k in self.keywords:
                if k.keyword == keyword:
                    return k
        return None
class KeywordStore(dict):
    """
    The registry of all Sections and their Keywords, with template
    dumping, config-dict validation and interactive form helpers.
    """
    def __init__(self, provision=False):
        self.sections = {}
        self.provision = provision

    def __iadd__(self, o):
        # route each added Keyword into its Section, creating it on demand
        if not isinstance(o, Keyword):
            return self
        o.top = self
        if o.section not in self.sections:
            self.sections[o.section] = Section(o.section)
        self.sections[o.section] += o
        return self

    def __getattr__(self, key):
        return self.sections[str(key)]

    def __getitem__(self, key):
        # unlike attribute access, item access returns an empty Section
        # instead of raising on an unknown section name
        k = str(key)
        if k not in self.sections:
            return Section(k)
        return self.sections[str(key)]

    def __str__(self):
        s = ''
        for section in self.sections:
            s += str(self.sections[section])
        return s

    def print_templates(self):
        """Render and print the template of every section."""
        for section in sorted(self.sections.keys()):
            print(self.sections[section].template())

    def required_keys(self, section, rtype=None):
        """Return the required Keyword objects of <section>/<rtype>."""
        if section not in self.sections:
            return []
        return [k for k in sorted(self.sections[section].getkeys(rtype)) if k.required is True]

    def purge_keywords_from_dict(self, d, section):
        """Drop from dict <d> the keywords unknown to <section>."""
        if section == "env":
            # env is free-form: accept any keyword
            return d
        if 'type' in d:
            rtype = d['type']
        else:
            rtype = None
        delete_keywords = []
        for keyword, value in d.items():
            key = self.sections[section].getkey(keyword)
            if key is None and rtype is not None:
                key = self.sections[section].getkey(keyword, rtype)
            if key is None:
                if keyword != "rtype":
                    print("Remove unknown keyword '%s' from section '%s'"%(keyword, section))
                delete_keywords.append(keyword)
        for keyword in delete_keywords:
            del d[keyword]
        return d

    def update(self, rid, d):
        """ Given a resource dictionary, spot missing required keys
            and provide a new dictionary to merge populated by default
            values
        """
        import copy
        completion = copy.copy(d)
        # decompose rid into section and rtype
        if rid in ('DEFAULT', 'env'):
            section = rid
            rtype = None
        else:
            if '#' not in rid:
                return {}
            l = rid.split('#')
            if len(l) != 2:
                return {}
            section = l[0]
            if 'type' in d:
                rtype = d['type']
            elif self[section].getkey('type') is not None and \
                 self[section].getkey('type').default is not None:
                rtype = self[section].getkey('type').default
            else:
                rtype = None
        # validate command line dictionary
        for keyword, value in d.items():
            if section == "env":
                break
            key = self.sections[section].getkey(keyword)
            if key is None and rtype is not None:
                key = self.sections[section].getkey(keyword, rtype)
            if key is None:
                continue
            if key.strict_candidates and key.candidates is not None and value not in key.candidates:
                print("'%s' keyword has invalid value '%s' in section '%s'"%(keyword, str(value), rid))
                raise KeyInvalidValue()
        # add missing required keys if they have a known default value
        for key in self.required_keys(section, rtype):
            fkey = ".".join((section, str(rtype), key.keyword))
            if fkey in deprecated_keywords:
                continue
            if key.keyword in d:
                continue
            # a scoped 'keyword@node' entry also satisfies the requirement
            if key.keyword in map(lambda x: x.split('@')[0], d.keys()):
                continue
            if key.default is None:
                sys.stderr.write("No default value for required key '%s' in section '%s'\n"%(key.keyword, rid))
                raise MissKeyNoDefault()
            print("Implicitely add [%s] %s = %s" % (rid, key.keyword, str(key.default)))
            completion[key.keyword] = key.default
        # purge unknown keywords and provisioning keywords
        completion = self.purge_keywords_from_dict(completion, section)
        return completion

    def form_sections(self, sections):
        """Prompt the user for a resource type to add or a rid to edit."""
        wrapper = TextWrapper(subsequent_indent="%18s"%"", width=78)
        candidates = set(self.sections.keys()) - set(['DEFAULT'])
        print("------------------------------------------------------------------------------")
        print("Choose a resource type to add or a resource to edit.")
        print("Enter 'quit' to finish the creation.")
        print("------------------------------------------------------------------------------")
        print(wrapper.fill("resource types: "+', '.join(candidates)))
        print(wrapper.fill("resource ids: "+', '.join(sections.keys())))
        print
        return raw_input("resource type or id> ")

    def free_resource_index(self, section, sections):
        """Return the first unused #<index> for <section> among <sections>."""
        indices = []
        for s in sections:
            l = s.split('#')
            if len(l) != 2:
                continue
            sname, sindex = l
            if section != sname:
                continue
            try:
                indices.append(int(sindex))
            except:
                continue
        i = 0
        while True:
            if i not in indices:
                return i
            i += 1

    def form(self, defaults, sections):
        """
        Interactive service configuration builder: fill DEFAULT, then
        loop prompting for resource sections until 'quit' or EOF.
        Returns the (defaults, sections) pair.
        """
        for key in sorted(self.DEFAULT.getkeys()):
            defaults = key.form(defaults)
        while True:
            try:
                section = self.form_sections(sections)
            except EOFError:
                break
            if section == "quit":
                break
            if '#' in section:
                # an existing rid was given: edit it
                rid = section
                section = section.split('#')[0]
            else:
                # a type was given: allocate a new rid
                index = self.free_resource_index(section, sections)
                rid = '#'.join((section, str(index)))
            if section not in self.sections:
                print("unsupported resource type")
                continue
            for key in sorted(self.sections[section].getkeys()):
                if rid not in sections:
                    sections[rid] = {}
                sections[rid] = key.form(sections[rid])
            if 'type' in sections[rid]:
                specific_keys = self.sections[section].getkeys(rtype=sections[rid]['type'])
                if len(specific_keys) > 0:
                    print("\nKeywords specific to the '%s' driver\n"%sections[rid]['type'])
                for key in sorted(specific_keys):
                    if rid not in sections:
                        sections[rid] = {}
                    sections[rid] = key.form(sections[rid])
            # purge the provisioning keywords
            sections[rid] = self.purge_keywords_from_dict(sections[rid], section)
        return defaults, sections
class KeywordInteger(Keyword):
    """A Keyword whose value must parse as an integer."""
    def validator(self, val, d=None):
        """Return True if <val> converts to int, False otherwise."""
        # narrowed from a bare 'except:': only conversion failures mean
        # an invalid value; other exceptions should propagate
        try:
            val = int(val)
        except (ValueError, TypeError):
            return False
        return True
class KeywordProvision(Keyword):
    """provision.provision: whether to provision the resource."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="provision",
            keyword="provision",
            default="no",
            candidates=('yes', 'no'),
            text="Say yes to provision this resource. Warning, provisioning implies destructive operations like formating."
        )
class KeywordLockTimeout(Keyword):
    """DEFAULT.lock_timeout: action lock acquisition timeout."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="lock_timeout",
            required=False,
            order=10,
            default=60,
            text="The duration in seconds the agent wait for the action lock acquisition before aborting the action. The svcmgr --waitlock parameter overides this option."
        )

class KeywordMode(Keyword):
    """DEFAULT.mode: service management mode (hosted or wrapped cluster)."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="mode",
            required=False,
            order=10,
            default="hosted",
            candidates=["hosted", "sg", "vcs", "rhcs"],
            text="The mode decides upon disposition OpenSVC takes to bring a service up or down : virtualized services need special actions to prepare and boot the container for example, which is not needed for 'hosted' services."
        )

class KeywordPkgName(Keyword):
    """DEFAULT.pkg_name: wrapped cluster package name (sg/vcs/rhcs modes)."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="pkg_name",
            at=True,
            required=False,
            order=11,
            depends=[('mode', ["vcs", "sg", "rhcs"])],
            text="The wrapped cluster package name, as known to the cluster manager in charge."
        )

class KeywordRollback(Keyword):
    """DEFAULT.rollback: enable/disable rollback on action error."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="rollback",
            at=True,
            required=False,
            order=11,
            default=True,
            text="If set to False, the default rollback on action error is inhibited, leaving the service in its half-started state."
        )
class KeywordCompSchedule(Keyword):
    """DEFAULT.comp_schedule: compliance run schedule."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="comp_schedule",
            at=True,
            required=False,
            order=11,
            default="00:00-06:00@361",
            text="The service compliance run schedule. See usr/share/doc/template.node.conf for the schedule syntax."
        )

class KeywordStatusSchedule(Keyword):
    """DEFAULT.status_schedule: status evaluation schedule."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="status_schedule",
            at=True,
            required=False,
            order=11,
            default="@10",
            text="The service status evaluation schedule. See usr/share/doc/template.node.conf for the schedule syntax."
        )

class KeywordDefaultSyncSchedule(Keyword):
    """DEFAULT.sync_schedule: default schedule for sync resources."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="sync_schedule",
            at=True,
            required=False,
            order=11,
            default="04:00-06:00@121",
            text="The default sync resources schedule. See usr/share/doc/template.node.conf for the schedule syntax."
        )

class KeywordResinfoSchedule(Keyword):
    """DEFAULT.resinfo_schedule: resource info push schedule."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="resinfo_schedule",
            at=True,
            required=False,
            order=11,
            default="@60",
            text="The service resource info push schedule. See usr/share/doc/template.node.conf for the schedule syntax."
        )

class KeywordMonitorSchedule(Keyword):
    """DEFAULT.monitor_schedule: resource monitor schedule."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="monitor_schedule",
            at=True,
            required=False,
            order=11,
            default="@1",
            text="The service resource monitor schedule. See usr/share/doc/template.node.conf for the schedule syntax."
        )

class KeywordPushSchedule(Keyword):
    """DEFAULT.push_schedule: config push-to-collector schedule."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="push_schedule",
            at=True,
            required=False,
            order=11,
            default="00:00-06:00@361",
            text="The service configuration emission to the collector schedule. See usr/share/doc/template.node.conf for the schedule syntax."
        )
class KeywordFlexPrimary(Keyword):
    """DEFAULT.flex_primary: node driving the peers in a flex cluster."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="flex_primary",
            at=True,
            required=False,
            order=11,
            depends=[('cluster_type', ["flex"])],
            default_text="",
            text="The node in charge of syncing the other nodes. --cluster actions on the flex_primary are execute on all peer nodes (ie, not drpnodes)."
        )

class KeywordDrpFlexPrimary(Keyword):
    """DEFAULT.drp_flex_primary: drpnode driving the other drpnodes."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="drp_flex_primary",
            at=True,
            required=False,
            order=11,
            depends=[('cluster_type', ["flex"])],
            default_text="",
            text="The drpnode in charge of syncing the other drpnodes. --cluster actions on the drp_flex_primary are execute on all drpnodes (ie, not pri nodes)."
        )
# DEFAULT.docker_swarm_managers: nodes promoted to swarm manager role.
class KeywordDockerSwarmManagers(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="docker_swarm_managers",
            order=20,
            at=True,
            required=False,
            # help-text fixes: missing space after "managers.", "implicitely" -> "implicitly"
            text="List of nodes promoted as docker swarm managers. The flex primary node is implicitly a manager. Whitespace separated."
        )
# DEFAULT.docker_exe: pin the docker binary to a fixed path, bypassing PATH lookup.
class KeywordDockerExe(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="docker_exe",
            at=True,
            required=False,
            order=12,
            text="If you have multiple docker versions installed and want the service to stick to a version whatever the PATH definition, you should set this parameter to the full path to the docker executable.",
            example="/usr/bin/docker-1.8"
        )
# DEFAULT.docker_data_dir: data dir of the per-service private docker daemon.
# NOTE(review): the "//docker.sock" in the text looks like a mangled
# placeholder (presumably "<docker_data_dir>/docker.sock") — confirm against
# the upstream template docs before relying on it.
class KeywordDockerDataDir(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="docker_data_dir",
            at=True,
            required=False,
            order=12,
            text="If the service has docker-type container resources and docker_daemon_private is set to True, the service handles the startup of a private docker daemon. Its socket is //docker.sock, and its data directory must be specified using this parameter. This organization is necessary to enable service relocalization.",
            example="/srv/svc1/data/docker"
        )
# DEFAULT.docker_daemon_private: toggle between a per-service docker daemon
# (True, the default) and the system-wide shared daemon (False).
class KeywordDockerDaemonPrivate(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="docker_daemon_private",
            at=True,
            required=False,
            default=True,
            order=11,
            # help-text fix: "This is parameter is forced" -> "This parameter is forced"
            text="If set to False, this service will use the system's shared docker daemon instance. This parameter is forced to False on non-Linux systems.",
            example="True"
        )
# DEFAULT.docker_daemon_args: extra admin-supplied flags for the private
# docker daemon (socket and data dir flags are set by the agent itself).
class KeywordDockerDaemonArgs(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="docker_daemon_args",
            at=True,
            required=False,
            order=12,
            text="If the service has docker-type container resources, the service handles the startup of a private docker daemon. OpenSVC sets the socket and data dir parameters. Admins can set extra parameters using this keyword. For example, it can be useful to set the --ip parameter for a docker registry service.",
            example="--ip 1.2.3.4"
        )
# DEFAULT.docker_swarm_args: extra flags for "docker swarm init/join";
# --token is reserved to the agent.
class KeywordDockerSwarmArgs(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="docker_swarm_args",
            at=True,
            required=False,
            order=12,
            # help-text fix: "the the other nodes" -> "the other nodes"
            text="The arguments passed to docker swarm init on the flex primary, and to docker swarm join on the other nodes. The --token argument must not be specified, as it is handled by the agent. Scoping this parameter permits to set additional parameters on the flex_primary for use with swarm init only, like --autolock.",
            # example fix: the docker CLI flag is --advertise-addr, not --advertize-addr
            example="--advertise-addr {ip#0.ipname} --listen-addr {ip#0.ipname}",
        )
# --- subset / stonith / container-type keywords --------------------------

# subset.parallel: run actions on subset members concurrently.
class KeywordSubsetParallel(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="subset",
            keyword="parallel",
            at=True,
            candidates=(True, False),
            default=False,
            text="If set to true, actions are executed in parallel amongst the subset member resources.",
            required=False,
            order=2
        )
# stonith.type: fencing driver selector (ilo or callout).
class KeywordStonithType(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="stonith",
            keyword="type",
            at=True,
            candidates=["ilo", "callout"],
            text="The type of stonith.",
            required=True,
            order=1
        )
# stonith.target: management console named in auth.conf to fence through.
class KeywordStonithTarget(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="stonith",
            keyword="target",
            at=True,
            text="The server management console to pass the stonith command to, as defined in the corresponding auth.conf section title.",
            required=True,
            order=2
        )
# stonith.cmd (callout driver only): the command executed on the target.
class KeywordStonithCalloutCmd(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="stonith",
            rtype="callout",
            at=True,
            keyword="cmd",
            text="The command to execute on target to stonith.",
            required=True,
            order=3
        )
# container.type: virtualization driver, restricted to platforms this agent supports.
class KeywordContainerType(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            keyword="type",
            at=True,
            candidates=rcEnv.vt_supported,
            text="The type of container.",
            required=True,
            order=1
        )
# container.delete_on_stop (zone): drop the zone config on stop, keeping an
# exported copy replicated to peers for the next start/takeover.
class KeywordContainerZoneDeleteOnStop(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            rtype="zone",
            keyword="delete_on_stop",
            at=True,
            candidates=(True, False),
            text="If set to true, the zone configuration is deleted after a resource stop. The agent maintains an export of the configuration for the next start. This export is replicated to the other nodes and drp nodes so they can take over the zone even if it is completely hosted on a shared disk.",
            default=False,
            required=False,
            order=1
        )
# container.docker_service (docker): run the container as a swarm service.
class KeywordDockerDockerService(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            keyword="docker_service",
            at=True,
            order=9,
            required=False,
            rtype="docker",
            default=False,
            candidates=(True, False),
            text="If set to True, run this container as a docker service, which is possible if the cluster_type is set to flex and the docker swarm properly initialized.",
            example=False
        )
# container.run_image (docker): the image used for pull and run.
class KeywordDockerRunImage(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            keyword="run_image",
            at=True,
            order=9,
            required=False,
            rtype="docker",
            # help-text fix: "The docker image pull" -> "The docker image to pull"
            text="The docker image to pull, and run the container with.",
            example="83f2a3dd2980"
        )
# container.run_command (docker): command executed inside the container on run.
class KeywordDockerRunCommand(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            keyword="run_command",
            at=True,
            order=1,
            required=False,
            rtype="docker",
            text="The command to execute in the docker container on run.",
            example="/opt/tomcat/bin/catalina.sh"
        )
# container.run_args (docker): extra "docker run" flags (volumes, ports, ...).
class KeywordDockerRunArgs(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            keyword="run_args",
            at=True,
            order=2,
            required=False,
            rtype="docker",
            text="Extra arguments to pass to the docker run command, like volume and port mappings.",
            example="-v /opt/docker.opensvc.com/vol1:/vol1:rw -p 37.59.71.25:8080:8080"
        )
# --- provisioning-only container keywords --------------------------------
# container.virtinst (kvm/xen/ovm): virt-install command used to create the VM.
class KeywordVirtinst(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            keyword="virtinst",
            rtype=["kvm", "xen", "ovm"],
            text="The virt-install command to use to create the container.",
            required=True,
            provisioning=True
        )
# container.snap: destination path of the snapshot/clone holding new disk files.
class KeywordSnap(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            keyword="snap",
            rtype=["kvm", "xen", "ovm", "zone", "esx"],
            text="The target snapshot/clone full path containing the new container disk files.",
            required=True,
            provisioning=True
        )
# container.snapof: origin path the snapshot is taken from.
class KeywordSnapof(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            keyword="snapof",
            rtype=["kvm", "xen", "ovm", "zone", "esx"],
            text="The snapshot origin full path containing the reference container disk files.",
            required=True,
            provisioning=True
        )
# container.container_origin (zone): reference container to clone from.
class KeywordContainerOrigin(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            keyword="container_origin",
            rtype="zone",
            text="The origin container having the reference container disk files.",
            required=True,
            provisioning=True
        )
# container.jailroot (jail): root fs directory of the jail (not provisioning).
class KeywordJailRoot(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            keyword="jailroot",
            rtype="jail",
            text="Sets the root fs directory of the container",
            required=True,
            provisioning=False
        )
# container.cf (lxc): non-standard location of the lxc config file.
class KeywordLxcCf(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            keyword="cf",
            rtype="lxc",
            text="Defines a lxc configuration file in a non-standard location.",
            required=False,
            provisioning=True,
            example="/srv/mycontainer/config"
        )
# container.rootfs (lxc/vz/zone): root fs directory of the container.
class KeywordRootfs(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            keyword="rootfs",
            rtype=["lxc", "vz", "zone"],
            text="Sets the root fs directory of the container",
            required=True,
            provisioning=True
        )
# container.template (lxc/vz/zone): template url unpacked into the root fs.
class KeywordTemplate(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            keyword="template",
            rtype=["lxc", "vz", "zone"],
            text="Sets the url of the template unpacked into the container root fs.",
            required=True,
            provisioning=True
        )
# container.name: VM name when it differs from the service name.
class KeywordVmName(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            keyword="name",
            at=True,
            order=2,
            rtype=rcEnv.vt_supported,
            # help-text grammar fix: "This need to be set" -> "This needs to be set"
            text="This needs to be set if the virtual machine name is different from the service name."
        )
# container.rcmd (lxc): override the agent's default in-container remote command.
class KeywordContainerRcmd(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            keyword="rcmd",
            at=True,
            order=2,
            rtype="lxc",
            example="lxc-attach -e -n osvtavnprov01 -- ",
            # help-text grammar fix: "An container remote command override the agent default"
            text="A container remote command overriding the agent default"
        )
# container.osvc_root_path: source-install path of the agent inside the
# container; leave unset/None when the agent is package-installed.
class KeywordOsvcRootPath(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            keyword="osvc_root_path",
            at=True,
            order=2,
            rtype=rcEnv.vt_supported,
            example="/opt/opensvc",
            text="If the OpenSVC agent is installed via package in the container, this parameter must not be set or set to 'None'. Else the value can be set to the fullpath hosting the agent installed from sources."
        )
# container.guestos: guest OS family, drives in-guest command syntax.
class KeywordGuestos(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            keyword="guestos",
            at=True,
            rtype=rcEnv.vt_supported,
            order=11,
            candidates=["unix", "windows"],
            default=None,
            text="The operating system in the virtual machine."
        )
# container.ips (jail): ipv4 addresses bound to the jail.
class KeywordJailIps(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            keyword="ips",
            at=True,
            rtype="jail",
            order=11,
            text="The ipv4 addresses of the jail."
        )
# container.ip6s (jail): ipv6 addresses bound to the jail.
class KeywordJailIp6s(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            keyword="ip6s",
            at=True,
            rtype="jail",
            order=11,
            text="The ipv6 addresses of the jail."
        )
# --- cloud-provider container keywords (rtype restricted to vt_cloud) ----
class KeywordSharedIpGroup(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            keyword="shared_ip_group",
            at=True,
            order=11,
            rtype=rcEnv.vt_cloud,
            text="The cloud shared ip group name to allocate a public ip from."
        )
class KeywordSize(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            keyword="size",
            at=True,
            order=11,
            rtype=rcEnv.vt_cloud,
            text="The cloud vm size, as known to the cloud manager. Example: tiny."
        )
class KeywordKeyName(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            keyword="key_name",
            at=True,
            order=11,
            rtype=rcEnv.vt_cloud,
            text="The key name, as known to the cloud manager, to trust in the provisioned vm."
        )
# container.prm_cores (srp, provisioning): CPU core count bound to the SRP.
class KeywordSrpPrmCores(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            keyword="prm_cores",
            order=11,
            rtype="srp",
            default=1,
            provisioning=True,
            # help-text grammar fix: "number of core" -> "number of cores"
            text="The number of cores to bind the SRP container to."
        )
# container.ip (srp, provisioning): ip name/addr used to create the SRP.
class KeywordSrpIp(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            keyword="ip",
            at=True,
            order=11,
            rtype="srp",
            provisioning=True,
            text="The ip name or addr used to create the SRP container."
        )
# container.rootpath (srp, provisioning): SRP root filesystem path.
class KeywordSrpRootpath(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            keyword="rootpath",
            at=True,
            order=11,
            rtype="srp",
            provisioning=True,
            text="The path of the SRP container root filesystem."
        )
# container.cloud_id: reference to a cloud#N section in node.conf.
class KeywordCloudId(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            keyword="cloud_id",
            at=True,
            order=11,
            rtype=rcEnv.vt_cloud,
            text="The cloud id as configured in node.conf. Example: cloud#1."
        )
# container.uuid (ovm): VM identifier used to address commands to the VM.
class KeywordVmUuid(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="container",
            keyword="uuid",
            at=True,
            order=11,
            rtype="ovm",
            text="The virtual machine unique identifier used to pass commands on the VM."
        )
# DEFAULT.anti_affinity: services that must not co-locate with this one.
class KeywordAntiAffinity(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="anti_affinity",
            at=True,
            order=15,
            required=False,
            default=None,
            text="A whitespace separated list of services this service is not allowed to be started on the same node. The svcmgr --ignore-affinity option can be set to override this policy.",
            example="svc1 svc2"
        )
# DEFAULT.prkey: service-level default scsi3 persistent reservation key.
class KeywordPrKey(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="prkey",
            order=15,
            at=True,
            required=False,
            text="Defines a specific default persistent reservation key for the service. A prkey set in a resource takes priority. If no prkey is specified in the service nor in the DEFAULT section, the prkey in node.conf is used. If node.conf has no prkey set, the hostid is computed and written in node.conf."
        )
# DEFAULT.no_preempt_abort: use plain preempt instead of preempt-and-abort
# for scsi3 reservations (some targets, e.g. esx, lack preempt-and-abort).
class KeywordNoPreemptAbort(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="no_preempt_abort",
            order=15,
            at=True,
            required=False,
            candidates=(True, False),
            default=False,
            # help-text fix: duplicated word "and and abort" -> "and abort"
            text="If set to 'true', OpenSVC will preempt scsi reservation with a preempt command instead of a preempt and abort. Some scsi target implementations do not support this last mode (esx). If set to 'false' or not set, 'no_preempt_abort' can be activated on a per-resource basis."
        )
# --- cluster topology keywords (DEFAULT section) -------------------------

# DEFAULT.cluster: symbolic cluster name used to label shared disks.
class KeywordCluster(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="cluster",
            order=15,
            required=False,
            default=None,
            text="The symbolic name of the cluster. Used to label shared disks represented to tiers-2 consumers like containers.",
            example="cluster1"
        )
# DEFAULT.show_disabled: include disabled resources in status outputs.
class KeywordShowDisabled(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="show_disabled",
            at=True,
            order=15,
            required=False,
            default=True,
            candidates=[True, False],
            text="Specifies if the disabled resources must be included in the print status and json status output."
        )
# DEFAULT.cluster_type: failover / flex / autoflex topology selector.
# NOTE(review): the text mentions an "allactive" mode that is absent from
# candidates — the description may be stale; confirm before documenting it.
class KeywordClusterType(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="cluster_type",
            at=True,
            order=15,
            required=False,
            default="failover",
            candidates=["failover", "flex", "autoflex"],
            text="failover: the service is allowed to be up on one node at a time. allactive: the service must be up on all nodes. flex: the service can be up on n out of m nodes (n <= m), n/m must be in the [flex_min_nodes, flex_max_nodes] range. autoflex: same as flex, but charge the collector to start the service on passive nodes when the average %cpu usage on active nodes > flex_cpu_high_threshold and stop the service on active nodes when the average %cpu usage on active nodes < flex_cpu_low_threshold."
        )
# DEFAULT.flex_min_nodes: collector alert floor for active node count.
class KeywordFlexMinNodes(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="flex_min_nodes",
            order=16,
            required=False,
            default=1,
            depends=[("cluster_type", ["flex", "autoflex"])],
            text="Minimum number of active nodes in the cluster. Below this number alerts are raised by the collector, and the collector won't stop any more service instances."
        )
# DEFAULT.flex_max_nodes: collector alert ceiling for active node count.
class KeywordFlexMaxNodes(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="flex_max_nodes",
            order=16,
            required=False,
            default=10,
            depends=[("cluster_type", ["flex", "autoflex"])],
            text="Maximum number of active nodes in the cluster. Above this number alerts are raised by the collector, and the collector won't start any more service instances. 0 means unlimited."
        )
# DEFAULT.flex_cpu_min_threshold: avg %cpu below which autoflex scales down.
class KeywordFlexCpuMinThreshold(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="flex_cpu_min_threshold",
            order=16,
            required=False,
            default=10,
            depends=[("cluster_type", ["flex", "autoflex"])],
            text="Average CPU usage across the active cluster nodes below which the collector raises alerts and decides to stop service instances with autoflex cluster type."
        )
# DEFAULT.flex_cpu_max_threshold: avg %cpu above which autoflex scales up.
class KeywordFlexCpuMaxThreshold(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="flex_cpu_max_threshold",
            order=16,
            required=False,
            default=70,
            depends=[("cluster_type", ["flex", "autoflex"])],
            text="Average CPU usage across the active cluster nodes above which the collector raises alerts and decides to start new service instances with autoflex cluster type."
        )
# DEFAULT.service_type: legacy env classifier (see 'env' below, same candidates).
class KeywordServiceType(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="service_type",
            order=15,
            required=False,
            candidates=rcEnv.allowed_svc_envs,
            text="A non-PRD service can not be brought up on a PRD node, but a PRD service can be startup on a non-PRD node (in a DRP situation). The default value is the node env."
        )
# DEFAULT.env: PRD/non-PRD classifier, defaulting to the node's env.
class KeywordServiceEnv(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="env",
            order=15,
            required=True,
            default=node_get_node_env(),
            default_text="",
            candidates=rcEnv.allowed_svc_envs,
            text="A non-PRD service can not be brought up on a PRD node, but a PRD service can be startup on a non-PRD node (in a DRP situation). The default value is the node env."
        )
# DEFAULT.nodes: local cluster nodes able to start the service.
class KeywordNodes(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="nodes",
            order=20,
            at=True,
            required=True,
            default=rcEnv.nodename,
            default_text="",
            text="List of cluster local nodes able to start the service. Whitespace separated."
        )
# DEFAULT.autostart_node: subset of 'nodes' that start the service at boot.
class KeywordAutostartNode(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="autostart_node",
            order=20,
            at=True,
            required=False,
            default=rcEnv.nodename,
            default_text="",
            text="A whitespace-separated list subset of 'nodes'. Defines the nodes where the service will try to start on upon node reboot. On a failover cluster there should only be one autostart node and the start-up will fail if the service is already up on another node though. If not specified, the service will never be started at node boot-time, which is rarely the expected behaviour."
        )
# DEFAULT.drpnode: primary disaster-recovery node (also a sync target).
class KeywordDrpnode(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="drpnode",
            order=21,
            at=True,
            text="The backup node where the service is activated in a DRP situation. This node is also a data synchronization target for 'sync' resources.",
            example="node1"
        )
# DEFAULT.drpnodes: alternate DRP nodes (also sync targets).
class KeywordDrpnodes(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="drpnodes",
            order=21,
            at=True,
            text="Alternate backup nodes, where the service could be activated in a DRP situation if the 'drpnode' is not available. These nodes are also data synchronization targets for 'sync' resources.",
            example="node1 node2"
        )
# DEFAULT.encapnodes: agent-equipped containers enabling @encapnodes scoping.
class KeywordEncapnodes(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="encapnodes",
            order=21,
            text="The list of containers handled by this service and with an OpenSVC agent installed to handle the encapsulated resources. With this parameter set, parameters can be scoped with the @encapnodes suffix.",
            example="vm1 vm2"
        )
# DEFAULT.app: short responsibility/billing code used as a filtering key.
class KeywordApp(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="app",
            order=24,
            default="DEFAULT",
            # help-text grammar fix: "responsible for is service" -> "responsible for this service"
            text="Used to identify who is responsible for this service, who is billable and provides a most useful filtering key. Better keep it a short code."
        )
# DEFAULT.comment: free-form description of the service's role.
class KeywordComment(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="comment",
            order=25,
            text="Helps users understand the role of the service, which is nice to on-call support people having to operate on a service they are not usually responsible for."
        )
# DEFAULT.scsireserv: service-wide scsi3 persistent reservation toggle.
class KeywordScsireserv(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="scsireserv",
            at=True,
            order=25,
            default=False,
            candidates=(True, False),
            text="If set to 'true', OpenSVC will try to acquire a type-5 (write exclusive, registrant only) scsi3 persistent reservation on every path to disks of every disk group attached to this service. Existing reservations are preempted to not block service start-up. If the start-up was not legitimate the data are still protected from being written over from both nodes. If set to 'false' or not set, 'scsireserv' can be activated on a per-resource basis."
        )
# DEFAULT.bwlimit: KB/s cap applied to all rsync transfers (empty = no limit).
class KeywordBwlimit(KeywordInteger):
    def __init__(self):
        KeywordInteger.__init__(
            self,
            section="DEFAULT",
            keyword="bwlimit",
            order=25,
            text="Bandwidth limit in KB applied to all rsync transfers. Leave empty to enforce no limit.",
            example="3000"
        )
# DEFAULT.sync_interval: minimum delay (minutes) between sync runs.
# NOTE(review): the text refers to 'sync_min_delay' while the keyword is
# sync_interval, and the "/sync/[service]![dst]" path looks like a mangled
# template — confirm both against upstream before further edits.
class KeywordSyncInterval(KeywordInteger):
    def __init__(self):
        KeywordInteger.__init__(
            self,
            section="DEFAULT",
            keyword="sync_interval",
            order=26,
            default=121,
            # help-text spelling fix: "mecanism" -> "mechanism"
            text="Set the minimum delay between syncs in minutes. If a sync is triggered through crond or manually, it is skipped if last sync occurred less than 'sync_min_delay' ago. The mechanism is enforced by a timestamp created upon each sync completion in /sync/[service]![dst]"
        )
# DEFAULT.sync_max_delay: minutes after which a sync resource is considered down.
class KeywordSyncMaxDelay(KeywordInteger):
    def __init__(self):
        KeywordInteger.__init__(
            self,
            section="DEFAULT",
            keyword="sync_max_delay",
            order=27,
            default=1440,
            text="Unit is minutes. This sets to delay above which the sync status of the resource is to be considered down. Should be set according to your application service level agreement. The cron job frequency should be set between 'sync_min_delay' and 'sync_max_delay'"
        )
# DEFAULT.presnap_trigger: hook run before snapshot creation (quiesce data).
class KeywordPresnapTrigger(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="presnap_trigger",
            order=28,
            # help-text fixes: "use plug" -> "use to plug", "put you data" -> "put your data"
            text="Define a command to run before creating snapshots. This is most likely what you need to use to plug a script to put your data in a coherent state (alter begin backup and the like).",
            example="/srv/svc1/etc/init.d/pre_snap.sh"
        )
# DEFAULT.postsnap_trigger: hook run after snapshots, undoing presnap_trigger.
class KeywordPostsnapTrigger(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="postsnap_trigger",
            order=29,
            # help-text fix: "use plug" -> "use to plug"
            text="Define a command to run after snapshots are created. This is most likely what you need to use to plug a script to undo the actions of 'presnap_trigger'.",
            example="/srv/svc1/etc/init.d/post_snap.sh"
        )
# DEFAULT.monitor_action: last-resort action when a monitored resource is
# down and restart attempts have failed.
class KeywordMonitorAction(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="monitor_action",
            at=True,
            order=30,
            default=None,
            candidates=("reboot", "crash", "freezestop"),
            text="The action to take when a monitored resource is not up nor standby up, and if the resource restart procedure has failed.",
            example="reboot"
        )
# --- process-group (cgroup) keywords; all depend on create_pg=True -------

# DEFAULT.create_pg: wrap service processes in a process container.
class KeywordCreatePg(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="DEFAULT",
            keyword="create_pg",
            order=30,
            default=True,
            candidates=(True, False),
            text="Use process containers when possible. Containers allow capping memory, swap and cpu usage per service. Lxc containers are naturally containerized, so skip containerization of their startapp."
        )
# DEFAULT.pg_cpus: cpuset binding (list or range).
class KeywordPgCpus(KeywordInteger):
    def __init__(self):
        KeywordInteger.__init__(
            self,
            section="DEFAULT",
            keyword="pg_cpus",
            order=31,
            depends=[('create_pg', [True])],
            text="Allow service process to bind only the specified cpus. Cpus are specified as list or range : 0,1,2 or 0-2",
            example="0-2"
        )
# DEFAULT.pg_mems: NUMA memory-node binding (list or range).
class KeywordPgMems(KeywordInteger):
    def __init__(self):
        KeywordInteger.__init__(
            self,
            section="DEFAULT",
            keyword="pg_mems",
            order=31,
            depends=[('create_pg', [True])],
            text="Allow service process to bind only the specified memory nodes. Memory nodes are specified as list or range : 0,1,2 or 0-2",
            example="0-2"
        )
# DEFAULT.pg_cpu_shares: relative cpu weight (kernel default is 1024).
class KeywordPgCpuShare(KeywordInteger):
    def __init__(self):
        KeywordInteger.__init__(
            self,
            section="DEFAULT",
            keyword="pg_cpu_shares",
            order=31,
            depends=[('create_pg', [True])],
            text="Kernel default value is used, which usually is 1024 shares. In a cpu-bound situation, ensure the service does not use more than its share of cpu ressource. The actual percentile depends on shares allowed to other services.",
            example="512"
        )
# DEFAULT.pg_cpu_quota: absolute cpu quota, or a percent-of-cores expression.
class KeywordPgCpuQuota(KeywordInteger):
    def __init__(self):
        KeywordInteger.__init__(
            self,
            section="DEFAULT",
            keyword="pg_cpu_quota",
            order=31,
            depends=[('create_pg', [True])],
            text="The percent ratio of one core to allocate to the process group if % is specified, else the absolute value to set in the process group parameter. For example, on Linux cgroups, -1 means unlimited, and a positive absolute value means the number of microseconds to allocate each period. 50%@all means 50% of all cores, and 50%@2 means 50% of two cores.",
            example="50%@all"
        )
# DEFAULT.pg_mem_oom_control: cgroup memory.oom_control flag (0=kill, 1=disable).
class KeywordPgMemOomControl(KeywordInteger):
    def __init__(self):
        KeywordInteger.__init__(
            self,
            section="DEFAULT",
            keyword="pg_mem_oom_control",
            order=31,
            depends=[('create_pg', [True])],
            text="A flag (0 or 1) that enables or disables the Out of Memory killer for a cgroup. If enabled (0), tasks that attempt to consume more memory than they are allowed are immediately killed by the OOM killer. The OOM killer is enabled by default in every cgroup using the memory subsystem; to disable it, write 1.",
            example="1"
        )
# DEFAULT.pg_mem_limit: cgroup memory cap in bytes.
class KeywordPgMemLimit(KeywordInteger):
    def __init__(self):
        KeywordInteger.__init__(
            self,
            section="DEFAULT",
            keyword="pg_mem_limit",
            order=31,
            depends=[('create_pg', [True])],
            # help-text spelling fix: "tresspassing" -> "trespassing"
            text="Ensures the service does not use more than specified memory (in bytes). The Out-Of-Memory killer get triggered in case of trespassing.",
            example="512000000"
        )
# DEFAULT.pg_vmem_limit: cgroup memory+swap cap in bytes; must exceed pg_mem_limit.
class KeywordPgVmemLimit(KeywordInteger):
    def __init__(self):
        KeywordInteger.__init__(
            self,
            section="DEFAULT",
            keyword="pg_vmem_limit",
            order=31,
            depends=[('create_pg', [True])],
            # help-text spelling fix: "tresspassing" -> "trespassing"
            text="Ensures the service does not use more than specified memory+swap (in bytes). The Out-Of-Memory killer get triggered in case of trespassing. The specified value must be greater than pg_mem_limit.",
            example="1024000000"
        )
# DEFAULT.pg_mem_swappiness: cgroup swappiness value.
class KeywordPgMemSwappiness(KeywordInteger):
    def __init__(self):
        KeywordInteger.__init__(
            self,
            section="DEFAULT",
            keyword="pg_mem_swappiness",
            order=31,
            depends=[('create_pg', [True])],
            text="Set a swappiness value for the process group.",
            example="40"
        )
# DEFAULT.pg_blkio_weight: relative block-IO weight (10-1000).
class KeywordPgBlkioWeight(KeywordInteger):
    def __init__(self):
        KeywordInteger.__init__(
            self,
            section="DEFAULT",
            keyword="pg_blkio_weight",
            order=31,
            depends=[('create_pg', [True])],
            text="Block IO relative weight. Value: between 10 and 1000. Kernel default: 1000.",
            example="50"
        )
# --- app-section keywords: launcher script and sequencing numbers --------

# app.script: the launcher path (or basename if hosted in the .d dir).
class KeywordAppScript(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="app",
            keyword="script",
            at=True,
            order=9,
            required=True,
            text="Full path to the app launcher script. Or its basename if the file is hosted in the .d path."
        )
# app.timeout: max seconds to wait for a launcher action.
class KeywordAppTimeout(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="app",
            keyword="timeout",
            order=9,
            at=True,
            required=False,
            text="Wait for seconds max before declaring the app launcher action a failure. If no timeout is specified, the agent waits indefinitely for the app launcher to return. The timeout parameter can be coupled with optional=True to not abort a service start when an app launcher did not return.",
            example="180"
        )
# app.start: ordering number for the start action.
class KeywordAppStart(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="app",
            keyword="start",
            at=True,
            order=10,
            required=False,
            text="Start up sequencing number."
        )
# app.stop: ordering number for the stop action.
class KeywordAppStop(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="app",
            keyword="stop",
            at=True,
            order=11,
            required=False,
            text="Stop sequencing number."
        )
# app.check: ordering number for the check action.
class KeywordAppCheck(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="app",
            keyword="check",
            at=True,
            order=11,
            required=False,
            text="Check up sequencing number."
        )
# app.info: ordering number for the info action.
class KeywordAppInfo(Keyword):
    def __init__(self):
        Keyword.__init__(
            self,
            section="app",
            keyword="info",
            at=True,
            order=12,
            required=False,
            text="Info up sequencing number."
        )
class KeywordSyncType(Keyword):
def __init__(self):
Keyword.__init__(
self,
section="sync",
keyword="type",
order=10,
required=True,
candidates=("rsync", "docker", "dds", "netapp", "symsrdfs", "zfs", "btrfs", "symclone", "symsnap", "hp3par", "hp3parsnap", "evasnap", "ibmdssnap", "dcssnap", "dcsckpt", "necismsnap", "zfssnap", "btrfssnap", "rados", "s3"),
default="rsync",
text="Point a sync driver to use."
)
class KeywordSyncDockerTarget(Keyword):
def __init__(self):
Keyword.__init__(
self,
section="sync",
keyword="target",
rtype="docker",
order=11,
at=True,
required=True,
default=None,
candidates=["nodes", "drpnodes", "nodes drpnodes"],
text="Destination nodes of the sync."
)
class KeywordSyncS3Snar(Keyword):
def __init__(self):
Keyword.__init__(
self,
section="sync",
keyword="snar",
rtype="s3",
order=10,
at=True,
required=False,
example="/srv/mysvc/var/sync.1.snar",
text="The GNU tar snar file full path. The snar file stored the GNU tar metadata needed to do an incremental tarball. If the service fails over shared disks the snar file should be stored there, so the failover node can continue the incremental cycle."
)
class KeywordSyncS3Src(Keyword):
def __init__(self):
Keyword.__init__(
self,
section="sync",
keyword="src",
rtype="s3",
order=10,
at=True,
required=True,
example="/srv/mysvc/tools /srv/mysvc/apps*",
text="Source globs as passed as paths to archive to a tar command."
)
class KeywordSyncS3Options(Keyword):
def __init__(self):
Keyword.__init__(
self,
section="sync",
keyword="options",
rtype="s3",
order=10,
at=True,
required=False,
example="--exclude *.pyc",
text="Options passed to GNU tar for archiving."
)
class KeywordSyncS3Bucket(Keyword):
def __init__(self):
Keyword.__init__(
self,
section="sync",
keyword="bucket",
rtype="s3",
order=10,
at=True,
required=True,
example="opensvc-myapp",
text="The name of the S3 bucket to upload the backup to."
)
class KeywordSyncS3FullSchedule(Keyword):
def __init__(self):
Keyword.__init__(
self,
section="sync",
keyword="full_schedule",
rtype="s3",
order=10,
at=True,
required=True,
example="@1441 sun",
default="@1441 sun",
text="The schedule of full backups. sync_update actions are triggered according to the resource 'schedule' parameter, and do a full backup if the current date matches the 'full_schedule' parameter or an incremental backup otherwise."
)
class KeywordSyncZfsSnapRecursive(Keyword):
def __init__(self):
Keyword.__init__(
self,
section="sync",
keyword="recursive",
rtype="zfssnap",
order=10,
at=True,
required=False,
example="true",
default=True,
text="Set to true to snap recursively the datasets."
)
class KeywordSyncZfsSnapName(Keyword):
def __init__(self):
Keyword.__init__(
self,
section="sync",
keyword="name",
rtype="zfssnap",
order=10,
at=True,
required=False,
example="weekly",
text="A name included in the snapshot name to avoid retention conflicts between multiple zfs snapshot resources. A full snapshot name is formatted as ..snap.. Example: data.weekly.snap.2016-03-09.10:09:52"
)
class KeywordSyncZfsSnapDataset(Keyword):
def __init__(self):
Keyword.__init__(
self,
section="sync",
keyword="dataset",
rtype="zfssnap",
order=10,
at=True,
required=True,
example="svc1fs/data svc1fs/log",
text="A whitespace separated list of datasets to snapshot."
)
class KeywordSyncZfsSnapKeep(Keyword):
def __init__(self):
Keyword.__init__(
self,
section="sync",
keyword="keep",
rtype="zfssnap",
order=10,
at=True,
required=True,
default=3,
example="3",
text="The maximum number of snapshots to retain."
)
class KeywordSyncBtrfsSnapName(Keyword):
    """[sync] name (rtype=btrfssnap): label inserted in snapshot names to avoid retention conflicts."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="sync",
            keyword="name",
            rtype="btrfssnap",
            order=10,
            at=True,
            required=False,
            example="weekly",
            # NOTE(review): the angle-bracket placeholders (likely
            # <subvol>.<name>.snap.<datetime>) look stripped from this text —
            # probably a packaging/extraction artifact; confirm against upstream.
            text="A name included in the snapshot name to avoid retention conflicts between multiple btrfs snapshot resources. A full snapshot name is formatted as ..snap.. Example: data.weekly.snap.2016-03-09.10:09:52"
        )
class KeywordSyncBtrfsSnapSubvol(Keyword):
    """[sync] subvol (rtype=btrfssnap): list of subvolumes to snapshot."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="sync",
            keyword="subvol",
            rtype="btrfssnap",
            order=10,
            at=True,
            required=True,
            example="svc1fs:data svc1fs:log",
            # NOTE(review): placeholder (likely <label>:<subvol>) appears
            # stripped here too; see the example for the intended format.
            text="A whitespace separated list of : to snapshot."
        )
class KeywordSyncBtrfsSnapKeep(Keyword):
    """[sync] keep (rtype=btrfssnap): snapshot retention count."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="sync",
            keyword="keep",
            rtype="btrfssnap",
            order=10,
            at=True,
            required=True,
            default=3,
            example="3",
            text="The maximum number of snapshots to retain."
        )
class KeywordSyncBtrfsSrc(Keyword):
    """[sync] src (rtype=btrfs): source subvolume."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="sync",
            keyword="src",
            rtype="btrfs",
            order=10,
            at=True,
            required=True,
            text="Source subvolume of the sync."
        )
class KeywordSyncBtrfsDst(Keyword):
    """[sync] dst (rtype=btrfs): destination subvolume."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="sync",
            keyword="dst",
            rtype="btrfs",
            order=10,
            at=True,
            required=True,
            text="Destination subvolume of the sync."
        )
class KeywordSyncBtrfsTarget(Keyword):
    """[sync] target (rtype=btrfs): which node sets receive the sync."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="sync",
            keyword="target",
            rtype="btrfs",
            order=11,
            at=True,
            required=True,
            default=None,
            candidates=["nodes", "drpnodes", "nodes drpnodes"],
            text="Destination nodes of the sync."
        )
class KeywordSyncBtrfsRecursive(Keyword):
    """[sync] recursive (rtype=btrfs): replicate child subvolumes too."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="sync",
            keyword="recursive",
            rtype="btrfs",
            order=10,
            at=True,
            required=False,
            default=False,
            candidates=[True, False],
            text="Also replicate subvolumes in the src tree."
        )
class KeywordSyncZfsSrc(Keyword):
    """[sync] src (rtype=zfs): source dataset."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="sync",
            keyword="src",
            rtype="zfs",
            order=10,
            at=True,
            required=True,
            text="Source dataset of the sync."
        )
class KeywordSyncZfsDst(Keyword):
    """[sync] dst (rtype=zfs): destination dataset."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="sync",
            keyword="dst",
            rtype="zfs",
            order=11,
            at=True,
            required=True,
            text="Destination dataset of the sync."
        )
class KeywordSyncZfsTarget(Keyword):
    """[sync] target (rtype=zfs): which node sets receive the sync."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="sync",
            keyword="target",
            rtype="zfs",
            order=12,
            required=True,
            candidates=['nodes', 'drpnodes', 'nodes drpnodes'],
            text="Describes which nodes should receive this data sync from the PRD node where the service is up and running. SAN storage shared 'nodes' must not be sync to 'nodes'. SRDF-like paired storage must not be sync to 'drpnodes'."
        )
class KeywordSyncZfsRecursive(Keyword):
    """[sync] recursive (rtype=zfs): replicate child datasets too."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="sync",
            keyword="recursive",
            rtype="zfs",
            at=True,
            order=13,
            default=True,
            candidates=(True, False),
            # Fixed help text: the original was copy-pasted from the 'target'
            # keyword and described node targeting instead of recursion
            # (compare with the btrfs 'recursive' keyword text).
            text="Also replicate the child datasets of the src dataset."
        )
class KeywordSyncZfsTags(Keyword):
    """[sync] tags (rtype=zfs): driver-recognized tags (supports 'delay_snap')."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="sync",
            keyword="tags",
            rtype="zfs",
            at=True,
            text="The zfs sync resource supports the 'delay_snap' tag. This tag is used to delay the snapshot creation just before the sync, thus after 'postsnap_trigger' execution. The default behaviour (no tags) is to group all snapshots creation before copying data to remote nodes, thus between 'presnap_trigger' and 'postsnap_trigger'."
        )
class KeywordSyncRsyncSrc(Keyword):
    """[sync] src (rtype=rsync): files/dirs passed as-is to rsync."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="sync",
            keyword="src",
            rtype="rsync",
            order=10,
            at=True,
            required=True,
            text="Source of the sync. Can be a whitespace-separated list of files or dirs passed as-is to rsync. Beware of the meaningful ending '/'. Refer to the rsync man page for details."
        )
class KeywordSyncRsyncDst(Keyword):
    """[sync] dst (rtype=rsync): rsync destination path."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="sync",
            keyword="dst",
            rtype="rsync",
            order=11,
            required=True,
            text="Destination of the sync. Beware of the meaningful ending '/'. Refer to the rsync man page for details."
        )
class KeywordSyncRsyncTags(Keyword):
    """[sync] tags (rtype=rsync): driver-recognized tags (supports 'delay_snap')."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="sync",
            keyword="tags",
            at=True,
            rtype="rsync",
            text="The sync resource supports the 'delay_snap' tag. This tag is used to delay the snapshot creation just before the rsync, thus after 'postsnap_trigger' execution. The default behaviour (no tags) is to group all snapshots creation before copying data to remote nodes, thus between 'presnap_trigger' and 'postsnap_trigger'."
        )
class KeywordSyncRsyncExclude(Keyword):
    """[sync] exclude (rtype=rsync): legacy --exclude passthrough (prefer 'options')."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="sync",
            keyword="exclude",
            rtype="rsync",
            at=True,
            text="A whitespace-separated list of --exclude params passed unchanged to rsync. The 'options' keyword is preferred now."
        )
class KeywordSyncRsyncOptions(Keyword):
    """[sync] options (rtype=rsync): extra rsync command-line options."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="sync",
            keyword="options",
            at=True,
            rtype="rsync",
            text="A whitespace-separated list of params passed unchanged to rsync. Typical usage is ACL preservation activation."
        )
class KeywordSyncRsyncTarget(Keyword):
    """[sync] target (rtype=rsync): which node sets receive the sync."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="sync",
            keyword="target",
            rtype="rsync",
            order=12,
            required=True,
            candidates=['nodes', 'drpnodes', 'nodes drpnodes'],
            text="Describes which nodes should receive this data sync from the PRD node where the service is up and running. SAN storage shared 'nodes' must not be sync to 'nodes'. SRDF-like paired storage must not be sync to 'drpnodes'."
        )
class KeywordSyncRsyncSnap(Keyword):
    """[sync] snap (rtype=rsync): sync from a snapshot of the source."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="sync",
            keyword="snap",
            rtype="rsync",
            at=True,
            order=14,
            candidates=(True, False),
            default=False,
            text="If set to true, OpenSVC will try to snapshot the first snapshottable parent of the source of the sync and try to sync from the snap."
        )
class KeywordSyncRsyncDstfs(Keyword):
    """[sync] dstfs (rtype=rsync): safety check that dst is a mounted FS."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="sync",
            keyword="dstfs",
            rtype="rsync",
            order=13,
            text="If set to a remote mount point, OpenSVC will verify that the specified mount point is really hosting a mounted FS. This can be used as a safety net to not overflow the parent FS (may be root)."
        )
class KeywordSyncRsyncBwlimit(KeywordInteger):
    """[sync] bwlimit (rtype=rsync): per-transfer bandwidth cap in KB."""
    def __init__(self):
        KeywordInteger.__init__(
            self,
            section="sync",
            keyword="bwlimit",
            rtype="rsync",
            text="Bandwidth limit in KB applied to this rsync transfer. Leave empty to enforce no limit. Takes precedence over 'bwlimit' set in [DEFAULT]."
        )
class KeywordSyncSchedule(Keyword):
    """[sync] schedule: per-resource synchronization schedule expression."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="sync",
            keyword="schedule",
            default=None,
            at=True,
            # Fixed help-text typo: "Set the this resource" -> "Set this resource's".
            text="Set this resource's synchronization schedule. See usr/share/doc/node.conf for the schedule syntax reference.",
            example='["00:00-01:00@61 mon", "02:00-03:00@61 tue-sun"]'
        )
class KeywordSyncSyncMaxDelay(KeywordInteger):
    """[sync] sync_max_delay: staleness threshold (minutes) before sync status is down."""
    def __init__(self):
        KeywordInteger.__init__(
            self,
            section="sync",
            keyword="sync_max_delay",
            default=1440,
            # Fixed help-text typo: "sets to delay" -> "sets the delay".
            text="Unit is minutes. This sets the delay above which the sync status of the resource is to be considered down. Should be set according to your application service level agreement. The cron job frequency should be set between 'sync_min_delay' and 'sync_max_delay'."
        )
class KeywordIpIpname(Keyword):
    """[ip] ipname: DNS name or address of the ip resource (scopable)."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="ip",
            keyword="ipname",
            order=12,
            at=True,
            required=False,
            # NOTE(review): a placeholder (likely <allocate>) seems stripped
            # after "the special value" — extraction artifact; confirm upstream.
            text="The DNS name or IP address of the ip resource. Can be different from one node to the other, in which case '@nodename' can be specified. This is most useful to specify a different ip when the service starts in DRP mode, where subnets are likely to be different than those of the production datacenter. With the amazon driver, the special value tells the provisioner to assign a new private address."
        )
class KeywordIpDnsNameSuffix(Keyword):
    """[ip] dns_name_suffix: suffix appended to the DNS record name."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="ip",
            keyword="dns_name_suffix",
            order=12,
            at=True,
            required=False,
            text="Add the value as a suffix to the DNS record name. The record created is thus formatted as -..."
        )
class KeywordIpNetwork(Keyword):
    """[ip] network: network the provisioner allocates from."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="ip",
            keyword="network",
            order=12,
            at=True,
            required=False,
            example="10.0.0.0",
            text="The network, in dotted notation, from where the ip provisioner allocates. Also used by the docker ip driver to delete the network route if del_net_route is set to true.",
        )
class KeywordIpDnsUpdate(Keyword):
    """[ip] dns_update: trigger a DNS record update for the ip."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="ip",
            keyword="dns_update",
            order=12,
            at=True,
            default=False,
            required=False,
            candidates=[True, False],
            text="Setting this parameter triggers a DNS update. The record created is formatted as .., unless dns_record_name is specified."
        )
class KeywordIpZone(Keyword):
    """[ip] zone: Solaris zone to plumb the ip into."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="ip",
            keyword="zone",
            order=12,
            at=True,
            required=False,
            text="The zone name the ip resource is linked to. If set, the ip is plumbed from the global in the zone context.",
            example="zone1"
        )
class KeywordIpDockerContainerRid(Keyword):
    """[ip] container_rid (rtype=docker): container resource to plumb the ip into."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="ip",
            rtype="docker",
            keyword="container_rid",
            order=12,
            at=True,
            required=True,
            text="The docker container resource id to plumb the ip into.",
            example="container#0"
        )
class KeywordIpAmazonEip(Keyword):
    """[ip] eip (rtype=amazon): public elastic ip to associate."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="ip",
            rtype="amazon",
            keyword="eip",
            order=12,
            at=True,
            required=False,
            text="The public elastic ip to associate to . The special value tells the provisioner to assign a new public address.",
            example="52.27.90.63"
        )
class KeywordIpAmazonCascadeAllocation(Keyword):
    """[ip] cascade_allocation (rtype=amazon, provisioning): propagate the allocated ip to other ip resources."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="ip",
            rtype="amazon",
            keyword="cascade_allocation",
            provisioning=True,
            order=13,
            at=True,
            required=False,
            text="Set new allocated ip as value to other ip resources ipname parameter. The syntax is a whitespace separated list of .ipname[@].",
            example="ip#1.ipname ip#1.ipname@nodes"
        )
class KeywordIpAmazonDockerDaemonIp(Keyword):
    """[ip] docker_daemon_ip (rtype=amazon, provisioning): inject the allocated ip into docker daemon args."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="ip",
            rtype="amazon",
            keyword="docker_daemon_ip",
            provisioning=True,
            order=13,
            at=False,
            candidates=[True, False],
            required=False,
            text="Set new allocated ip as value as a '--ip ' argument in the DEFAULT.docker_daemon_args parameter.",
            example="True"
        )
class KeywordDiskPrKey(Keyword):
    """[disk] prkey: resource-level scsi3 persistent reservation key."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            keyword="prkey",
            order=15,
            at=True,
            required=False,
            text="Defines a specific persistent reservation key for the resource. Takes priority over the service-level defined prkey and the node.conf specified prkey."
        )
class KeywordDiskGceNames(Keyword):
    """[disk] names (rtype=gce): gce disk names handled by the resource."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype="gce",
            keyword="names",
            provisioning=False,
            order=1,
            at=True,
            required=True,
            text="Set the gce disk names",
            example="svc1-disk1"
        )
class KeywordDiskGceZone(Keyword):
    """[disk] gce_zone (rtype=gce): zone hosting the gce disks."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype="gce",
            keyword="gce_zone",
            provisioning=False,
            order=2,
            at=True,
            required=True,
            text="Set the gce zone",
            example="europe-west1-b"
        )
class KeywordDiskGceDescription(Keyword):
    """[disk] description (rtype=gce, provisioning): textual description of created disks."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype="gce",
            keyword="description",
            provisioning=True,
            order=5,
            at=True,
            required=False,
            # NOTE(review): default=True for a free-text keyword looks like a
            # copy-paste mistake (same pattern on the other gce provisioning
            # keywords below) — confirm the intended default.
            default=True,
            text="An optional, textual description for the disks being created.",
            example="foo"
        )
class KeywordDiskGceImage(Keyword):
    """[disk] image (rtype=gce, provisioning): image to apply to created disks."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype="gce",
            keyword="image",
            provisioning=True,
            order=5,
            at=True,
            required=False,
            # NOTE(review): default=True suspicious for a free-text keyword.
            default=True,
            text="An image to apply to the disks being created. When using this option, the size of the disks must be at least as large as the image size.",
            example="centos-7"
        )
class KeywordDiskGceImageProject(Keyword):
    """[disk] image_project (rtype=gce, provisioning): project resolving image references."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype="gce",
            keyword="image_project",
            provisioning=True,
            order=5,
            at=True,
            required=False,
            # NOTE(review): default=True suspicious for a free-text keyword.
            default=True,
            text="The project against which all image references will be resolved.",
            example="myprj"
        )
class KeywordDiskGceSize(Keyword):
    """[disk] size (rtype=gce, provisioning): size of the disks to create."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype="gce",
            keyword="size",
            provisioning=True,
            order=3,
            at=True,
            required=False,
            # NOTE(review): default=True suspicious for a size keyword.
            default=True,
            text="Indicates the size of the disks. The OpenSVC size converter is used to produce gce compatible size, so k, K, kib, KiB, kb, KB, ki, Ki and all their g, t, p, e variants are supported.",
            example="20g"
        )
class KeywordDiskGceSourceSnapshot(Keyword):
    """[disk] source_snapshot (rtype=gce, provisioning): snapshot to create disks from."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype="gce",
            keyword="source_snapshot",
            provisioning=True,
            order=5,
            at=True,
            required=False,
            # NOTE(review): default=True suspicious for a free-text keyword.
            default=True,
            text="A source snapshot used to create the disks. It is safe to delete a snapshot after a disk has been created from the snapshot. In such cases, the disks will no longer reference the deleted snapshot. When using this option, the size of the disks must be at least as large as the snapshot size.",
            example="mysrcsnap"
        )
class KeywordDiskGceDiskType(Keyword):
    """[disk] disk_type (rtype=gce, provisioning): gce disk type to create."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype="gce",
            keyword="disk_type",
            provisioning=True,
            order=5,
            at=True,
            required=False,
            # NOTE(review): default=True suspicious; the text says the default
            # disk type is pd-standard.
            default=True,
            text="Specifies the type of disk to create. To get a list of available disk types, run 'gcloud compute disk-types list'. The default disk type is pd-standard.",
            example="pd-standard"
        )
class KeywordIpGceRoutename(Keyword):
    """[ip] routename (rtype=gce): name of the gce route."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="ip",
            rtype="gce",
            keyword="routename",
            provisioning=False,
            order=13,
            at=True,
            required=False,
            text="Set the gce route name",
            example="rt-ip1"
        )
class KeywordIpGceZone(Keyword):
    """[ip] gce_zone (rtype=gce): next-hop zone of the gce ip route."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="ip",
            rtype="gce",
            keyword="gce_zone",
            provisioning=False,
            order=13,
            at=True,
            required=False,
            text="Set the gce ip route next hop zone",
            example="europe-west1-b"
        )
class KeywordIpType(Keyword):
    """[ip] type: selects the ip driver implementation."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="ip",
            keyword="type",
            at=True,
            candidates=[None, 'crossbow', 'amazon', 'docker', 'gce'],
            text="The opensvc ip driver name.",
            required=False,
            order=10,
            example="crossbow",
        )
class KeywordIpIpdev(Keyword):
    """[ip] ipdev: interface to stack the service ip on (scopable)."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="ip",
            keyword="ipdev",
            order=11,
            at=True,
            required=True,
            text="The interface name over which OpenSVC will try to stack the service ip. Can be different from one node to the other, in which case the '@nodename' can be specified."
        )
class KeywordIpIpdevext(Keyword):
    """[ip] ipdevext (rtype=crossbow): interface name extension for ipadm."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="ip",
            rtype="crossbow",
            keyword="ipdevext",
            order=12,
            at=True,
            required=False,
            example="v4",
            text="The interface name extension for crossbow ipadm configuration."
        )
class KeywordIpDelNetRoute(Keyword):
    """[ip] del_net_route (rtype=docker): drop the autoconfigured network route."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="ip",
            rtype="docker",
            keyword="del_net_route",
            order=12,
            at=True,
            required=False,
            example="true",
            text="Some docker ip configuration requires dropping the network route autoconfigured when installing the ip address. In this case set this parameter to true, and also set the network parameter."
        )
class KeywordIpNetmask(Keyword):
    """[ip] netmask: netmask of the service ip when not deducible."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="ip",
            keyword="netmask",
            at=True,
            order=13,
            text="If an ip is already plumbed on the root interface (in which case the netmask is deduced from this ip). Mandatory if the interface is dedicated to the service (dummy interface are likely to be in this case). The format is either dotted or octal for IPv4, ex: 255.255.252.0 or 22, and octal for IPv6, ex: 64.",
            example="255.255.255.0"
        )
class KeywordIpGateway(Keyword):
    """[ip] gateway (provisioning): gateway used in sysidcfg formatting."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="ip",
            keyword="gateway",
            at=True,
            order=14,
            required=False,
            text="A zone ip provisioning parameter used in the sysidcfg formatting. The format is decimal for IPv4, ex: 255.255.252.0, and octal for IPv6, ex: 64.",
            provisioning=True
        )
class KeywordDiskType(Keyword):
    """[disk] type: selects the disk/volume-group driver implementation."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            keyword="type",
            at=True,
            order=9,
            required=False,
            default="vg",
            # Removed the duplicate 'raw' entry that appeared twice in the
            # candidates list.
            candidates=['disk', 'veritas', 'raw', 'rados', 'md', 'drbd', 'loop', 'zpool', 'pool', 'vmdg', 'vdisk', 'lvm', 'vg', 'amazon', 'gce'],
            text="The volume group driver to use. Leave empty to activate the native volume group manager."
        )
class KeywordDiskDiskDiskId(Keyword):
    """[disk] disk_id (rtype=disk): wwn of the disk."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype="disk",
            keyword="disk_id",
            order=10,
            at=True,
            required=False,
            text="The wwn of the disk.",
            example="6589cfc00000097484f0728d8b2118a6"
        )
class KeywordDiskDiskSize(Keyword):
    """[disk] size (rtype=disk, provisioning): size of the disk to provision."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype="disk",
            keyword="size",
            order=11,
            at=True,
            required=False,
            provisioning=True,
            text="The size of the disk to provision.",
            example="15g"
        )
class KeywordDiskDiskArray(Keyword):
    """[disk] array (rtype=disk, provisioning): array to provision from."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype="disk",
            keyword="array",
            order=11,
            at=True,
            required=False,
            provisioning=True,
            text="The array to provision the disk from.",
            example="xtremio-prod1"
        )
class KeywordDiskDiskDiskGroup(Keyword):
    """[disk] diskgroup (rtype=disk, provisioning): array disk group to provision from."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype="disk",
            keyword="diskgroup",
            order=11,
            at=True,
            required=False,
            provisioning=True,
            text="The array disk group to provision the disk from.",
            example="default"
        )
class KeywordDiskDiskSlo(Keyword):
    """[disk] slo (rtype=disk, provisioning): service level objective of the provisioned disk."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype="disk",
            keyword="slo",
            order=11,
            at=True,
            required=False,
            provisioning=True,
            text="The provisionned disk service level objective. This keyword is honored on arrays supporting this (ex: EMC VMAX)",
            example="Optimized"
        )
class KeywordDiskAmazonVolumes(Keyword):
    """[disk] volumes (rtype=amazon): amazon volume ids handled by the resource."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype="amazon",
            keyword="volumes",
            order=10,
            at=True,
            required=True,
            text="A whitespace separated list of amazon volumes. Any member of the list can be set to a special value. In this case the provisioner will allocate a new volume with the specified characteristics and replace this member with the allocated volume id. The supported keys are the same as those supported by the awscli ec2 create-volume command: size, iops, availability-zone, snapshot, type and encrypted.",
            example="vol-123456 vol-654321"
        )
class KeywordDiskRawDevs(Keyword):
    """[disk] devs (rtype=raw): device paths (or src:dst mappings) owned by the service."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype="raw",
            keyword="devs",
            order=10,
            at=True,
            required=True,
            text="a list of device paths or : device paths mappings, whitespace separated. Those devices are owned by the service and scsi reservation policy is applied to them.",
            example="/dev/mapper/svc.d0:/dev/oracle/redo001 /dev/mapper/svc.d1"
        )
class KeywordDiskRawZone(Keyword):
    """[disk] zone (rtype=raw): zone the raw devices are reparented into."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype="raw",
            keyword="zone",
            order=12,
            at=True,
            required=False,
            text="The zone name the raw resource is linked to. If set, the raw files are configured from the global reparented to the zonepath.",
            example="zone1"
        )
class KeywordDiskRawCreateCharDevices(Keyword):
    """[disk] create_char_devices (rtype=raw): manage Linux char devices via the raw driver."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype="raw",
            keyword="create_char_devices",
            order=10,
            at=True,
            required=False,
            default=True,
            text="On Linux, char devices are not automatically created when devices are discovered. If set to True (the default), the raw resource driver will create and delete them using the raw kernel driver.",
            example="false"
        )
class KeywordDiskRawUser(Keyword):
    """[disk] user (rtype=raw): owner user of the devices."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            keyword="user",
            rtype="raw",
            order=11,
            at=True,
            required=True,
            example="root",
            text="The user that should be owner of the device. Either in numeric or symbolic form."
        )
class KeywordDiskRawGroup(Keyword):
    """[disk] group (rtype=raw): owner group of the devices."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            keyword="group",
            rtype="raw",
            order=11,
            at=True,
            required=False,
            example="sys",
            text="The group that should be owner of the device. Either in numeric or symbolic form."
        )
class KeywordDiskRawPerm(Keyword):
    """[disk] perm (rtype=raw): octal permissions of the devices."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            keyword="perm",
            rtype="raw",
            order=11,
            at=True,
            required=False,
            example="600",
            text="The permissions the device should have. A string representing the octal permissions."
        )
class KeywordDiskVgname(Keyword):
    """[disk] vgname (rtype=lvm/vg): volume group name.

    NOTE(review): 'vgname' and 'name' (class below) look like a legacy
    alias pair for the same setting — confirm which is canonical.
    """
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype=["lvm", "vg"],
            keyword="vgname",
            order=10,
            required=True,
            text="The name of the volume group"
        )
class KeywordDiskVgName(Keyword):
    """[disk] name (rtype=lvm/vg): volume group name (alias of vgname)."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype=["lvm", "vg"],
            keyword="name",
            order=10,
            required=True,
            text="The name of the volume group"
        )
class KeywordDiskOptions(Keyword):
    """[disk] options (rtype=lvm/vg, provisioning): extra vgcreate options."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype=["lvm", "vg"],
            keyword="options",
            default="",
            required=False,
            provisioning=True,
            text="The vgcreate options to use upon vg provisioning."
        )
class KeywordDiskMdUuid(Keyword):
    """[disk] uuid (rtype=md): md uuid for mdadm assemble."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            required=True,
            at=True,
            keyword="uuid",
            rtype="md",
            text="The md uuid to use with mdadm assemble commands"
        )
class KeywordDiskMdDevs(Keyword):
    """[disk] devs (rtype=md, provisioning): member devices for mdadm create."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            required=True,
            at=True,
            keyword="devs",
            rtype="md",
            provisioning=True,
            example="/dev/rbd0 /dev/rbd1",
            text="The md member devices to use with mdadm create command"
        )
class KeywordDiskMdLevel(Keyword):
    """[disk] level (rtype=md, provisioning): raid level for mdadm create."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            required=True,
            at=True,
            keyword="level",
            rtype="md",
            provisioning=True,
            example="raid1",
            text="The md raid level to use with mdadm create command (see mdadm man for values)"
        )
class KeywordDiskMdLayout(Keyword):
    """[disk] layout (rtype=md, provisioning): raid layout for mdadm create."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            required=False,
            at=True,
            keyword="layout",
            rtype="md",
            provisioning=True,
            text="The md raid layout to use with mdadm create command (see mdadm man for values)"
        )
class KeywordDiskMdChunk(Keyword):
    """[disk] chunk (rtype=md, provisioning): chunk size for mdadm create."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            required=False,
            at=True,
            keyword="chunk",
            rtype="md",
            provisioning=True,
            example="128k",
            text="The md chunk size to use with mdadm create command. Values are converted to kb and rounded to 4."
        )
class KeywordDiskMdSpares(Keyword):
    """[disk] spares (rtype=md, provisioning): spare device count for mdadm create."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            required=False,
            at=True,
            keyword="spares",
            rtype="md",
            provisioning=True,
            example="0",
            text="The md number of spare devices to use with mdadm create command"
        )
class KeywordDiskMdShared(Keyword):
    """[disk] shared (rtype=md): enable additional checks on passive nodes."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            keyword="shared",
            candidates=(True, False),
            at=True,
            rtype="md",
            text="Trigger additional checks on the passive nodes. If not specified, the shared parameter defaults to True if no multiple nodes and drpnodes are defined and no md section parameter use scoping."
        )
class KeywordDiskClientId(Keyword):
    """[disk] client_id (rtype=rados): rados authentication client id."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            keyword="client_id",
            rtype="rados",
            text="Client id to use for authentication with the rados servers"
        )
class KeywordDiskKeyring(Keyword):
    """[disk] keyring (rtype=rados): keyring holding the client id secret."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            keyword="keyring",
            required=False,
            rtype="rados",
            text="keyring to look for the client id secret for authentication with the rados servers"
        )
class KeywordDiskLock(Keyword):
    """[disk] lock (rtype=rados): image locking mode."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            keyword="lock",
            candidates=["exclusive", "shared", "None"],
            rtype="rados",
            text="Locking mode for the rados images"
        )
class KeywordDiskLockSharedTag(Keyword):
    """[disk] lock_shared_tag (rtype=rados): tag used in shared locking mode."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            keyword="lock_shared_tag",
            rtype="rados",
            # Only meaningful when lock=shared.
            depends=[('lock', ['shared'])],
            text="The tag to use upon rados image locking in shared mode"
        )
class KeywordDiskImageFormat(Keyword):
    """[disk] image_format (rtype=rados, provisioning): rados image format."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype="rados",
            keyword="image_format",
            provisioning=True,
            default="2",
            text="The rados image format"
        )
class KeywordDiskSize(Keyword):
    """[disk] size (rtype=rados, provisioning): rados image size in MB."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype="rados",
            keyword="size",
            provisioning=True,
            text="The rados image size in MB"
        )
class KeywordDiskImages(Keyword):
    """[disk] images (rtype=rados): rados image names handled by the resource."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype="rados",
            keyword="images",
            text="The rados image names handled by this vg resource. whitespace separated."
        )
class KeywordDiskDsf(Keyword):
    """[disk] dsf (rtype=lvm/vg): HP-UX device special file selection."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype=["lvm", "vg"],
            keyword="dsf",
            candidates=(True, False),
            default=True,
            # Fixed help-text typo: "Otherwize" -> "Otherwise".
            text="HP-UX only. 'dsf' must be set to false for LVM to use never-multipathed /dev/dsk/... devices. Otherwise, ad-hoc multipathed /dev/disk/... devices."
        )
class KeywordDiskScsireserv(Keyword):
    """[disk] scsireserv: enable scsi3 persistent reservations on the resource disks."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            keyword="scsireserv",
            default=False,
            candidates=(True, False),
            text="If set to 'true', OpenSVC will try to acquire a type-5 (write exclusive, registrant only) scsi3 persistent reservation on every path to disks of every disk group attached to this service. Existing reservations are preempted to not block service start-up. If the start-up was not legitimate the data are still protected from being written over from both nodes. If set to 'false' or not set, 'scsireserv' can be activated on a per-resource basis."
        )
class KeywordDiskPvs(Keyword):
    """[disk] pvs (rtype=lvm/vg, provisioning): physical volumes of the vg."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype=["lvm", "vg"],
            keyword="pvs",
            required=True,
            text="The list of paths to the physical volumes of the volume group.",
            provisioning=True
        )
class KeywordZPoolVdev(Keyword):
    """[disk] vdev (rtype=zpool/pool, provisioning): vdev list for zpool create."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype=["zpool", "pool"],
            keyword="vdev",
            order=11,
            at=True,
            provisioning=True,
            text="The vdev list, including optional parity keywords, as would be passed to zpool create."
        )
class KeywordZPoolName(Keyword):
    """[disk] name (rtype=zpool/pool): zfs pool name.

    NOTE(review): 'name' and 'poolname' (class below) look like a legacy
    alias pair for the same setting — confirm which is canonical.
    """
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype=["zpool", "pool"],
            keyword="name",
            order=10,
            at=True,
            text="The name of the zfs pool"
        )
class KeywordZPoolPoolname(Keyword):
    """[disk] poolname (rtype=zpool/pool): zfs pool name (alias of name)."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype=["zpool", "pool"],
            keyword="poolname",
            order=10,
            at=True,
            text="The name of the zfs pool"
        )
class KeywordVmdgContainerid(Keyword):
    """[disk] container_id (rtype=vmdg): container providing the disk mapping."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype="vmdg",
            keyword="container_id",
            at=True,
            required=False,
            text="The id of the container whose configuration to extract the disk mapping from."
        )
class KeywordDiskDrbdRes(Keyword):
    """[disk] res (rtype=drbd): drbd resource name."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype="drbd",
            keyword="res",
            order=11,
            text="The name of the drbd resource associated with this service resource. OpenSVC expect the resource configuration file to reside in '/etc/drbd.d/resname.res'. The 'sync#i0' resource will take care of replicating this file to remote nodes."
        )
class KeywordShareType(Keyword):
    """[share] type: type of share (only nfs)."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="share",
            keyword="type",
            candidates=["nfs"],
            text="The type of share.",
            required=True,
            order=1
        )
class KeywordSharePath(Keyword):
    """[share] path (rtype=nfs): directory to share."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="share",
            keyword="path",
            rtype="nfs",
            order=10,
            at=True,
            required=True,
            text="The fullpath of the directory to share."
        )
class KeywordShareNfsOpts(Keyword):
    """[share] opts (rtype=nfs): NFS export options."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="share",
            keyword="opts",
            rtype="nfs",
            order=11,
            at=True,
            required=True,
            # Fixed help-text typo: "woud" -> "would".
            text="The NFS share export options, as they would be set in /etc/exports or passed to Solaris share command."
        )
class KeywordFsDev(Keyword):
    """[fs] dev: block device or image file hosting the filesystem (scopable)."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="fs",
            keyword="dev",
            order=11,
            at=True,
            required=True,
            text="The block device file or filesystem image file hosting the filesystem to mount. Different device can be set up on different nodes using the dev@nodename syntax"
        )
class KeywordFsZone(Keyword):
    """[fs] zone: zone the mount point is reparented into."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="fs",
            keyword="zone",
            order=11,
            at=True,
            required=False,
            text="The zone name the fs refers to. If set, the fs mount point is reparented into the zonepath rootfs."
        )
class KeywordFsVg(Keyword):
    """[fs] vg (provisioning): disk group backing the provisioned device."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="fs",
            keyword="vg",
            required=True,
            text="The name of the disk group the filesystem device should be provisioned from.",
            provisioning=True
        )
class KeywordFsSize(Keyword):
    """[fs] size (provisioning): logical volume size in MB."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="fs",
            keyword="size",
            required=True,
            text="The size in MB of the logical volume to provision for this filesystem.",
            provisioning=True
        )
class KeywordFsMnt(Keyword):
    """[fs] mnt: mount point."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="fs",
            keyword="mnt",
            order=12,
            required=True,
            text="The mount point where to mount the filesystem."
        )
class KeywordFsMntOpt(Keyword):
    """[fs] mnt_opt: mount options."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="fs",
            keyword="mnt_opt",
            order=13,
            at=True,
            text="The mount options."
        )
class KeywordFsMkfsOpt(Keyword):
    """[fs] mkfs_opt (provisioning): extra mkfs options."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="fs",
            keyword="mkfs_opt",
            provisioning=True,
            required=False,
            at=True,
            order=13,
            text="Eventual mkfs additional options."
        )
class KeywordFsType(Keyword):
    """[fs] type: filesystem type, or 'directory' (open candidate list)."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="fs",
            keyword="type",
            order=14,
            required=True,
            # Candidates are advisory: any fs type string is accepted.
            strict_candidates=False,
            candidates=["directory"],
            text="The filesystem type or 'directory'. Used to determine the fsck command to use."
        )
class KeywordFsSnapSize(Keyword):
    """[fs] snap_size: snapshot size override (MB) for snap-enabled syncs."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="fs",
            keyword="snap_size",
            order=14,
            text="If this filesystem is build on a snapable logical volume or is natively snapable (jfs, vxfs, ...) this setting overrides the default 10% of the filesystem size to compute the snapshot size. The snapshot is created by snap-enabled rsync-type sync resources. The unit is Megabytes."
        )
class KeywordFsDirPath(Keyword):
    """[fs] path (rtype=directory): directory to create."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="fs",
            keyword="path",
            rtype="directory",
            order=10,
            at=True,
            required=True,
            text="The fullpath of the directory to create."
        )
class KeywordFsDirUser(Keyword):
    """[fs] user (rtype=directory): owner user of the directory."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="fs",
            keyword="user",
            rtype="directory",
            order=11,
            at=True,
            required=False,
            example="root",
            text="The user that should be owner of the directory. Either in numeric or symbolic form."
        )
class KeywordFsDirGroup(Keyword):
    """[fs] group (rtype=directory): owner group of the directory."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="fs",
            keyword="group",
            rtype="directory",
            order=11,
            at=True,
            required=False,
            example="sys",
            text="The group that should be owner of the directory. Either in numeric or symbolic form."
        )
class KeywordFsDirPerm(Keyword):
    """[fs] perm (rtype=directory): octal permissions of the directory."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="fs",
            keyword="perm",
            rtype="directory",
            order=11,
            at=True,
            required=False,
            example="1777",
            text="The permissions the directory should have. A string representing the octal permissions."
        )
class KeywordLoopSize(Keyword):
    """[disk] size (rtype=loop, provisioning): loop file size."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype="loop",
            keyword="size",
            at=True,
            required=True,
            default=10,
            text="The size of the loop file to provision.",
            provisioning=True
        )
class KeywordLoopFile(Keyword):
    """[disk] file (rtype=loop): file hosting the disk image."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype="loop",
            at=True,
            keyword="file",
            required=True,
            text="The file hosting the disk image to map."
        )
class KeywordSyncNetappFiler(Keyword):
def __init__(self):
Keyword.__init__(
self,
section="sync",
keyword="filer",
rtype="netapp",
required=True,
at=True,
text="The Netapp filer resolvable host name used by the node. Different filers can be set up for each node using the filer@nodename syntax."
)
class KeywordSyncNetappPath(Keyword):
def __init__(self):
Keyword.__init__(
self,
section="sync",
keyword="path",
rtype="netapp",
required=True,
text="Specifies the volume or qtree to drive snapmirror on."
)
class KeywordSyncNetappUser(Keyword):
def __init__(self):
Keyword.__init__(
self,
section="sync",
keyword="user",
rtype="netapp",
required=True,
default="nasadm",
text="Specifies the user used to ssh connect the filers. Nodes should be trusted by keys to access the filer with this user."
)
class KeywordSyncIbmdssnapPairs(Keyword):
def __init__(self):
Keyword.__init__(
self,
section="sync",
keyword="pairs",
at=True,
rtype="ibmdssnap",
required=True,
text="Whitespace-separated list of device pairs.",
example="0065:0073 0066:0074"
)
class KeywordSyncIbmdssnapArray(Keyword):
def __init__(self):
Keyword.__init__(
self,
section="sync",
keyword="array",
at=True,
rtype="ibmdssnap",
required=True,
text="The name of the array holding the source devices and their paired devices.",
example="IBM.2243-12ABC00"
)
class KeywordSyncIbmdssnapBgcopy(Keyword):
def __init__(self):
Keyword.__init__(
self,
section="sync",
keyword="bgcopy",
at=True,
rtype="ibmdssnap",
candidates=[True, False],
required=True,
text="Initiate a background copy of the source data block to the paired devices upon resync."
)
class KeywordSyncIbmdssnapRecording(Keyword):
def __init__(self):
Keyword.__init__(
self,
section="sync",
keyword="recording",
at=True,
rtype="ibmdssnap",
candidates=[True, False],
required=True,
text="Track only changed data blocks instead of copying the whole source data to the paired devices."
)
class KeywordSyncNexentaName(Keyword):
def __init__(self):
Keyword.__init__(
self,
section="sync",
keyword="name",
at=True,
rtype="nexenta",
required=True,
text="The name of the Nexenta autosync configuration."
)
class KeywordSyncNexentaFiler(Keyword):
def __init__(self):
Keyword.__init__(
self,
section="sync",
keyword="filer",
at=True,
rtype="nexenta",
required=True,
text="The name of the Nexenta local head. Must be set for each node using the scoping syntax."
)
class KeywordSyncNexentaPath(Keyword):
def __init__(self):
Keyword.__init__(
self,
section="sync",
keyword="path",
at=True,
rtype="nexenta",
required=True,
text="The path of the zfs to synchronize, as seen by the Nexenta heads."
)
class KeywordSyncNexentaReversible(Keyword):
def __init__(self):
Keyword.__init__(
self,
section="sync",
keyword="reversible",
at=True,
rtype="nexenta",
candidates=[True, False],
required=True,
text="Defines if the replication link can be reversed. Set to no for prd to drp replications to protect production data."
)
class KeywordSyncHp3parSnapArray(Keyword):
    """sync.array keyword for the 'hp3parsnap' driver."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="array",
            rtype="hp3parsnap",
            required=True,
            at=True,
            text="Name of the HP 3par array to send commands to.",
        )
        Keyword.__init__(self, **opts)

class KeywordSyncHp3parSnapVvnames(Keyword):
    """sync.vv_names keyword for the 'hp3parsnap' driver."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="vv_names",
            rtype="hp3parsnap",
            required=True,
            at=True,
            text="The names of snapshot VV or sets of VV to update.",
        )
        Keyword.__init__(self, **opts)

class KeywordSyncHp3parArray(Keyword):
    """sync.array keyword for the 'hp3par' driver."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="array",
            rtype="hp3par",
            required=True,
            at=True,
            text="Name of the HP 3par array to send commands to.",
        )
        Keyword.__init__(self, **opts)

class KeywordSyncHp3parMode(Keyword):
    """sync.mode keyword for the 'hp3par' driver."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="mode",
            rtype="hp3par",
            required=True,
            candidates=["async", "sync"],
            default="async",
            text="Replication mode: Synchronous or Asynchronous",
        )
        Keyword.__init__(self, **opts)

class KeywordSyncHp3parMethod(Keyword):
    """sync.method keyword for the 'hp3par' driver."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="method",
            rtype="hp3par",
            required=False,
            candidates=["ssh", "cli"],
            default="ssh",
            at=True,
            text="The method to use to submit commands to the arrays.",
        )
        Keyword.__init__(self, **opts)

class KeywordSyncHp3parRcg(Keyword):
    """sync.rcg keyword for the 'hp3par' driver."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="rcg",
            rtype="hp3par",
            required=True,
            at=True,
            text="Name of the HP 3par remote copy group. The scoping syntax must be used to fully describe the replication topology.",
        )
        Keyword.__init__(self, **opts)
class KeywordSyncDcsckptDcs(Keyword):
    """sync.dcs keyword for the 'dcsckpt' driver."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="dcs",
            rtype="dcsckpt",
            required=True,
            text="Whitespace-separated list of DataCore heads, as seen by the manager.",
        )
        Keyword.__init__(self, **opts)

class KeywordSyncDcsckptManager(Keyword):
    """sync.manager keyword for the 'dcsckpt' driver."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="manager",
            rtype="dcsckpt",
            required=True,
            text="The DataCore manager name running a ssh daemon, as set in the auth.conf section title.",
        )
        Keyword.__init__(self, **opts)

class KeywordSyncDcsckptPairs(Keyword):
    """sync.pairs keyword for the 'dcsckpt' driver."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="pairs",
            rtype="dcsckpt",
            required=True,
            text="A json-formatted list of dictionaries representing the source and destination device pairs. Each dictionary must have the 'src', 'dst_ckpt' keys.",
        )
        Keyword.__init__(self, **opts)

class KeywordSyncDcssnapDcs(Keyword):
    """sync.dcs keyword for the 'dcssnap' driver."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="dcs",
            rtype="dcssnap",
            required=True,
            text="Whitespace-separated list of DataCore heads, as seen by the manager.",
        )
        Keyword.__init__(self, **opts)

class KeywordSyncDcssnapManager(Keyword):
    """sync.manager keyword for the 'dcssnap' driver."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="manager",
            rtype="dcssnap",
            required=True,
            text="The DataCore manager name running a ssh daemon, as set in the auth.conf section title.",
        )
        Keyword.__init__(self, **opts)

class KeywordSyncDcssnapSnapname(Keyword):
    """sync.snapname keyword for the 'dcssnap' driver."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="snapname",
            rtype="dcssnap",
            required=True,
            text="Whitespace-separated list of snapshot device names, as seen by the DataCore manager.",
        )
        Keyword.__init__(self, **opts)
class KeywordSyncEvasnapEvaname(Keyword):
    """sync.eva_name keyword for the 'evasnap' driver."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="eva_name",
            rtype="evasnap",
            required=True,
            text="Name of the HP EVA array hosting the source and snapshot devices.",
        )
        Keyword.__init__(self, **opts)

class KeywordSyncEvasnapSnapname(Keyword):
    """sync.snap_name keyword for the 'evasnap' driver."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="snap_name",
            rtype="evasnap",
            required=True,
            text="Name of the snapshot objectname as seen in sssu.",
        )
        Keyword.__init__(self, **opts)

class KeywordSyncEvasnapPairs(Keyword):
    """sync.pairs keyword for the 'evasnap' driver."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="pairs",
            rtype="evasnap",
            required=True,
            text="A json-formatted list of dictionaries representing the device pairs. Each dict must have the 'src', 'dst' and 'mask' keys. The mask key value is a list of \\\\ strings.",
        )
        Keyword.__init__(self, **opts)

class KeywordSyncNecismsnapArray(Keyword):
    """sync.array keyword for the 'necism' driver."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="array",
            rtype="necism",
            required=True,
            text="Name of the NEC ISM array to send commands to.",
        )
        Keyword.__init__(self, **opts)

class KeywordSyncNecismsnapDevs(Keyword):
    """sync.devs keyword for the 'necism' driver."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="devs",
            rtype="necism",
            required=True,
            text="A whitespace-separated list of SV:LD.",
        )
        Keyword.__init__(self, **opts)
class KeywordSyncSymSrdfsSymid(Keyword):
    """sync.symid keyword for the 'symsrdfs' driver."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="symid",
            at=True,
            rtype="symsrdfs",
            required=True,
            text="Id of the local symmetrix array hosting the symdg. This parameter is usually scoped to define different array ids for different nodes.",
        )
        Keyword.__init__(self, **opts)

class KeywordSyncSymSrdfsSymdg(Keyword):
    """sync.symdg keyword for the 'symsrdfs' driver."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="symdg",
            at=False,
            rtype="symsrdfs",
            required=True,
            text="Name of the symmetrix device group where the source and target devices are grouped.",
        )
        Keyword.__init__(self, **opts)

class KeywordSyncSymSrdfsRdfg(Keyword):
    """sync.rdfg keyword for the 'symsrdfs' driver."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="rdfg",
            at=False,
            rtype="symsrdfs",
            required=True,
            text="Name of the RDF group pairing the source and target devices.",
        )
        Keyword.__init__(self, **opts)
class KeywordSyncSymclonePrecopy(Keyword):
    """sync.precopy keyword for the 'symclone' driver."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="precopy",
            at=True,
            rtype="symclone",
            required=False,
            default=True,
            text="Use -precopy on recreate.",
        )
        Keyword.__init__(self, **opts)

class KeywordSyncSymcloneSymid(Keyword):
    """sync.symid keyword shared by the 'symclone' and 'symsnap' drivers."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="symid",
            rtype=["symclone", "symsnap"],
            required=True,
            text="Identifier of the symmetrix array hosting the source and target devices pairs pointed by 'pairs'.",
        )
        Keyword.__init__(self, **opts)

class KeywordSyncSymclonePairs(Keyword):
    """sync.pairs keyword shared by the 'symclone' and 'symsnap' drivers."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="pairs",
            rtype=["symclone", "symsnap"],
            required=True,
            at=True,
            default=None,
            text="Whitespace-separated list of devices : devid pairs to drive with this resource.",
            example="00B60:00B61 00B62:00B63",
        )
        Keyword.__init__(self, **opts)

class KeywordSyncSymcloneConsistent(Keyword):
    """sync.consistent keyword shared by the 'symclone' and 'symsnap' drivers."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="consistent",
            rtype=["symclone", "symsnap"],
            at=True,
            default=True,
            text="Use -consistent in symclone commands.",
        )
        Keyword.__init__(self, **opts)
class KeywordSyncDdsSrc(Keyword):
    """sync.src keyword for the 'dds' driver."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="src",
            rtype="dds",
            required=True,
            text="Points the origin of the snapshots to replicate from.",
        )
        Keyword.__init__(self, **opts)

class KeywordSyncDdsDst(Keyword):
    """sync.dst keyword for the 'dds' driver."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="dst",
            rtype="dds",
            at=True,
            required=True,
            text="Target file or block device. Optional. Defaults to src. Points the media to replay the binary-delta received from source node to. This media must have a size superior or equal to source.",
        )
        Keyword.__init__(self, **opts)

class KeywordSyncDdsTarget(Keyword):
    """sync.target keyword for the 'dds' driver."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="target",
            rtype="dds",
            required=True,
            candidates=['nodes', 'drpnodes', 'nodes drpnodes'],
            text="Accepted values are 'drpnodes', 'nodes' or both, whitespace-separated. Points the target nodes to replay the binary-deltas on. Be warned that starting the service on a target node without a 'stop-sync_update-start cycle, will break the synchronization, so this mode is usually restricted to drpnodes sync, and should not be used to replicate data between nodes with automated services failover.",
        )
        Keyword.__init__(self, **opts)

class KeywordSyncDdsSnapSize(Keyword):
    """sync.snap_size keyword for the 'dds' driver."""
    def __init__(self):
        opts = dict(
            section="sync",
            keyword="snap_size",
            rtype="dds",
            text="Default to 10% of origin. In MB, rounded to physical extent boundaries by lvm tools. Size of the snapshots created by OpenSVC to extract binary deltas from. Opensvc creates at most 2 snapshots : one short-lived to gather changed data from, and one long-lived to gather changed chunks list from. Volume groups should have the necessary space always available.",
        )
        Keyword.__init__(self, **opts)
class KeywordVdiskPath(Keyword):
    """disk.path keyword for the 'vdisk' driver."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="disk",
            rtype="vdisk",
            keyword="path",
            required=True,
            at=True,
            # fixed doubled word "to to" in the user-visible help text
            text="Path of the device or file used as a virtual machine disk. The path@nodename can be used to set up different path on each node."
        )
class KeywordHbType(Keyword):
    """hb.type keyword: selects the heartbeat driver."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="hb",
            keyword="type",
            required=True,
            candidates=('openha', 'linuxha'),
            text="Specify the heartbeat driver to use."
        )
class KeywordHbName(Keyword):
    """hb.name keyword for the 'openha' heartbeat driver."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="hb",
            keyword="name",
            rtype="openha",
            text="Specify the service name used by the heartbeat. Defaults to the service name."
        )
class KeywordTaskCommand(Keyword):
    """task.command keyword: the command run by the 'run' action."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="task",
            keyword="command",
            at=True,
            order=1,
            required=True,
            text="The command to execute on 'run' action and at scheduled interval. The default schedule for tasks is @0.",
            example="/srv/{svcname}/data/scripts/backup.sh"
        )
class KeywordTaskConfirmation(Keyword):
    """task.confirmation keyword: interactive confirmation gate."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="task",
            keyword="confirmation",
            at=True,
            order=1,
            default=False,
            required=False,
            candidates=(True, False),
            text="If set to True, ask for an interactive confirmation to run the task. This flag can be used for dangerous tasks like data-restore.",
        )
class KeywordTaskOnError(Keyword):
    """task.on_error keyword: error-handler command."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="task",
            keyword="on_error",
            at=True,
            order=1,
            required=False,
            text="A command to execute on 'run' action if 'command' returned an error.",
            example="/srv/{svcname}/data/scripts/task_on_error.sh"
        )
class KeywordTaskUser(Keyword):
    """task.user keyword: user to impersonate for the task command."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="task",
            keyword="user",
            at=True,
            order=2,
            required=False,
            text="The user to impersonate when running the task command. The default user is root.",
            example="admin"
        )
class KeywordTaskSchedule(Keyword):
    """task.schedule keyword: task run schedule expression."""
    def __init__(self):
        Keyword.__init__(
            self,
            section="task",
            keyword="schedule",
            default="@0",
            at=True,
            # fixed typo "Set the this task run schedule" in the help text
            text="Set this task run schedule. See usr/share/doc/node.conf for the schedule syntax reference.",
            example='["00:00-01:00@61 mon", "02:00-03:00@61 tue-sun"]'
        )
class KeyDict(KeywordStore):
    """Registry of every service configuration keyword known to the agent.

    Instantiating this class populates the KeywordStore with:
    - the generic keywords shared by all resource sections (tags,
      subset, restart, monitor, disable, triggers, requires, ...),
    - every driver-specific Keyword class defined in this module.
    """
    def __init__(self, provision=False):
        KeywordStore.__init__(self, provision)
        # NOTE(review): the original code did "import os" here; the name
        # was never used in this method, so the import was dropped.

        #
        # Factories for the generic per-section keywords. Each returns a
        # fresh Keyword bound to the given resource section.
        #
        def kw_tags(resource):
            return Keyword(
                section=resource,
                keyword="tags",
                generic=True,
                at=True,
                candidates=None,
                default=None,
                text="A list of tags. Arbitrary tags can be used to limit action scope to resources with a specific tag. Some tags can influence the driver behaviour. For example the 'encap' tag assigns the resource to the encapsulated service, 'noaction' avoids any state changing action from the driver, 'nostatus' forces the status to n/a."
            )
        def kw_subset(resource):
            return Keyword(
                section=resource,
                keyword="subset",
                generic=True,
                at=True,
                default=None,
                text="Assign the resource to a specific subset."
            )
        def kw_restart(resource):
            return Keyword(
                section=resource,
                keyword="restart",
                generic=True,
                at=True,
                default=0,
                text="The agent will try to restart a resource n times before falling back to the monitor action."
            )
        def kw_monitor(resource):
            return Keyword(
                section=resource,
                keyword="monitor",
                generic=True,
                at=True,
                candidates=(True, False),
                default=False,
                text="A monitored resource will trigger a node suicide if the service has a heartbeat resource in up state"
            )
        def kw_disable(resource):
            return Keyword(
                section=resource,
                keyword="disable",
                generic=True,
                at=True,
                candidates=(True, False),
                default=False,
                text="A disabled resource will be ignored on service startup and shutdown."
            )
        def kw_disable_on(resource):
            return Keyword(
                section=resource,
                keyword="disable_on",
                generic=True,
                default=[],
                # fixed typo: "whitelist-separated" -> "whitespace-separated"
                text="A whitespace-separated list of nodes to disable the resource on. A disabled resource will be ignored on service startup and shutdown."
            )
        def kw_enable_on(resource):
            return Keyword(
                section=resource,
                keyword="enable_on",
                generic=True,
                default=[],
                # fixed typo: "whitelist-separated" -> "whitespace-separated"
                text="A whitespace-separated list of nodes to enable the resource on. Takes precedence over disable and disable_on."
            )
        def kw_optional(resource):
            return Keyword(
                section=resource,
                keyword="optional",
                generic=True,
                at=True,
                candidates=(True, False),
                default=False,
                text="Possible values are 'true' or 'false'. Actions on resource will be tried upon service startup and shutdown, but action failures will be logged and passed over. Useful for resources like dump filesystems for example."
            )
        def kw_always_on(resource):
            return Keyword(
                section=resource,
                keyword="always_on",
                generic=True,
                candidates=['nodes', 'drpnodes', 'nodes drpnodes'],
                text="Possible values are 'nodes', 'drpnodes' or 'nodes drpnodes', or a list of nodes. Sets the nodes on which the resource is always kept up. Primary usage is file synchronization receiving on non-shared disks. Don't set this on shared disk !! danger !!"
            )
        def kw_trigger(resource, when, action, blocking=False):
            # Factory replacing the 36 copy-pasted kw_pre_*/kw_post_*/
            # kw_blocking_* factories of the original code. Produces the
            # exact same keyword names and help texts.
            keyword = "%s_%s" % (when, action)
            if blocking:
                keyword = "blocking_" + keyword
            return Keyword(
                section=resource,
                keyword=keyword,
                generic=True,
                at=True,
                text="A command or script to execute %s the resource %s action. Errors %s the action." % (
                    "before" if when == "pre" else "after",
                    action,
                    "interrupt" if blocking else "do not interrupt",
                )
            )
        def kw_requires(section, action):
            return Keyword(
                section=section,
                keyword=action+"_requires",
                generic=True,
                at=True,
                example="ip#0 fs#0(down,stdby down)",
                default="",
                # NOTE(review): the condition format placeholder "(,...)"
                # looks garbled (likely "<rid>(<state>,...)" upstream) —
                # confirm before changing the user-visible text.
                text="A whitespace-separated list of conditions to meet to accept running a '%s' action. A condition is expressed as (,...). If states are omitted, 'up,stdby up' is used as the default expected states." % action
            )

        self += kw_disable("DEFAULT")

        # Actions that get pre/post and blocking pre/post trigger
        # keywords, in the original registration order.
        trigger_actions = ["unprovision", "provision", "start", "stop",
                           "sync_nodes", "sync_drp", "sync_resync",
                           "sync_update", "run"]

        for r in ["sync", "ip", "fs", "disk", "hb", "share", "container", "app", "task", "stonith"]:
            self += kw_restart(r)
            self += kw_tags(r)
            self += kw_subset(r)
            self += kw_monitor(r)
            self += kw_disable(r)
            self += kw_disable_on(r)
            self += kw_enable_on(r)
            self += kw_optional(r)
            self += kw_always_on(r)
            # Non-blocking triggers first, then blocking ones, matching
            # the registration order of the original hand-written list.
            for blocking in (False, True):
                for action in trigger_actions:
                    self += kw_trigger(r, "pre", action, blocking)
                    self += kw_trigger(r, "post", action, blocking)
            for action in ["unprovision", "provision", "start", "stop",
                           "sync_nodes", "sync_drp", "sync_update",
                           "sync_break", "sync_resync", "run"]:
                self += kw_requires(r, action)

        # Driver-specific keywords, registered in documentation order.
        self += KeywordMode()
        self += KeywordLockTimeout()
        self += KeywordPrKey()
        self += KeywordPkgName()
        self += KeywordDockerDaemonPrivate()
        self += KeywordDockerExe()
        self += KeywordDockerDataDir()
        self += KeywordDockerDaemonArgs()
        self += KeywordDockerSwarmArgs()
        self += KeywordDockerSwarmManagers()
        self += KeywordAntiAffinity()
        self += KeywordNoPreemptAbort()
        self += KeywordShowDisabled()
        self += KeywordCluster()
        self += KeywordClusterType()
        self += KeywordFlexPrimary()
        self += KeywordDrpFlexPrimary()
        self += KeywordRollback()
        self += KeywordStatusSchedule()
        self += KeywordCompSchedule()
        self += KeywordMonitorSchedule()
        self += KeywordResinfoSchedule()
        self += KeywordPushSchedule()
        self += KeywordFlexMinNodes()
        self += KeywordFlexMaxNodes()
        self += KeywordFlexCpuMinThreshold()
        self += KeywordFlexCpuMaxThreshold()
        self += KeywordServiceEnv()
        self += KeywordServiceType()
        self += KeywordNodes()
        self += KeywordAutostartNode()
        self += KeywordDrpnode()
        self += KeywordDrpnodes()
        self += KeywordEncapnodes()
        self += KeywordApp()
        self += KeywordComment()
        self += KeywordScsireserv()
        self += KeywordBwlimit()
        self += KeywordSyncInterval()
        self += KeywordSyncMaxDelay()
        self += KeywordPresnapTrigger()
        self += KeywordPostsnapTrigger()
        self += KeywordMonitorAction()
        self += KeywordCreatePg()
        self += KeywordPgCpus()
        self += KeywordPgMems()
        self += KeywordPgCpuShare()
        self += KeywordPgCpuQuota()
        self += KeywordPgMemOomControl()
        self += KeywordPgMemLimit()
        self += KeywordPgMemSwappiness()
        self += KeywordPgVmemLimit()
        self += KeywordPgBlkioWeight()
        self += KeywordSyncType()
        self += KeywordSyncDockerTarget()
        self += KeywordSyncBtrfsSrc()
        self += KeywordSyncBtrfsDst()
        self += KeywordSyncBtrfsTarget()
        self += KeywordSyncBtrfsRecursive()
        self += KeywordSyncBtrfsSnapName()
        self += KeywordSyncBtrfsSnapSubvol()
        self += KeywordSyncBtrfsSnapKeep()
        self += KeywordSyncZfsSnapName()
        self += KeywordSyncZfsSnapRecursive()
        self += KeywordSyncZfsSnapDataset()
        self += KeywordSyncZfsSnapKeep()
        self += KeywordSyncS3Src()
        self += KeywordSyncS3Options()
        self += KeywordSyncS3Bucket()
        self += KeywordSyncS3FullSchedule()
        self += KeywordSyncZfsSrc()
        self += KeywordSyncZfsDst()
        self += KeywordSyncZfsTarget()
        self += KeywordSyncZfsRecursive()
        self += KeywordSyncZfsTags()
        self += KeywordSyncRsyncSrc()
        self += KeywordSyncRsyncDst()
        self += KeywordSyncRsyncTags()
        self += KeywordSyncRsyncExclude()
        self += KeywordSyncRsyncOptions()
        self += KeywordSyncRsyncTarget()
        self += KeywordSyncRsyncSnap()
        self += KeywordSyncRsyncDstfs()
        self += KeywordSyncRsyncBwlimit()
        self += KeywordDefaultSyncSchedule()
        self += KeywordSyncSchedule()
        self += KeywordSyncSyncMaxDelay()
        self += KeywordIpType()
        self += KeywordIpIpname()
        self += KeywordIpIpdev()
        self += KeywordIpIpdevext()
        self += KeywordIpDelNetRoute()
        self += KeywordIpNetmask()
        self += KeywordIpGateway()
        self += KeywordIpDnsUpdate()
        self += KeywordIpDnsNameSuffix()
        self += KeywordIpNetwork()
        self += KeywordIpZone()
        self += KeywordIpDockerContainerRid()
        self += KeywordIpAmazonEip()
        self += KeywordIpAmazonCascadeAllocation()
        self += KeywordIpAmazonDockerDaemonIp()
        self += KeywordIpGceZone()
        self += KeywordIpGceRoutename()
        self += KeywordDiskPrKey()
        self += KeywordDiskGceNames()
        self += KeywordDiskGceZone()
        self += KeywordDiskGceDescription()
        self += KeywordDiskGceImage()
        self += KeywordDiskGceImageProject()
        self += KeywordDiskGceSize()
        self += KeywordDiskGceSourceSnapshot()
        self += KeywordDiskGceDiskType()
        self += KeywordDiskType()
        self += KeywordDiskDiskDiskId()
        self += KeywordDiskDiskSize()
        self += KeywordDiskDiskArray()
        self += KeywordDiskDiskDiskGroup()
        self += KeywordDiskDiskSlo()
        self += KeywordDiskAmazonVolumes()
        self += KeywordDiskRawDevs()
        self += KeywordDiskRawZone()
        self += KeywordDiskRawCreateCharDevices()
        self += KeywordDiskRawUser()
        self += KeywordDiskRawGroup()
        self += KeywordDiskRawPerm()
        self += KeywordDiskVgname()
        self += KeywordDiskVgName()
        self += KeywordDiskDsf()
        self += KeywordDiskImages()
        self += KeywordDiskMdUuid()
        self += KeywordDiskMdDevs()
        self += KeywordDiskMdLevel()
        self += KeywordDiskMdChunk()
        self += KeywordDiskMdLayout()
        self += KeywordDiskMdSpares()
        self += KeywordDiskMdShared()
        self += KeywordDiskClientId()
        self += KeywordDiskKeyring()
        self += KeywordDiskLock()
        self += KeywordDiskLockSharedTag()
        self += KeywordDiskSize()
        self += KeywordDiskImageFormat()
        self += KeywordDiskOptions()
        self += KeywordDiskScsireserv()
        self += KeywordDiskPvs()
        self += KeywordZPoolName()
        self += KeywordZPoolPoolname()
        self += KeywordZPoolVdev()
        self += KeywordVmdgContainerid()
        self += KeywordDiskDrbdRes()
        self += KeywordFsType()
        self += KeywordFsDev()
        self += KeywordFsZone()
        self += KeywordFsMnt()
        self += KeywordFsMntOpt()
        self += KeywordFsMkfsOpt()
        self += KeywordFsSnapSize()
        self += KeywordFsVg()
        self += KeywordFsSize()
        self += KeywordFsDirPath()
        self += KeywordFsDirUser()
        self += KeywordFsDirGroup()
        self += KeywordFsDirPerm()
        self += KeywordLoopFile()
        self += KeywordLoopSize()
        self += KeywordAppScript()
        self += KeywordAppTimeout()
        self += KeywordAppStart()
        self += KeywordAppStop()
        self += KeywordAppCheck()
        self += KeywordAppInfo()
        self += KeywordSyncNexentaName()
        self += KeywordSyncNexentaFiler()
        self += KeywordSyncNexentaPath()
        self += KeywordSyncNexentaReversible()
        self += KeywordSyncNetappFiler()
        self += KeywordSyncNetappPath()
        self += KeywordSyncNetappUser()
        self += KeywordSyncIbmdssnapPairs()
        self += KeywordSyncIbmdssnapArray()
        self += KeywordSyncIbmdssnapBgcopy()
        self += KeywordSyncIbmdssnapRecording()
        self += KeywordSyncSymSrdfsSymid()
        self += KeywordSyncSymSrdfsSymdg()
        self += KeywordSyncSymSrdfsRdfg()
        self += KeywordSyncSymcloneConsistent()
        self += KeywordSyncSymcloneSymid()
        self += KeywordSyncSymclonePairs()
        self += KeywordSyncSymclonePrecopy()
        self += KeywordSyncDcsckptDcs()
        self += KeywordSyncDcsckptManager()
        self += KeywordSyncDcsckptPairs()
        self += KeywordSyncDcssnapDcs()
        self += KeywordSyncDcssnapManager()
        self += KeywordSyncDcssnapSnapname()
        self += KeywordSyncNecismsnapArray()
        self += KeywordSyncNecismsnapDevs()
        self += KeywordSyncEvasnapEvaname()
        self += KeywordSyncEvasnapSnapname()
        self += KeywordSyncEvasnapPairs()
        self += KeywordSyncHp3parArray()
        self += KeywordSyncHp3parRcg()
        self += KeywordSyncHp3parMode()
        self += KeywordSyncHp3parMethod()
        self += KeywordSyncHp3parSnapArray()
        self += KeywordSyncHp3parSnapVvnames()
        self += KeywordSyncDdsSrc()
        self += KeywordSyncDdsDst()
        self += KeywordSyncDdsTarget()
        self += KeywordSyncDdsSnapSize()
        self += KeywordVdiskPath()
        self += KeywordHbType()
        self += KeywordHbName()
        self += KeywordSubsetParallel()
        self += KeywordStonithType()
        self += KeywordStonithTarget()
        self += KeywordStonithCalloutCmd()
        self += KeywordContainerType()
        self += KeywordContainerZoneDeleteOnStop()
        self += KeywordVmName()
        self += KeywordOsvcRootPath()
        self += KeywordContainerRcmd()
        self += KeywordGuestos()
        self += KeywordRootfs()
        self += KeywordLxcCf()
        self += KeywordJailRoot()
        self += KeywordJailIps()
        self += KeywordJailIp6s()
        self += KeywordTemplate()
        self += KeywordSharedIpGroup()
        self += KeywordSize()
        self += KeywordKeyName()
        self += KeywordCloudId()
        self += KeywordVmUuid()
        self += KeywordVirtinst()
        self += KeywordDockerDockerService()
        self += KeywordDockerRunCommand()
        self += KeywordDockerRunImage()
        self += KeywordDockerRunArgs()
        self += KeywordSnap()
        self += KeywordSnapof()
        self += KeywordContainerOrigin()
        self += KeywordSrpIp()
        self += KeywordSrpRootpath()
        self += KeywordSrpPrmCores()
        self += KeywordShareType()
        self += KeywordSharePath()
        self += KeywordShareNfsOpts()
        self += KeywordTaskCommand()
        self += KeywordTaskConfirmation()
        self += KeywordTaskOnError()
        self += KeywordTaskUser()
        self += KeywordTaskSchedule()
if __name__ == "__main__":
    # When run as a script, dump the keyword reference templates.
    store = KeyDict()
    store.print_templates()
    #print(store.DEFAULT.app)
    #print(store['DEFAULT'])
opensvc-1.8~20170412/lib/resIpSgHP-UX.py 0000644 0001750 0001750 00000000177 13073467726 017546 0 ustar jkelbert jkelbert Res = __import__("resIpHP-UX")
class Ip(Res.Ip):
    """HP-UX IP resource variant whose start and stop actions are
    no-ops: both always report success without touching the address.
    """

    def start(self):
        # Nothing to do; report success.
        return 0

    def stop(self):
        # Nothing to do; report success.
        return 0
opensvc-1.8~20170412/lib/rcAssetAIX.py 0000644 0001750 0001750 00000006072 13073467726 017356 0 ustar jkelbert jkelbert import os
import datetime
from rcUtilities import justcall, which
from rcGlobalEnv import rcEnv
import rcAsset
class Asset(rcAsset.Asset):
    """AIX asset inventory collector.

    Harvests hardware and OS facts by parsing the output of standard
    AIX commands (prtconf, oslevel, bindprocessor, lsdev). Getters
    return strings; '0' or 'Unknown' signal an undetermined value.
    """
    def __init__(self, node):
        rcAsset.Asset.__init__(self, node)
        # Cache the full prtconf output once; most getters scan it.
        (out, err, ret) = justcall(['prtconf'])
        if ret != 0:
            self.prtconf = []
        else:
            self.prtconf = out.split('\n')
        self.lpar = self.is_lpar()

    def is_lpar(self):
        """Return True when running in an LPAR.

        'prtconf -L' prints '-1' when LPARs are not supported.
        """
        cmd = ["prtconf", "-L"]
        out, err, ret = justcall(cmd)
        if ret != 0:
            # Bug fix: the original used a bare 'raise' here, which is
            # invalid outside an except clause and would itself crash
            # with an unrelated RuntimeError. Raise a meaningful error.
            raise Exception("command '%s' failed: %s" % (" ".join(cmd), err))
        if '-1' in out:
            return False
        return True

    def _get_mem_bytes(self):
        """Return physical memory size as a string, '0' if unknown.

        NOTE(review): despite the method name, the returned value is in
        MB ('prtconf -m' reports MB/GB) — confirm against the rcAsset
        collector convention.
        """
        cmd = ["prtconf", "-m"]
        out, err, ret = justcall(cmd)
        if ret != 0:
            return '0'
        l = out.split()
        if 'Memory Size:' not in out:
            return '0'
        if len(l) != 4:
            return '0'
        try:
            size = int(l[2])
        except ValueError:
            # malformed prtconf output; treat as unknown
            return '0'
        unit = l[3]
        if unit == 'GB':
            size = size * 1024
        elif unit == 'MB':
            pass
        else:
            return '0'
        return str(size)

    def _get_mem_banks(self):
        # Not discoverable from within an LPAR.
        if self.lpar:
            return '0'
        return 'TODO'

    def _get_mem_slots(self):
        # Not discoverable from within an LPAR.
        if self.lpar:
            return '0'
        return 'TODO'

    def _get_os_vendor(self):
        return 'IBM'

    def _get_os_name(self):
        return 'AIX'

    def _get_os_release(self):
        """Return the full oslevel service pack string."""
        cmd = ["oslevel", "-s"]
        (out, err, ret) = justcall(cmd)
        if ret != 0:
            return 'Unknown'
        return out.strip()

    def _get_os_kernel(self):
        """Return the oslevel maintenance level string."""
        cmd = ["oslevel", "-r"]
        (out, err, ret) = justcall(cmd)
        if ret != 0:
            return 'Unknown'
        return out.strip()

    def _get_os_arch(self):
        for line in self.prtconf:
            if "Kernel Type:" in line:
                return line.split(":")[-1].strip()
        return 'Unknown'

    def _get_cpu_freq(self):
        # prtconf reports e.g. "Processor Clock Speed: 3425 MHz";
        # keep only the numeric part.
        for line in self.prtconf:
            if "Processor Clock Speed:" in line:
                return line.split(":")[-1].split()[0].strip()
        return '0'

    def _get_cpu_cores(self):
        """Count logical processors via bindprocessor -q."""
        cmd = ["bindprocessor", "-q"]
        out, err, ret = justcall(cmd)
        if ret != 0:
            return '0'
        l = out.split(":")
        return str(len(l[-1].strip().split()))

    def _get_cpu_dies(self):
        """Count 'proc' devices reported by lsdev."""
        cmd = ["lsdev", "-Cc", "processor"]
        out, err, ret = justcall(cmd)
        if ret != 0:
            return '0'
        return str(len([line for line in out.split('\n') if 'proc' in line]))

    def _get_cpu_model(self):
        for line in self.prtconf:
            if "Processor Type:" in line:
                return line.split(":")[-1].strip()
        return 'Unknown'

    def _get_serial(self):
        for line in self.prtconf:
            if "Machine Serial Number:" in line:
                return line.split(":")[-1].strip()
        return 'Unknown'

    def _get_model(self):
        for line in self.prtconf:
            if "System Model:" in line:
                return line.split(":")[-1].strip()
        return 'Unknown'
opensvc-1.8~20170412/lib/rcLvmAIX.py 0000644 0001750 0001750 00000017442 13073467726 017040 0 ustar jkelbert jkelbert from subprocess import *
"""
lsvg format
===========
VOLUME GROUP: rootvg VG IDENTIFIER: 00082a6a0000d400000001321aa20bf2
VG STATE: active PP SIZE: 64 megabyte(s)
VG PERMISSION: read/write TOTAL PPs: 959 (61376 megabytes)
MAX LVs: 256 FREE PPs: 717 (45888 megabytes)
LVs: 11 USED PPs: 242 (15488 megabytes)
OPEN LVs: 10 QUORUM: 2 (Enabled)
TOTAL PVs: 1 VG DESCRIPTORS: 2
STALE PVs: 0 STALE PPs: 0
ACTIVE PVs: 1 AUTO ON: yes
MAX PPs per VG: 32512
MAX PPs per PV: 1016 MAX PVs: 32
LTG size (Dynamic): 256 kilobyte(s) AUTO SYNC: no
HOT SPARE: no BB POLICY: relocatable
lsvg -l vgname format
=====================
rootvg:
LV NAME TYPE LPs PPs PVs LV STATE MOUNT POINT
hd5 boot 1 1 1 closed/syncd N/A
hd6 paging 32 32 1 open/syncd N/A
hd8 jfs2log 1 1 1 open/syncd N/A
hd4 jfs2 16 16 1 open/syncd /
hd2 jfs2 40 40 1 open/syncd /usr
hd9var jfs2 16 16 1 open/syncd /var
hd3 jfs2 16 16 1 open/syncd /tmp
hd1 jfs2 16 16 1 open/syncd /home
hd10opt jfs2 16 16 1 open/syncd /opt
lv_logs jfs2 32 32 1 open/syncd /logs
lv_moteurs jfs2 56 56 1 open/syncd /moteurs
lspv format
===========
hdisk0 00082a6a1aa20b3c rootvg active
hdisk1 00082a6ae73c7bb6 datavg active
lspv -l pvname format
=====================
hdisk0:
LV NAME LPs PPs DISTRIBUTION MOUNT POINT
hd10opt 16 16 00..00..16..00..00 /opt
hd2 40 40 00..00..40..00..00 /usr
hd9var 16 16 00..00..16..00..00 /var
hd3 16 16 00..00..16..00..00 /tmp
hd1 16 16 00..00..16..00..00 /home
hd5 1 1 01..00..00..00..00 N/A
hd6 32 32 00..00..32..00..00 N/A
hd8 1 1 00..00..01..00..00 N/A
hd4 16 16 00..00..16..00..00 /
lv_logs 32 32 00..32..00..00..00 /logs
lv_moteurs 56 56 00..56..00..00..00 /moteurs
"""
class InitVgError(Exception):
    # Raised when an lsvg/lspv command fails while loading the LVM state.
    pass
class Container(dict):
    """
    A dict whose items are also reachable as attributes and by calling the
    instance, used as a small registry of named LVM objects (vgs, lvs, pvs).

    Bug fix: __getitem__ previously discarded the value fetched from the
    underlying dict (missing return), so every item, attribute and call
    access returned None.
    """
    def __call__(self, key):
        return self.__getitem__(key)
    def __getitem__(self, key):
        # was: dict.__getitem__(self, key) without 'return' -> always None
        return dict.__getitem__(self, key)
    def __setitem__(self, key, value):
        dict.__setitem__(self, key, value)
    def __getattr__(self, key):
        # attribute reads fall through to item lookup
        return self[key]
    def __setattr__(self, key, value):
        # attribute writes are stored as items
        self[key] = value
    def __iadd__(self, o):
        # register an object under its 'name' attribute: container += obj
        dict.__setitem__(self, o.name, o)
        return self
class Vg(object):
    # Declarative list of 'lsvg' fields to extract: each entry names the
    # parse_<prop> method to call, the marker string to locate in the line,
    # and how many whitespace-separated tokens after the marker to consume.
    props = [{"prop": "ppsize", "key": "PP SIZE:", "consume": 2}]
    def __init__(self, name):
        # Load vg properties and the lv list by running lsvg twice.
        self.name = name
        self.lv = Container()
        self.pv = Container()
        self.load_vg(name)
        self.load_lv(name)
    def __str__(self):
        l = []
        l.append("type: vg")
        l.append("name: %s"%self.name)
        l.append("pp size: %d MB"%self.ppsize)
        s = '\n'.join(l)
        for lv in self.lv.values():
            s += str(lv) + '\n'
        return s
    def parse_ppsize(self, l):
        # l is [size, unit], e.g. ['64', 'megabyte(s)']; normalize to MB.
        # NOTE(review): under python3 the kilobyte branch yields a float
        # (true division) — confirm callers tolerate that.
        self.ppsize = int(l[0])
        if 'megabyte' in l[1]:
            pass
        elif 'kilobyte' in l[1]:
            self.ppsize /= 1024
        elif 'gigabyte' in l[1]:
            self.ppsize *= 1024
    def load_vg(self, name):
        # Parse 'lsvg <name>' output for the properties declared in
        # self.props (see the module docstring for a sample output).
        cmd = ['lsvg', name]
        p = Popen(cmd, stdout=PIPE)
        out, err = p.communicate()
        if p.returncode != 0:
            raise InitVgError()
        for line in out.split('\n'):
            # NB: the loop variable below shadows the Popen object 'p',
            # which is no longer needed at this point
            for p in self.props:
                if p['key'] not in line:
                    continue
                _line = line[line.index(p['key'])+len(p['key']):]
                l = _line.split()
                getattr(self, 'parse_'+p['prop'])(l[0:p['consume']])
    def load_lv(self, name):
        # Parse 'lsvg -l <name>' to populate the lv Container; lv sizes are
        # derived from the PP count and the vg PP size.
        cmd = ['lsvg', '-l', name]
        p = Popen(cmd, stdout=PIPE)
        out, err = p.communicate()
        if p.returncode != 0:
            raise InitVgError()
        for line in out.split('\n'):
            if 'LV NAME' in line:
                # header line
                continue
            l = line.split()
            if len(l) < 6:
                # vg name banner or blank line
                continue
            _name, _type, _lps, _pps, _pvs, _state = l[0:6]
            # mount point may contain spaces
            _mntpt = ' '.join(l[6:])
            lv = Lv(_name)
            lv.type = _type
            lv.lps = int(_lps)
            lv.pps = int(_pps)
            lv.pvs = int(_pvs)
            lv.state = _state
            lv.mntpt = _mntpt
            lv.size = lv.pps * self.ppsize
            self.lv += lv
class Pv(object):
    """
    AIX physical volume: parses 'lspv -l <name>' to map each logical
    volume hosted on this PV to its total physical partition count.
    """
    def __init__(self, name):
        self.name = name
        # lv name (str) -> total PPs of that lv on this pv (int)
        self.lv_pps = {}
        self.load_lv_pps(name)
    def load_lv_pps(self, name):
        """Run 'lspv -l <name>' and feed its output to the parser."""
        cmd = ['lspv', '-l', name]
        p = Popen(cmd, stdout=PIPE)
        out, err = p.communicate()
        if p.returncode != 0:
            raise InitVgError()
        self._parse_lv_pps(out)
    def _parse_lv_pps(self, out):
        """
        Parse 'lspv -l' output text into self.lv_pps.
        Data lines are: LV NAME, LPs, PPs, DISTRIBUTION, MOUNT POINT.
        """
        for line in out.split('\n'):
            if 'LV NAME' in line:
                # header line
                continue
            l = line.split()
            if len(l) < 4:
                # pv name banner or blank line
                continue
            _name, _lps, _pps, _distrib = l[0:4]
            _mntpt = ' '.join(l[4:])
            # bug fix: accumulate per lv name. The original tested the pv
            # name ('name') instead of the lv name ('_name'), so repeated
            # lv lines overwrote the count instead of summing it.
            if _name in self.lv_pps:
                self.lv_pps[_name] += int(_pps)
            else:
                self.lv_pps[_name] = int(_pps)
class Lv(object):
    """
    A logical volume. Only the name and the per-pv usage map are set here;
    the other attributes (type, size in MB, state, ...) are attached by
    Vg.load_lv and Lvm.load_pv.
    """
    def __init__(self, name):
        self.name = name
        self.pv_size = {}

    def __str__(self):
        lines = [
            "type: lv",
            "name: %s"%self.name,
            "lv size: %d MB"%self.size,
            "pv usage: %s"%self.pv_size.items(),
        ]
        return '\n'.join(lines)
class Lvm(object):
    # Top-level AIX LVM inventory: loads every vg (with its lvs), then
    # every pv, and cross-links lv <-> pv space usage.
    def __init__(self):
        self.vg = Container()
        self.pv = Container()
        self.load_vg()
        self.load_pv()
    def __str__(self):
        s = ""
        for vgname, vg in self.vg.items():
            s += str(vg) + '\n\n'
        return s
    def load_vg(self):
        # 'lsvg' with no argument prints the vg names.
        cmd = ['lsvg']
        p = Popen(cmd, stdout=PIPE)
        out, err = p.communicate()
        if p.returncode != 0:
            raise InitVgError()
        for vg in out.split():
            self.vg += Vg(vg)
    def load_pv(self):
        # 'lspv' lines are: <name> <id> <vgname> <state>
        cmd = ['lspv']
        p = Popen(cmd, stdout=PIPE)
        out, err = p.communicate()
        if p.returncode != 0:
            raise InitVgError()
        for line in out.split('\n'):
            l = line.split()
            if len(l) != 4:
                continue
            _name, _id, _vgname, _state = l
            pv = Pv(_name)
            pv.id = _id
            pv.vgname = _vgname
            pv.state = _state
            self.pv += pv
            # compute, for each lv hosted on this pv, the space (MB) it
            # uses there: vg PP size * number of PPs on this pv
            for lvname, lvpps in pv.lv_pps.items():
                vg, lv = self.find_lv(lvname)
                if lv is None:
                    continue
                lv.pv_size[pv.name] = vg.ppsize * lvpps
    def find_lv(self, lvname):
        # Return the (vg, lv) pair owning lvname, or (None, None).
        for vg in self.vg.values():
            for lv in vg.lv.values():
                if lv.name == lvname:
                    return vg, lv
        return None, None
if __name__ == "__main__" :
    # Manual test entry point: discover and dump the whole LVM topology.
    lvm = Lvm()
    print(lvm)
opensvc-1.8~20170412/lib/resContainerSrp.py 0000644 0001750 0001750 00000017546 13073467726 020541 0 ustar jkelbert jkelbert import os
from rcGlobalEnv import rcEnv
import rcStatus
import resources as Res
from rcUtilities import which, qcall, justcall
import resContainer
import rcExceptions as ex
class Srp(resContainer.Container):
    """
    HP-UX SRP (Secure Resource Partition) container resource driver.
    Wraps the 'srp' command for lifecycle operations and parses its
    '-list' and '-status' outputs.
    """
    def files_to_sync(self):
        # The srp export file is the only state to replicate to peers.
        return [self.export_file]

    def get_rootfs(self):
        # NOTE(review): returns the 'state' field of get_state() (inherited,
        # not visible here) — looks like it should be the rootpath; verify
        # against the rcp/rcp_from/install_drp_flag callers below, which use
        # the return value as a filesystem path.
        return self.get_state()['state']

    def rcp_from(self, src, dst):
        """Copy a file from inside the container rootfs to the host."""
        rootfs = self.get_rootfs()
        if len(rootfs) == 0:
            raise ex.excError()
        src = rootfs + src
        cmd = ['cp', src, dst]
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError("'%s' execution error:\n%s"%(' '.join(cmd), err))
        return out, err, ret

    def rcp(self, src, dst):
        """Copy a file from the host into the container rootfs."""
        rootfs = self.get_rootfs()
        if len(rootfs) == 0:
            raise ex.excError()
        dst = rootfs + dst
        cmd = ['cp', src, dst]
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError("'%s' execution error:\n%s"%(' '.join(cmd), err))
        return out, err, ret

    def install_drp_flag(self):
        """Drop the .drp_flag marker file at the root of the container fs."""
        rootfs = self.get_rootfs()
        flag = os.path.join(rootfs, ".drp_flag")
        self.log.info("install drp flag in container : %s"%flag)
        with open(flag, 'w') as f:
            f.write(' ')
            f.close()  # redundant inside 'with', kept as-is

    def container_start(self):
        cmd = ['srp', '-start', self.name]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ex.excError()

    def container_stop(self):
        cmd = ['srp', '-stop', self.name]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ex.excError()

    def container_forcestop(self):
        # No force-stop mechanism for SRP: always report failure.
        raise ex.excError

    def operational(self):
        # The container is operational if a trivial command can run in it.
        cmd = self.runmethod + ['pwd']
        ret = qcall(cmd)
        if ret == 0:
            return True
        return False

    def get_verbose_list(self):
        """
        Parse 'srp -list <name> -v' into a {service.key: value} dict.

        Name: iisdevs1 Template: system Service: provision ID: 1
        ----------------------------------------------------------------------
        autostart=0
        srp_name=iisdevs1
        ...
        """
        cmd = ['srp', '-list', self.name, '-v']
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError("srp -list returned %d:\n%s"%(ret, err))
        data = {}
        words = out.split()
        for i, w in enumerate(words):
            # NOTE(review): assumes a 'Service:' token always precedes the
            # first key=value pair, otherwise 'service' is unbound — confirm
            # against real srp output.
            if w == "Service:":
                service = words[i+1]
            if '=' in w:
                key = service + '.' + w[:w.index('=')]
                val = w[w.index('=')+1:]
                data[key] = val
        return data

    def get_verbose_status(self):
        """
        Parse 'srp -status <name> -v' into {'ip': [...], 'mem': {}, 'cpu': {}}.

        SRP Status:
        ----------------- Status for SRP:iisdevs1 ----------------------
        Status:MAINTENANCE
        Type:system Subtype:private Rootpath:/var/hpsrp/iisdevs1
        IP:10.102.184.12 Interface:lan0:1 (DOWN) id: 1
        MEM Entitle:50.00% MEM Max:(none) Usage:0.00%
        CPU Entitle:9.09% CPU Max:(none) Usage:0.00%
        """
        cmd = ['srp', '-status', self.name, '-v']
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError("srp -status returned %d:\n%s"%(ret, err))
        data = {'ip': [], 'mem': {}, 'cpu': {}}
        words = out.split()
        for i, w in enumerate(words):
            if w.startswith('IP'):
                # "IP:<addr> Interface:<intf> (<STATE>) id: <n>"
                ip = w.replace('IP:','')
                intf = words[i+1].replace('Interface:','')
                state = words[i+2].strip('(').strip(')')
                id = words[i+4]
                data['ip'].append({
                    'ip': ip,
                    'intf': intf,
                    'state': state,
                    'id': id,
                 })
            elif w == "MEM" and words[i+1].startswith("Entitle"):
                entitle = words[i+1].replace('Entitle:','')
                max = words[i+3].replace('Max:','')
                usage = words[i+4].replace('Usage:','')
                data['mem'] = {
                    'entitle': entitle,
                    'max': max,
                    'usage': usage,
                }
            elif w == "CPU" and words[i+1].startswith("Entitle"):
                entitle = words[i+1].replace('Entitle:','')
                max = words[i+3].replace('Max:','')
                usage = words[i+4].replace('Usage:','')
                data['cpu'] = {
                    'entitle': entitle,
                    'max': max,
                    'usage': usage,
                }
        return data

    def get_status(self, nodename=None):
        """
        Parse the terse 'srp -status <name>' output, optionally run on a
        remote node through rcEnv.rsh.

        NAME TYPE STATE SUBTYPE ROOTPATH
        iisdevs1 system maintenance private /var/hpsrp/iisdevs1
        """
        cmd = ['srp', '-status', self.name]
        if nodename is not None:
            cmd = rcEnv.rsh.split() + [nodename] + cmd
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError("srp -status returned %d:\n%s"%(ret, err))
        lines = out.split('\n')
        if len(lines) < 2:
            raise ex.excError("srp -status output too short:\n%s"%out)
        l = lines[1].split()
        if l[0] != self.name:
            raise ex.excError("srp -status second line, first entry does not match container name")
        if len(l) != 5:
            raise ex.excError("unexpected number of entries in %s"%str(l))
        # NB: the tuple unpacking below is unused; the dict is built from l directly
        _type, _state, _subtype, _rootpath = l[1:]
        return {
            'type': l[1],
            'state': l[2],
            'subtype': l[3],
            'rootpath': l[4],
        }

    def is_down(self):
        d = self.get_status()
        if d['state'] == 'stopped':
            return True
        return False

    def is_up_on(self, nodename):
        return self.is_up(nodename)

    def is_up(self, nodename=None):
        d = self.get_status(nodename)
        if d['state'] == 'started':
            return True
        return False

    def get_container_info(self):
        # Resource accounting is not implemented for SRP.
        return {'vcpus': '0', 'vmem': '0'}

    def check_manual_boot(self):
        # Returns True when the container does NOT autostart at boot,
        # i.e. boot is under the agent's control.
        try:
            val = self.get_verbose_list()['init.autostart']
        except ex.excError:
            return False
        if val == 'yes' or val == '1':
            return False
        return True

    def check_capabilities(self):
        # The driver is only usable where the 'srp' command is installed.
        if not which('srp'):
            self.log.debug("srp is not in PATH")
            return False
        return True

    def presync(self):
        # Refresh the export file before replication to peers.
        self.container_export()

    def postsync(self):
        # Re-import the replicated export file on the peer.
        self.container_import()

    def container_import(self):
        """Import the container definition from the synced export file."""
        if not os.path.exists(self.export_file):
            raise ex.excError("%s does not exist"%self.export_file)
        cmd = ['srp', '-batch', '-import', '-xfile', self.export_file, 'allow_sw_mismatch=yes', 'autostart=no']
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ex.excError()

    def container_export(self):
        """Export the container definition to self.export_file."""
        cmd = ['srp', '-batch', '-export', self.name, '-xfile', self.export_file]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ex.excError()

    def __init__(self,
                 rid,
                 name,
                 guestos="HP-UX",
                 osvc_root_path=None,
                 **kwargs):
        resContainer.Container.__init__(self,
                                        rid=rid,
                                        name=name,
                                        type="container.srp",
                                        guestos=guestos,
                                        osvc_root_path=osvc_root_path,
                                        **kwargs)
        # export file replicated to peer nodes (see files_to_sync)
        self.export_file = os.path.join(rcEnv.pathvar, name + '.xml')
        # command prefix to execute a command inside the container
        self.runmethod = ['srp_su', name, 'root', '-c']

    def provision(self):
        # Defer to the provSrp provisioner module.
        m = __import__("provSrp")
        prov = m.ProvisioningSrp(self)
        prov.provisioner()

    def __str__(self):
        return "%s name=%s" % (Res.Resource.__str__(self), self.name)
opensvc-1.8~20170412/lib/checkFsUsageHP-UX.py 0000644 0001750 0001750 00000002674 13073467726 020531 0 ustar jkelbert jkelbert import checks
from rcUtilities import justcall
class check(checks.check):
    """
    Filesystem usage checker: parses 'df -lP' output and reports one
    (mount point, used%) entry per retained filesystem.
    """
    chk_type = "fs_u"

    def find_svc(self, mountpt):
        """Return the svcname owning the fs mounted on mountpt, or ''."""
        for svc in self.svcs:
            for res in svc.get_resources('fs'):
                if res.mount_point == mountpt:
                    return svc.svcname
        return ''

    def _skip(self, dev, mntpt):
        """True when a df line must not be reported."""
        # discard bind mounts: we get metric from the source anyway
        if dev.startswith('/') and not dev.startswith('/dev') and not dev.startswith('//'):
            return True
        for prefix in ('/Volumes', '/run', '/sys/'):
            if mntpt.startswith(prefix):
                return True
        if mntpt == "/dev/shm":
            return True
        # do not report osvc sync snapshots fs usage
        if "osvc_sync_" in dev:
            return True
        return False

    def do_check(self):
        """Run df and build the check entry list; self.undef on failure."""
        (out, err, ret) = justcall(['df', '-lP'])
        if ret != 0:
            return self.undef
        lines = out.split('\n')
        if len(lines) < 2:
            return self.undef
        entries = []
        for line in lines[1:]:
            fields = line.split()
            if len(fields) != 6:
                continue
            if self._skip(fields[0], fields[5]):
                continue
            entries.append({
                'chk_instance': fields[5],
                'chk_value': fields[4],
                'chk_svcname': self.find_svc(fields[5]),
            })
        return entries
opensvc-1.8~20170412/lib/rcCollectorCli.py 0000755 0001750 0001750 00000273776 13073467726 020337 0 ustar jkelbert jkelbert # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import stat
import json
import optparse
import shlex
import re
import copy
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import readline
import atexit
import fnmatch
import rcExceptions as ex
from rcGlobalEnv import Storage
try:
import requests
except:
raise ex.excError("This feature requires the python requests module")
try:
from requests.packages.urllib3.exceptions import InsecureRequestWarning
except ImportError:
InsecureRequestWarning = None
# the collector api doc uses restructured text we'll have to print
# in the command help messages
import textwrap
try:
import docutils.utils
import docutils.parsers
import docutils.parsers.rst
has_docutils = True
except:
has_docutils = False
from rcUtilities import bdecode
from rcColor import formatter
if sys.version_info[0] >= 3:
raw_input = input
# --- CLI configuration files, all rooted in the user's home directory ---
progname = "opensvc-cli"
homedir = os.path.expanduser("~")
# cached copy of the collector API description
api_cache_f = os.path.join(homedir, "."+progname+".api")
# CLI configuration file and its section
conf_f = os.path.join(homedir, "."+progname)
conf_section = "collector"
# readline history file
history_f = conf_f + "_history"
# current virtual path in the API tree (the CLI's "cwd"); module-level
# 'global' is a no-op, kept as-is
global path
path = "/"
api_cache = None
# Fallback listing descriptor: which property to filter on, which
# properties to fetch, and the display format string.
ls_info_default = {
    "filter": "id",
    "props": ["id"],
    "fmt": "%(id)-10s",
}
# Per-collection listing descriptors, keyed by the last path component.
ls_info = {
    "": {
        "filter": "path",
        "props": ["path"],
        "fmt": " %(path)s",
    },
    "action_queue": {
        "filter": "id",
        "props": ["id", "command"],
        "fmt": "%(id)-10s %(command)s",
    },
    "apps": {
        "filter": "app",
        "props": ["id", "app"],
        "fmt": "%(id)-10s %(app)s",
    },
    "nodes": {
        "filter": "nodename",
        "props": ["node_id", "nodename"],
        "fmt": "%(node_id)s %(nodename)s",
    },
    "services": {
        "filter": "svcname",
        "props": ["svc_id", "svcname"],
        "fmt": "%(svc_id)s %(svcname)s",
    },
    "rulesets": {
        "filter": "ruleset_name",
        "props": ["id", "ruleset_name"],
        "fmt": "%(id)-10s %(ruleset_name)s",
    },
    "modulesets": {
        "filter": "modset_name",
        "props": ["id", "modset_name"],
        "fmt": "%(id)-10s %(modset_name)s",
    },
    "users": {
        "filter": "email",
        "props": ["id", "email"],
        "fmt": "%(id)-10s %(email)s",
    },
    "groups": {
        "filter": "role",
        "props": ["id", "role"],
        "fmt": "%(id)-10s %(role)s",
    },
    "tags": {
        "filter": "tag_name",
        "props": ["tag_id", "tag_name"],
        "fmt": "%(tag_id)s %(tag_name)s",
    },
    "variables": {
        "filter": "var_name",
        "props": ["id", "var_name"],
        "fmt": "%(id)-10s %(var_name)s",
    },
    "modules": {
        "filter": "modset_mod_name",
        "props": ["id", "modset_mod_name"],
        "fmt": "%(id)-10s %(modset_mod_name)s",
    },
    "filters": {
        "filter": "f_label",
        "props": ["id", "f_label"],
        "fmt": "%(id)-10s %(f_label)s",
    },
    "filtersets": {
        "filter": "fset_name",
        "props": ["id", "fset_name"],
        "fmt": "%(id)-10s %(fset_name)s",
    },
}
#
# requests setup
#
# silence the InsecureRequestWarning spam when --insecure is used;
# best-effort, older requests may not expose this entry point
try:
    requests.packages.urllib3.disable_warnings()
except:
    pass
class Cmd(object):
    """
    Base class for all interactive CLI commands. Provides option parsing,
    path-aware completion, listing, and response formatting helpers.
    Subclasses set 'command' (the first word of the line), 'desc', and a
    'parser', and implement cmd(line).
    """
    # color codes
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    END = '\033[0m'
    def __init__(self, cli=None):
        self.cli = cli
        self.options = Storage()
        self.options.format=cli.options.format
    @formatter
    def print_content(self, s):
        # Decode a collector json response: print info/error messages and
        # return the payload (the @formatter decorator renders it).
        data = json.loads(bdecode(s))
        if self.options.format == "json":
            return data
        if "info" in data:
            if isinstance(data["info"], list):
                infos = data["info"]
            else:
                infos = [data["info"]]
            for info in infos:
                print("Info:", info)
        if "error" in data:
            if isinstance(data["error"], list):
                errors = data["error"]
            else:
                errors = [data["error"]]
            for error in errors:
                print("Error:", error)
        if "data" in data:
            return data["data"]
        return ""
    def path_match_handlers(self, p):
        # True if p exactly matches one of the API handler paths.
        for a, l in self.cli.api_o.get().items():
            for d in l:
                if path_match_handler(p, d):
                    return True
        return False
    def path_match_handlers_or_parents(self, p):
        # True if p matches a handler path or any of its parents.
        for a, l in self.cli.api_o.get().items():
            for d in l:
                if path_match_handler_or_parents(p, d):
                    return True
        return False
    def colorize(self, s, c=None):
        # Wrap s in the given ANSI color code, if any.
        if c is None:
            return s
        return c + s + self.END
    def get_handler(self, action, p):
        # Return the handler descriptor for (HTTP action, path), or None.
        for d in self.cli.api_o.get()[action]:
            if path_match_handler(p, d):
                return d
    def match(self, line):
        """
        Tells if a CLI command line is to be handled by this command class,
        using the first word as a telltale.
        """
        l = line.split()
        if len(l) == 0 or l[0] != self.command:
            return False
        return True
    def replace_params_in_path(self, candidates_path, words):
        # Substitute <param> placeholders in a candidates path with the
        # values of the corresponding --param options found in words.
        d = {}
        if candidates_path.count("<") == 0:
            return candidates_path
        for i, w in enumerate(words):
            if i < len(words) - 1:
                next_word = words[i+1]
            else:
                next_word = None
            if w.startswith("--") and next_word is not None:
                d[w.lstrip("-")] = next_word
        p = copy.copy(candidates_path)
        new_p = copy.copy(candidates_path)
        while p.count("<") > 0:
            try:
                param = p[p.index("<")+1:p.index(">")]
                p = p[p.index(">")+1:]
                if param in d:
                    new_p = new_p.replace("<"+param+">", d[param])
            except:
                break
        return new_p
    def set_parser_options_from_cmdline(self, line):
        words = shlex.split(line)
        self.set_parser_options_from_words(words)
    def set_parser_options_from_words(self, words):
        # Rebuild the option parser from the handler matching the first
        # absolute path found on the command line.
        if len(words) == 0:
            return
        paths = [w for w in words if w.startswith("/")]
        if len(paths) == 0:
            return
        path = paths[0]
        try:
            self.set_parser_options(path)
        except Exception as e:
            print(e)
    def candidates(self, pattern, words):
        # Completion entry point: return the option names and object/api
        # path candidates matching the pattern being typed.
        candidates = []
        if hasattr(self, "candidates_path"):
            param = words[-1]
            if not param in self.candidates_path and len(words) > 2 and pattern != "":
                param = words[-2]
            candidates_path = self.candidates_path.get(param)
        else:
            candidates_path = None
        if candidates_path:
            candidates_path = self.replace_params_in_path(candidates_path, words)
            pattern = candidates_path + "/" + pattern
        else:
            self.set_parser_options_from_words(words)
            for o in self.parser.option_list:
                candidates += o._long_opts
        if pattern is None:
            pattern = ""
        elif pattern == ".." or pattern.endswith("/.."):
            pass
        elif pattern.startswith("/") and pattern.endswith("/"):
            pass
        elif pattern.count("/") == 0:
            pattern += "*"
        else:
            pattern = pattern[:pattern.rindex("/")+1]
        ls_data = self.ls_data(pattern)
        for e in ls_data:
            if e.startswith("OBJ"):
                candidate = e.replace("OBJ", "").strip()
                l = candidate.split()
                if len(l) > 1 and len(candidate) > 11:
                    # strip the fixed-width id column from the listing line
                    candidate = candidate[11:]
                if candidates_path is None and pattern.count("/") > 0:
                    candidate = pattern.rstrip("/")+"/"+candidate
                candidates.append(candidate)
            elif candidates_path is None and e.startswith("API") and e != "API":
                candidate = e.split()[-1]
                if pattern.startswith("/") and not candidate.startswith("/"):
                    candidate = pattern + candidate
                candidates.append(candidate)
        return candidates
    def args_to_path(self, args):
        # Resolve args[1] (if any) against the current global path.
        try:
            arg1 = args[1]
            if arg1.startswith("/"):
                _path = arg1
            else:
                _path = copy.copy(path) + "/" + args[1]
        except:
            _path = copy.copy(path)
        return _path
    def get_data_from_options(self, options):
        # Build the (data, files, headers) triplet for a request from the
        # repeated --data options. "@file" loads a raw json body; "key=@file"
        # attaches a file upload; "key=value" sets a form field.
        data = {}
        files = {}
        headers = None
        if options is None or "data" not in options.__dict__ or options.data is None:
            return data, files, headers
        for d in options.data:
            if len(d) > 0 and d[0] == "@" and os.path.exists(d[1:]):
                with open(d[1:], 'r') as fd:
                    data = fd.read()
                headers = {
                    'Accept' : 'application/json',
                    'Content-Type' : 'application/json; charset=utf-8'
                }
                return data, files, headers
            if d.count("=") == 0 or len(d) < d.index("=")+1:
                print("ignore malformated data:", d)
                continue
            key = d[:d.index("=")]
            val = d[d.index("=")+1:]
            if len(val) > 1 and val[0] == "@" and os.path.exists(val[1:]):
                fpath = val[1:]
                try:
                    fd = open(fpath, 'rb')
                except:
                    print("error opening file %s" % fpath, file=sys.stderr)
                    raise
                files["file"] = (os.path.realpath(fpath), fd)
            else:
                data[key] = val
        return data, files, headers
    def factorize_dot_dot(self, p):
        # Resolve ".." components in a path, like os.path.normpath would.
        l1 = p.split("/")
        l2 = []
        for i, e in enumerate(l1):
            if i == 0:
                l2.append(e)
                continue
            if e == "..":
                l2.pop()
                continue
            l2.append(e)
        if l2 == [""]:
            p = "/"
        else:
            p = "/".join(l2)
        if len(p) > 1:
            p.rstrip("/")  # NOTE(review): result discarded, trailing '/' kept
        return p
    def ls_data(self, line):
        # Core listing logic: split the request into a collection path and a
        # shell pattern, fetch matching objects from the collector ("OBJ "
        # lines) and matching handler paths ("API " lines).
        ls_data = []
        global path
        line = line.strip()
        # strip the ls command
        relpath = re.sub("^\s*ls\s+", "", line)
        if relpath == "ls":
            relpath = ""
        relpath = relpath.strip()
        if relpath == ".." or relpath.endswith("/.."):
            relpath += "/"
        p = get_fullpath(relpath)
        if p.count("/") == 0:
            relpath = ""
            raw_req_path = copy.copy(path)
            shell_pattern = p
        elif self.is_glob(p[p.rindex("/"):]):
            # last path component is a glob: split it off
            v = p.split("/")
            raw_req_path = "/".join(v[:-1])
            shell_pattern = v[-1]
            v = relpath.split("/")
            relpath = "/".join(v[:-1])
        else:
            raw_req_path = p
            shell_pattern = ""
        req_path = self.factorize_dot_dot(raw_req_path)
        # translate the shell glob to a sql 'like' pattern
        sql_pattern = shell_pattern.replace("*", "%")
        sql_pattern = sql_pattern.replace("?", "_")
        last = req_path.rstrip("/").split("/")[-1]
        info = ls_info.get(last, ls_info_default)
        props = info.get("props", [])
        filter_prop = info.get("filter", "id")
        fmt = info.get("fmt", "%(id)s")
        if req_path not in ("/", "") and self.path_match_handlers(req_path):
            # object listing
            params = {
                "limit": 0,
                "meta": 0,
                "props": ",".join(props),
            }
            if len(sql_pattern) > 0:
                params["query"] = filter_prop + " like " + sql_pattern
            r = requests.get(self.cli.api+req_path, params=params, auth=self.cli.auth, verify=not self.cli.insecure)
            data = json.loads(bdecode(r.content)).get("data")
            if type(data) == list:
                ls_data += map(lambda d: "OBJ " + fmt % d, data)
        if self.api_candidates:
            # api paths listing
            info = ls_info.get("", ls_info_default)
            fmt = info.get("fmt", "%(id)s")
            props = info.get("props", [])
            filter_prop = info.get("filter", [])
            data = [d for d in self.get_handler_paths() if path_match_handler_or_parents(req_path, d) and d["path"] != req_path]
            #data += path_children_api(req_path)
            if len(shell_pattern) > 0:
                if not shell_pattern.startswith("/"):
                    shell_pattern = req_path + "/" + shell_pattern
                    shell_pattern = shell_pattern.replace("//", "/")
                data = [d for d in data if fnmatch.fnmatch(d.get(filter_prop), shell_pattern)]
            for i, d in enumerate(data):
                data[i]["path"] = re.sub("^"+req_path, relpath, d["path"])
            ls_data += map(lambda d: "API " + fmt % d, data)
        return ls_data
    def get_handler_paths(self):
        # Return all handler descriptors, deduplicated by path and sorted.
        data = self.cli.api_o.get()
        all_handlers = []
        all_paths = []
        for l in data.values():
            for h in l:
                if h["path"] not in all_paths:
                    all_handlers.append(h)
                    all_paths.append(h["path"])
        return sorted(all_handlers, key=lambda x: x["path"])
    def is_glob(self, s):
        # True if s contains shell glob metacharacters.
        if len(set(s) & set("?*[")) > 0:
            return True
        return False
    def set_parser_options(self, path):
        # Rebuild self.parser options from the api handler matching path:
        # static parser_options first, then one --<param> per handler param.
        try:
            h = self.get_handler(self.command.upper(), path)
        except:
            return
        if h is None:
            return self.parser
        for o in self.parser._get_all_options()[1:]:
            self.parser.remove_option(str(o))
        if hasattr(self, "parser_options"):
            for o in self.parser_options:
                self.parser.add_option(o)
        for param, d in h["params"].items():
            if d.get("type") == "list":
                action = "append"
                default = []
            else:
                action = "append"
                default = None
            self.parser.add_option("--"+param, default=default, action=action, dest=param, help=d["desc"])
class IndentedHelpFormatterRst(optparse.IndentedHelpFormatter):
    # Optparse help formatter that renders restructured-text descriptions
    # (the collector api doc format) and preserves newlines in option help.
    def format_description(self, description):
        # Convert the rst description to plain text with docutils when
        # available, then wrap each line to the formatter width.
        if not has_docutils:
            return description
        if not description:
            return ""
        doc = docutils.utils.new_document("foo")
        doc.settings.tab_width = 4
        doc.settings.pep_references = None
        doc.settings.rfc_references = None
        p = docutils.parsers.rst.Parser()
        p.parse(description, doc)
        description = doc.astext()
        desc_width = self.width - self.current_indent
        indent = " "*self.current_indent
        # the above is still the same
        bits = description.split('\n')
        formatted_bits = [
            textwrap.fill(bit, desc_width, initial_indent=indent, subsequent_indent=indent)
            for bit in bits
        ]
        result = "\n".join(formatted_bits) + "\n"
        return result
    def format_option(self, option):
        # Same as optparse's version, except help text is wrapped
        # paragraph by paragraph so embedded newlines survive.
        result = []
        opts = self.option_strings[option]
        opt_width = self.help_position - self.current_indent - 2
        if len(opts) > opt_width:
            opts = "%*s%s\n" % (self.current_indent, "", opts)
            indent_first = self.help_position
        else: # start help on same line as opts
            opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts)
            indent_first = 0
        result.append(opts)
        if option.help:
            help_text = self.expand_default(option)
            # Everything is the same up through here
            help_lines = []
            for para in help_text.split("\n"):
                help_lines.extend(textwrap.wrap(para, self.help_width))
            # Everything is the same after here
            result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
            result.extend(["%*s%s\n" % (self.help_position, "", line) for line in help_lines[1:]])
        elif opts[-1] != "\n":
            result.append("\n")
        return "".join(result)
class CliError(Exception):
    # Generic fatal CLI error, caught and displayed by the main loop.
    pass
class Api(object):
    # Holder for the collector api description, with an on-disk cache
    # (api_cache_f) to avoid re-fetching it at every CLI startup.
    api_cache = None
    def __init__(self, cli=None, refresh=False):
        self.cli = cli
        self.load(refresh=refresh)
    def load(self, refresh=False):
        """Populate self.api_cache from the local cache file or the collector."""
        if not refresh and os.path.exists(api_cache_f):
            # try local cache first
            try:
                with open(api_cache_f, 'r') as f:
                    self.api_cache = json.loads(f.read())
                return
            except Exception as e:
                # corrupt cache: drop it and fall back to fetching
                print(e)
                os.unlink(api_cache_f)
        # fallback to fetching the cache
        print("load api cache")
        r = requests.get(self.cli.api, auth=self.cli.auth, verify=not self.cli.insecure)
        try:
            self.api_cache = json.loads(bdecode(r.content))["data"]
        except:
            raise CliError(r.content)
        # update local cache
        with open(api_cache_f, 'w') as f:
            f.write(json.dumps(self.api_cache, indent=4))
    def get(self):
        # Return a deep copy so callers cannot mutate the cached structure.
        if self.api_cache:
            return copy.deepcopy(self.api_cache)
        self.load()
        return copy.deepcopy(self.api_cache)
class OptionParsingError(RuntimeError):
    # Raised by CmdOptionParser.error() instead of exiting the process,
    # so a bad option does not kill the interactive CLI.
    def __init__(self, msg):
        self.msg = msg
    def __str__(self):
        return self.msg
class OptionParsingExit(Exception):
    # Raised by CmdOptionParser.exit() (e.g. after --help) instead of
    # calling sys.exit, so the interactive CLI keeps running.
    def __init__(self, status, msg):
        self.msg = msg
        self.status = status
    def __str__(self):
        return self.msg
class CmdOptionParser(optparse.OptionParser):
    # OptionParser variant safe for use inside a long-lived interactive
    # shell: rst-aware help formatting, and exceptions instead of exits.
    def __init__(self, *args, **vars):
        vars["formatter"] = IndentedHelpFormatterRst()
        optparse.OptionParser.__init__(self, *args, **vars)
    def error(self, msg):
        raise OptionParsingError(msg)
    def exit(self, status=0, msg=None):
        raise OptionParsingExit(status, msg)
def get_fullpath(relpath):
    """Resolve relpath against the CLI's current (global) path.

    Absolute paths pass through unchanged; an empty relpath means the
    current path itself.
    """
    if relpath.startswith("/"):
        # already absolute
        return relpath
    if relpath == "":
        return path
    return "/".join((path, relpath))
class CmdHelp(Cmd):
    """The interactive 'help' command: list every command with its description."""
    command = "help"
    desc = "Print this help message."
    parser = CmdOptionParser(description=desc)

    def cmd(self, line):
        # Print each command name, sorted, followed by its wrapped description.
        from textwrap import TextWrapper
        wrapper = TextWrapper(initial_indent=" ", subsequent_indent=" ", width=78)
        by_name = dict((c.command, c) for c in self.cli.commands)
        for name in sorted(by_name):
            c = by_name[name]
            print(c.command)
            print()
            if hasattr(c, "desc"):
                print(wrapper.fill(c.desc))
                print()
class CmdLs(Cmd):
    # 'ls' command: thin wrapper over Cmd.ls_data, printing one entry per line.
    api_candidates = True
    command = "ls"
    desc = "List the API handlers and available objects matching the given pattern."
    parser = CmdOptionParser(description=desc)
    def cmd(self, line):
        ls_data = self.ls_data(line)
        for s in ls_data:
            print(s)
class CmdDelete(Cmd):
    # 'delete' command: DELETE request on an api handler path.
    api_candidates = True
    command = "delete"
    desc = "Execute a DELETE request on the given API handler."
    parser = CmdOptionParser(description=desc)
    parser_options = [
        optparse.make_option("--data", default=None, action="append", dest="data",
                             help="A key=value pair to filter the deleted data. Multiple --data can be specified.")
    ]
    def cmd(self, line):
        # Rebuild the parser for the target handler, then issue the request.
        self.set_parser_options_from_cmdline(line)
        global path
        try:
            options, args = self.parser.parse_args(args=shlex.split(line))
        except Exception as e:
            try: print(e)
            except: pass
            return
        data, files, headers = self.get_data_from_options(options)
        params = {}
        if 'query' in options.__dict__ and options.query is not None:
            params["query"] = options.query
        _path = self.args_to_path(args)
        r = requests.delete(self.cli.api+_path, params=params, data=data, headers=headers, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)
class CmdPost(Cmd):
    # 'post' command: POST request on an api handler path, body from --data.
    api_candidates = True
    command = "post"
    desc = "Execute a POST request on the given API handler. The data can be set using --data."
    parser = CmdOptionParser(description=desc)
    parser_options = [
        optparse.make_option("--data", default=None, action="append", dest="data",
                             help="A key=value pair to include in the post data. Multiple --data can be specified.")
    ]
    def cmd(self, line):
        # Rebuild the parser for the target handler, then issue the request.
        self.set_parser_options_from_cmdline(line)
        global path
        try:
            options, args = self.parser.parse_args(args=shlex.split(line))
        except Exception as e:
            try: print(e)
            except: pass
            return
        data, files, headers = self.get_data_from_options(options)
        params = {}
        if 'query' in options.__dict__ and options.query is not None:
            params["query"] = options.query
        _path = self.args_to_path(args)
        r = requests.post(self.cli.api+_path, data=data, files=files, params=params, headers=headers, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)
class CmdPut(Cmd):
    # 'put' command: PUT request on an api handler path, body from --data.
    api_candidates = True
    command = "put"
    desc = "Execute a PUT request on the given API handler. The data can be set using --data."
    parser = CmdOptionParser(description=desc)
    parser_options = [
        optparse.make_option("--data", default=None, action="append", dest="data",
                             help="A key=value pair to include in the post data. Multiple --data can be specified.")
    ]
    def cmd(self, line):
        # Rebuild the parser for the target handler, then issue the request.
        self.set_parser_options_from_cmdline(line)
        global path
        try:
            options, args = self.parser.parse_args(args=shlex.split(line))
        except Exception as e:
            try: print(e)
            except: pass
            return
        data, files, headers = self.get_data_from_options(options)
        _path = self.args_to_path(args)
        r = requests.put(self.cli.api+_path, data=data, files=files, headers=headers, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)
class CmdSafe(Cmd):
    """
    The 'safe' command: list, upload and download files in the collector
    safe (a file sharing facility with access control rules).
    """
    command = "safe"
    desc = "Upload, download and manage files in the collector safe. The safe is a file sharing facility with access control rules for nodes and users making it suitable to serve compliance reference files."
    parser = CmdOptionParser(description=desc)
    parser.add_option("--ls", default=None, action="store_true", dest="ls",
                      help="List the accessible files in the safe.")
    parser.add_option("--upload", default=None, action="store_true", dest="upload",
                      help="Upload the file pointed by --file to the safe. Optionally give a name using --name.")
    parser.add_option("--download", default=None, action="store_true", dest="download",
                      help="Download from the safe the file pointed by --file to the file path or directory pointed by --to.")
    parser.add_option("--file", default=None, action="store", dest="file",
                      help="The safe file uuid to download, or the local file to upload.")
    parser.add_option("--to", default=None, action="store", dest="to",
                      help="The local file path or directory name to download.")
    parser.add_option("--name", default=None, action="store", dest="name",
                      help="The user-friendly name to attach to the upload.")
    # complete --file values against the /safe collection
    candidates_path = {
        "--file": "/safe",
    }
    api_candidates = False

    def cmd(self, line):
        """Dispatch to ls/upload/download depending on the options given."""
        global path
        try:
            options, args = self.parser.parse_args(args=shlex.split(line))
        except Exception as e:
            try: print(e)
            except: pass
            return
        self.ls(options)
        self.upload(options)
        self.download(options)

    def ls(self, options):
        """List the accessible files in the safe."""
        if options.ls is None:
            return
        params = {}
        r = requests.get(self.cli.api+"/safe", params=params, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def upload(self, options):
        """Upload the local file --file to the safe, optionally named --name."""
        if options.upload is None:
            return
        if options.file is None:
            raise CliError("--file is mandatory for --upload")
        data = {}
        if options.name:
            data["name"] = options.name
        if not os.path.exists(options.file):
            raise CliError("%s file not found" % options.file)
        files = {
            "file": (os.path.realpath(options.file), open(options.file, 'rb')),
        }
        r = requests.post(self.cli.api+"/safe/upload", data=data, files=files, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def download(self, options):
        """Download the safe file --file to the path or directory --to."""
        if options.download is None:
            return
        if options.file is None:
            raise CliError("--file is mandatory for --download")
        if options.to is None:
            raise CliError("--to is mandatory for --download")
        if os.path.exists(options.to) and os.path.isdir(options.to):
            # --to is a directory: store under the safe file uuid
            to = os.path.join(options.to, options.file)
        else:
            to = options.to
        r = requests.get(self.cli.api+"/safe/"+options.file+"/download", stream=True, auth=self.cli.auth, verify=not self.cli.insecure)
        if not r.ok:
            try:
                d = json.loads(bdecode(r.content))
                print(d["error"], file=sys.stderr)
                return
            except:
                pass
            raise CliError("download failed")
        # bug fix: write to the computed destination 'to' (which appends the
        # file uuid when --to is a directory); the original opened the raw
        # --to option, ignoring the directory handling above.
        with open(to, 'wb') as f:
            for block in r.iter_content(1024):
                # crude progress indicator, one dot per 1k block
                print(".")
                f.write(block)
        print("downloaded")
class CmdSysreport(Cmd):
    """
    "sysreport" command: browse a node's sysreport change log and show
    the file diffs recorded under a given commit id.
    """
    command = "sysreport"
    desc = "Show sysreport information"
    parser = CmdOptionParser(description=desc)
    # BUGFIX: --log had an empty help string.
    parser.add_option("--log", default=None, action="store_true", dest="log",
                      help="Show the sysreport change log of the node pointed by --node.")
    parser.add_option("--begin", default=None, action="store", dest="begin",
                      help="The sysreport analysis begin date.")
    # BUGFIX: help said "begin date" (copy-paste from --begin).
    parser.add_option("--end", default=None, action="store", dest="end",
                      help="The sysreport analysis end date.")
    parser.add_option("--path", default=None, action="store", dest="path",
                      help="A path globing pattern to limit the sysreport analysis to.")
    parser.add_option("--node", default=None, action="store", dest="node",
                      help="The sysreport node name.")
    parser.add_option("--cid", default=None, action="store", dest="cid",
                      help="The commit id to show as diff. This cid is displayed in the summary listing obtained by the --log action without specifying --cid.")
    # Completion hints: --node candidates come from the /nodes handler.
    candidates_path = {
        "--node": "/nodes",
    }
    api_candidates = False

    def cmd(self, line):
        """Parse the command line and dispatch to the requested action."""
        global path
        try:
            options, args = self.parser.parse_args(args=shlex.split(line))
        except Exception as e:
            try: print(e)
            except: pass
            return
        # log() handles --log without --cid, log_cid() handles --log --cid.
        self.log(options)
        self.log_cid(options)

    def print_log(self, data):
        """Pretty-print the change log summary entries."""
        for d in data["data"]:
            print(self.colorize("cid: %s" % d["cid"], c=self.DARKCYAN))
            print(self.colorize("change detection date: %s" % d["start"].replace("T", " "), c=self.GREEN))
            if "summary" in d:
                print()
                print(" "+d["summary"])
            print()
            for fpath in d["stat"]:
                print(" "+fpath)
            print()

    def print_log_cid(self, data):
        """Pretty-print the per-file colored diffs of a commit."""
        for fpath, d in data["data"]["blocks"].items():
            print(self.colorize("path: " + fpath, c=self.DARKCYAN))
            if d["secure"]:
                print(self.colorize("visible: by node responsibles", c=self.DARKRED))
            else:
                print(self.colorize("visible: by everyone", c=self.DARKCYAN))
            print()
            for line in d["diff"].split("\n"):
                # Color removed lines red and added lines green.
                if line.startswith("-"):
                    c = self.RED
                elif line.startswith("+"):
                    c = self.GREEN
                else:
                    c = None
                print(self.colorize(line, c=c))
            print()

    def log(self, options):
        """Fetch and display the change log summary (--log without --cid)."""
        if options.log is None or options.node is None or options.cid is not None:
            return
        params = {}
        if options.begin:
            params["begin"] = options.begin
        if options.end:
            params["end"] = options.end
        if options.path:
            params["path"] = options.path
        r = requests.get(self.cli.api+"/nodes/"+options.node+"/sysreport", params=params, auth=self.cli.auth, verify=not self.cli.insecure)
        data = json.loads(bdecode(r.content))
        self.print_log(data)

    def log_cid(self, options):
        """Fetch and display the diffs of one commit (--log --cid)."""
        if options.log is None or options.node is None or options.cid is None:
            return
        params = {}
        if options.path:
            params["path"] = options.path
        r = requests.get(self.cli.api+"/nodes/"+options.node+"/sysreport/"+options.cid, params=params, auth=self.cli.auth, verify=not self.cli.insecure)
        data = json.loads(bdecode(r.content))
        self.print_log_cid(data)
class CmdFilter(Cmd):
    """
    "filter" command: CRUD operations on collector filters and their
    attachment to filtersets.
    """
    command = "filter"
    desc = "Handle design actions on a filter"
    parser = CmdOptionParser(description=desc)
    parser.add_option("--list", default=None, action="store_true", dest="list",
                      help="List filters")
    parser.add_option("--show", default=None, action="store_true", dest="show",
                      help="Show a filter design")
    parser.add_option("--create", default=None, action="store_true", dest="create",
                      help="Create a filter")
    parser.add_option("--delete", default=None, action="store_true", dest="delete",
                      help="Delete a filter")
    parser.add_option("--set", default=None, action="store_true", dest="set",
                      help="Set filter properties")
    parser.add_option("--attach", default=None, action="store_true", dest="attach",
                      help="Attach a filter to the filterset pointed by --filterset")
    parser.add_option("--detach", default=None, action="store_true", dest="detach",
                      help="Detach a filter from the filterset pointed by --filterset")
    parser.add_option("--filter", default=None, action="store", dest="filter",
                      help="The name or id of the filter to manage")
    parser.add_option("--filterset", default=None, action="store", dest="filterset",
                      help="The name or id of the filterset to attach to or detach from")
    parser.add_option("--value", default=None, action="store", dest="value",
                      help="with --set or --create, set the filter value parameter")
    parser.add_option("--operator", default=None, action="store", dest="operator",
                      help="with --set or --create, set the filter operator parameter. Accepted operators: =, <, >, <=, >=, LIKE, IN")
    parser.add_option("--field", default=None, action="store", dest="field",
                      help="with --set or --create, set the filter field parameter")
    parser.add_option("--table", default=None, action="store", dest="table",
                      help="with --set or --create, set the filter table parameter")
    parser.add_option("--order", default=None, action="store", dest="order",
                      help="with --attach, set the filter attachment order parameter. Integer.")
    parser.add_option("--logical-operator", default=None, action="store", dest="logical_operator",
                      help="with --attach, set the filter attachment logical operator parameter. Accepted operators: AND, OR, AND NOT, OR NOT")
    # Completion hints for option arguments.
    candidates_path = {
        "--filterset": "/filtersets",
        "--filter": "/filters",
    }
    api_candidates = False

    def cmd(self, line):
        """Parse the command line and dispatch to the requested action."""
        global path
        try:
            options, args = self.parser.parse_args(args=shlex.split(line))
        except Exception as e:
            try: print(e)
            except: pass
            return
        if options.filter:
            # '%' would be interpreted as a url-encoding escape: the
            # collector stores filter names with the literal marker instead.
            options.filter = options.filter.replace("%", "(percent)")
        # Each handler is a no-op unless its trigger option is set.
        self.list_filters(options)
        self.show_filter(options)
        self.create_filter(options)
        self.delete_filter(options)
        self.set_filter(options)
        self.attach_filter_to_filterset(options)
        self.detach_filter_from_filterset(options)

    def list_filters(self, options):
        """List filters (--list)."""
        if options.list is None:
            return
        p = "/filters"
        CmdLs().cmd(p)

    def show_filter(self, options):
        """Show the design of one filter (--show --filter)."""
        if options.show is None or options.filter is None:
            return
        o = CmdShow()
        data = o.get_data("/filters/"+options.filter)
        o.print_filter(options.filter, data)

    def create_filter(self, options):
        """Create a filter from --table/--field/--operator/--value (--create)."""
        if options.create is None:
            return
        global path
        _path = "/filters"
        data = {
            "f_table": options.table,
            "f_field": options.field,
            "f_op": options.operator,
            "f_value": options.value,
        }
        r = requests.post(self.cli.api+_path, data=data, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def delete_filter(self, options):
        """Delete a filter (--delete --filter)."""
        if options.delete is None or options.filter is None:
            return
        global path
        _path = "/filters/"+options.filter
        r = requests.delete(self.cli.api+_path, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def attach_filter_to_filterset(self, options):
        """Attach a filter to a filterset (--attach --filter --filterset)."""
        if options.attach is None or options.filterset is None or options.filter is None:
            return
        _path = "/filtersets/%s/filters/%s" % (options.filterset, options.filter)
        data = {}
        if options.logical_operator:
            data["f_log_op"] = options.logical_operator
        # BUGFIX: the order parameter was guarded by options.logical_operator
        # (copy-paste), so --order alone was silently ignored.
        if options.order:
            data["f_order"] = options.order
        r = requests.post(self.cli.api+_path, data=data, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def detach_filter_from_filterset(self, options):
        """Detach a filter from a filterset (--detach --filter --filterset)."""
        if options.detach is None or options.filterset is None or options.filter is None:
            return
        _path = "/filtersets/%s/filters/%s" % (options.filterset, options.filter)
        r = requests.delete(self.cli.api+_path, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def set_filter(self, options):
        """Update filter properties (--set --filter)."""
        if options.set is None or options.filter is None:
            return
        data = {}
        if options.table is not None:
            data["f_table"] = options.table
        if options.operator is not None:
            data["f_op"] = options.operator
        # BUGFIX: f_field was guarded by options.table (copy-paste), so
        # --field alone was silently ignored.
        if options.field is not None:
            data["f_field"] = options.field
        if options.value is not None:
            data["f_value"] = options.value
        if len(data) == 0:
            return
        _path = "/filters/%s" % options.filter
        r = requests.post(self.cli.api+_path, data=data, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)
class CmdFilterset(Cmd):
    """
    "filterset" command: CRUD operations on collector filtersets,
    including nesting one filterset inside another.
    """
    command = "filterset"
    desc = "Handle design actions on a filterset"
    parser = CmdOptionParser(description=desc)
    parser.add_option("--list", default=None, action="store_true", dest="list",
                      help="List filtersets")
    parser.add_option("--show", default=None, action="store_true", dest="show",
                      help="Show a filterset design, with nesting")
    parser.add_option("--create", default=None, action="store_true", dest="create",
                      help="Create a filterset")
    parser.add_option("--delete", default=None, action="store_true", dest="delete",
                      help="Delete a filterset")
    parser.add_option("--set", default=None, action="store_true", dest="set",
                      help="Set filterset properties")
    parser.add_option("--attach", default=None, action="store_true", dest="attach",
                      help="Attach a filterset to the filterset pointed by --parent-filterset")
    parser.add_option("--detach", default=None, action="store_true", dest="detach",
                      help="Detach a filterset from the filterset pointed by --parent-filterset")
    parser.add_option("--rename", default=None, action="store_true", dest="rename",
                      help="Rename a filterset")
    parser.add_option("--filterset", default=None, action="store", dest="filterset",
                      help="The name or id of the filterset to manage")
    parser.add_option("--parent-filterset", default=None, action="store", dest="parent_filterset",
                      help="The name or id of the filterset to attach to or detach from")
    parser.add_option("--to", default=None, action="store", dest="to",
                      help="with --rename, set the new filterset name")
    # --stats/--not-stats share the same dest: the last one wins.
    parser.add_option("--stats", default=None, action="store_true", dest="stats",
                      help="with --set, set the filterset stats parameter to true")
    parser.add_option("--not-stats", default=None, action="store_false", dest="stats",
                      help="with --set, set the filterset stats parameter to false")
    # Completion hints for option arguments.
    candidates_path = {
        "--to": "/filtersets",
        "--filterset": "/filtersets",
        "--parent-filterset": "/filtersets",
    }
    api_candidates = False

    def cmd(self, line):
        """Parse the command line and dispatch to the requested action."""
        global path
        try:
            options, args = self.parser.parse_args(args=shlex.split(line))
        except Exception as e:
            try: print(e)
            except: pass
            return
        # Each handler is a no-op unless its trigger option is set.
        self.list_filtersets(options)
        self.show_filterset(options)
        self.create_filterset(options)
        self.delete_filterset(options)
        self.set_filterset(options)
        self.attach_filterset_to_filterset(options)
        self.detach_filterset_from_filterset(options)
        self.rename_filterset(options)

    def list_filtersets(self, options):
        """List filtersets (--list)."""
        if options.list is None:
            return
        p = "/filtersets"
        # Dropped the unused assignment of CmdLs().cmd()'s return value.
        CmdLs().cmd(p)

    def show_filterset(self, options):
        """Show the nested design of one filterset (--show --filterset)."""
        if options.show is None or options.filterset is None:
            return
        o = CmdShow()
        data = o.get_data("/filtersets/"+options.filterset)
        o.print_filterset(options.filterset, data)

    def create_filterset(self, options):
        """Create a filterset (--create --filterset)."""
        if options.create is None or options.filterset is None:
            return
        global path
        _path = "/filtersets"
        data = {
            "fset_name": options.filterset,
        }
        r = requests.post(self.cli.api+_path, data=data, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def delete_filterset(self, options):
        """Delete a filterset (--delete --filterset)."""
        if options.delete is None or options.filterset is None:
            return
        global path
        _path = "/filtersets/"+options.filterset
        r = requests.delete(self.cli.api+_path, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def attach_filterset_to_filterset(self, options):
        """Nest a filterset under a parent (--attach --filterset --parent-filterset)."""
        if options.attach is None or options.parent_filterset is None or options.filterset is None:
            return
        _path = "/filtersets/%s/filtersets/%s" % (options.parent_filterset, options.filterset)
        r = requests.post(self.cli.api+_path, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def detach_filterset_from_filterset(self, options):
        """Un-nest a filterset from a parent (--detach --filterset --parent-filterset)."""
        if options.detach is None or options.parent_filterset is None or options.filterset is None:
            return
        _path = "/filtersets/%s/filtersets/%s" % (options.parent_filterset, options.filterset)
        r = requests.delete(self.cli.api+_path, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def rename_filterset(self, options):
        """Rename a filterset to --to (--rename --filterset --to)."""
        if options.rename is None or options.filterset is None or options.to is None:
            return
        data = {
            "fset_name": options.to,
        }
        _path = "/filtersets/%s" % options.filterset
        r = requests.post(self.cli.api+_path, data=data, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def set_filterset(self, options):
        """Update filterset properties (--set --filterset)."""
        if options.set is None or options.filterset is None:
            return
        self.set_filterset_stats(options)

    def set_filterset_stats(self, options):
        """Set the fset_stats property from --stats/--not-stats."""
        if options.stats is None:
            return
        data = {
            "fset_stats": options.stats,
        }
        _path = "/filtersets/%s" % options.filterset
        r = requests.post(self.cli.api+_path, data=data, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)
class CmdModuleset(Cmd):
    """
    "moduleset" command: CRUD operations on compliance modulesets,
    their nesting, and their publication/responsibility groups.
    """
    command = "moduleset"
    desc = "Handle design actions on a compliance moduleset"
    parser = CmdOptionParser(description=desc)
    parser.add_option("--list", default=None, action="store_true", dest="list",
                      help="List modulesets")
    parser.add_option("--show", default=None, action="store_true", dest="show",
                      help="Show a moduleset design, with nesting")
    parser.add_option("--clone", default=None, action="store_true", dest="clone",
                      help="Clone a moduleset, including modules, moduleset-moduleset and moduleset-ruleset relations. Reset the publication as responsible groups.")
    parser.add_option("--create", default=None, action="store_true", dest="create",
                      help="Create a moduleset")
    parser.add_option("--delete", default=None, action="store_true", dest="delete",
                      help="Delete a moduleset")
    parser.add_option("--attach", default=None, action="store_true", dest="attach",
                      help="Attach the moduleset to a moduleset")
    parser.add_option("--detach", default=None, action="store_true", dest="detach",
                      help="Detach the moduleset from a moduleset")
    parser.add_option("--rename", default=None, action="store_true", dest="rename",
                      help="Rename a moduleset")
    parser.add_option("--moduleset", default=None, action="store", dest="moduleset",
                      help="The name or id of the moduleset to manage")
    parser.add_option("--parent-moduleset", default=None, action="store", dest="parent_moduleset",
                      help="The name or id of the moduleset to attach to or detach from")
    parser.add_option("--publication-group", default=None, action="store", dest="publication_group",
                      help="The name or id of the group to attach or detach as publication")
    parser.add_option("--responsible-group", default=None, action="store", dest="responsible_group",
                      help="The name or id of the group to attach or detach as responsible")
    parser.add_option("--to", default=None, action="store", dest="to",
                      help="with --rename, set the new moduleset name")
    # Completion hints for option arguments.
    candidates_path = {
        "--moduleset": "/compliance/modulesets",
        "--parent-moduleset": "/compliance/modulesets",
        "--publication-group": "/groups",
        "--responsible-group": "/groups",
    }
    api_candidates = False

    def cmd(self, line):
        """Parse the command line and dispatch to the requested action."""
        global path
        try:
            options, args = self.parser.parse_args(args=shlex.split(line))
        except Exception as e:
            try: print(e)
            except: pass
            return
        # Each handler is a no-op unless its trigger option is set.
        self.list_modulesets(options)
        self.clone_moduleset(options)
        self.show_moduleset(options)
        self.create_moduleset(options)
        self.delete_moduleset(options)
        self.attach_publication_group_to_moduleset(options)
        self.detach_publication_group_from_moduleset(options)
        self.attach_responsible_group_to_moduleset(options)
        self.detach_responsible_group_from_moduleset(options)
        self.attach_moduleset_to_moduleset(options)
        self.detach_moduleset_from_moduleset(options)
        self.rename_moduleset(options)

    def list_modulesets(self, options):
        """List modulesets (--list)."""
        if options.list is None:
            return
        p = "/compliance/modulesets"
        # Dropped the unused assignment of CmdLs().cmd()'s return value.
        CmdLs().cmd(p)

    def show_moduleset(self, options):
        """Show the nested design of one moduleset (--show --moduleset)."""
        # BUGFIX: also require --moduleset, consistent with the other
        # handlers; the original crashed on "--show" alone (None concat).
        if options.show is None or options.moduleset is None:
            return
        o = CmdShow()
        data = o.get_data("/compliance/modulesets/"+options.moduleset)
        o.print_moduleset(options.moduleset, data)

    def create_moduleset(self, options):
        """Create a moduleset (--create --moduleset)."""
        if options.create is None or options.moduleset is None:
            return
        global path
        _path = "/compliance/modulesets"
        data = {
            "modset_name": options.moduleset,
        }
        r = requests.post(self.cli.api+_path, data=data, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def clone_moduleset(self, options):
        """Clone a moduleset server-side (--clone --moduleset)."""
        if options.clone is None or options.moduleset is None:
            return
        data = {
            "action": "clone",
        }
        _path = "/compliance/modulesets/%s" % options.moduleset
        r = requests.put(self.cli.api+_path, data=data, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def delete_moduleset(self, options):
        """Delete a moduleset (--delete --moduleset)."""
        if options.delete is None or options.moduleset is None:
            return
        global path
        _path = "/compliance/modulesets/"+options.moduleset
        r = requests.delete(self.cli.api+_path, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def attach_publication_group_to_moduleset(self, options):
        """Attach a publication group (--attach --publication-group)."""
        if options.attach is None or options.publication_group is None or options.moduleset is None:
            return
        _path = "/compliance/modulesets/%s/publications/%s" % (options.moduleset, options.publication_group)
        r = requests.post(self.cli.api+_path, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def attach_responsible_group_to_moduleset(self, options):
        """Attach a responsible group (--attach --responsible-group)."""
        if options.attach is None or options.responsible_group is None or options.moduleset is None:
            return
        _path = "/compliance/modulesets/%s/responsibles/%s" % (options.moduleset, options.responsible_group)
        r = requests.post(self.cli.api+_path, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def detach_publication_group_from_moduleset(self, options):
        """Detach a publication group (--detach --publication-group)."""
        if options.detach is None or options.publication_group is None or options.moduleset is None:
            return
        _path = "/compliance/modulesets/%s/publications/%s" % (options.moduleset, options.publication_group)
        r = requests.delete(self.cli.api+_path, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def detach_responsible_group_from_moduleset(self, options):
        """Detach a responsible group (--detach --responsible-group)."""
        if options.detach is None or options.responsible_group is None or options.moduleset is None:
            return
        _path = "/compliance/modulesets/%s/responsibles/%s" % (options.moduleset, options.responsible_group)
        r = requests.delete(self.cli.api+_path, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def attach_moduleset_to_moduleset(self, options):
        """Nest a moduleset under a parent (--attach --parent-moduleset)."""
        if options.attach is None or options.parent_moduleset is None or options.moduleset is None:
            return
        _path = "/compliance/modulesets/%s/modulesets/%s" % (options.parent_moduleset, options.moduleset)
        r = requests.post(self.cli.api+_path, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def detach_moduleset_from_moduleset(self, options):
        """Un-nest a moduleset from a parent (--detach --parent-moduleset)."""
        if options.detach is None or options.parent_moduleset is None or options.moduleset is None:
            return
        _path = "/compliance/modulesets/%s/modulesets/%s" % (options.parent_moduleset, options.moduleset)
        r = requests.delete(self.cli.api+_path, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def rename_moduleset(self, options):
        """Rename a moduleset to --to (--rename --moduleset --to)."""
        if options.rename is None or options.moduleset is None or options.to is None:
            return
        data = {
            "modset_name": options.to,
        }
        _path = "/compliance/modulesets/%s" % options.moduleset
        r = requests.post(self.cli.api+_path, data=data, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)
class CmdModule(Cmd):
    """
    "module" command: manage the compliance modules attached to a
    moduleset (list, add, remove, rename, set properties).
    """
    command = "module"
    desc = "Handle design actions on a compliance module"
    parser = CmdOptionParser(description=desc)
    parser.add_option("--list", default=None, action="store_true", dest="list",
                      help="List a moduleset modules")
    parser.add_option("--add", default=None, action="store_true", dest="add",
                      help="Add a module to a moduleset")
    parser.add_option("--remove", default=None, action="store_true", dest="remove",
                      help="Remove a module from a moduleset")
    parser.add_option("--set", default=None, action="store_true", dest="set",
                      help="Set module properties")
    parser.add_option("--rename", default=None, action="store_true", dest="rename",
                      help="Rename a module")
    parser.add_option("--module", default=None, action="store", dest="module",
                      help="The name or id of the module")
    parser.add_option("--moduleset", default=None, action="store", dest="moduleset",
                      help="The name or id of the module's moduleset")
    # --autofix/--not-autofix share the same dest: the last one wins.
    parser.add_option("--autofix", default=None, action="store_true", dest="autofix",
                      help="with --set, set the autofix property to true")
    parser.add_option("--not-autofix", default=None, action="store_false", dest="autofix",
                      help="with --set, set the autofix property to false")
    parser.add_option("--to", default=None, action="store", dest="to",
                      help="with --rename, set the new module name")
    # Completion hints for option arguments.
    candidates_path = {
        "--moduleset": "/compliance/modulesets",
        "--module": "/compliance/modulesets//modules",
        "--to": "/compliance/modulesets//modules",
    }
    api_candidates = False

    def cmd(self, line):
        """Parse the command line and dispatch to the requested action."""
        global path
        try:
            options, args = self.parser.parse_args(args=shlex.split(line))
        except Exception as e:
            try: print(e)
            except: pass
            return
        # Each handler is a no-op unless its trigger option is set.
        self.list_modules(options)
        self.add_module(options)
        self.remove_module(options)
        self.set_module(options)
        self.rename_module(options)

    def list_modules(self, options):
        """List the modules of a moduleset (--list --moduleset)."""
        if options.list is None or options.moduleset is None:
            return
        p = "/compliance/modulesets/%s/modules" % options.moduleset
        # Dropped the unused assignment of CmdLs().cmd()'s return value.
        CmdLs().cmd(p)

    def add_module(self, options):
        """Add a module to a moduleset (--add --moduleset --module)."""
        if options.add is None or options.moduleset is None or options.module is None:
            return
        global path
        _path = "/compliance/modulesets/%s/modules" % options.moduleset
        data = {
            "modset_mod_name": options.module,
        }
        r = requests.post(self.cli.api+_path, data=data, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def remove_module(self, options):
        """Remove a module from a moduleset (--remove --moduleset --module)."""
        if options.remove is None or options.moduleset is None or options.module is None:
            return
        global path
        _path = "/compliance/modulesets/%s/modules/%s" % (options.moduleset, options.module)
        r = requests.delete(self.cli.api+_path, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def rename_module(self, options):
        """Rename a module to --to (--rename --moduleset --module --to)."""
        if options.rename is None or options.moduleset is None or options.module is None or options.to is None:
            return
        data = {
            "modset_mod_name": options.to,
        }
        _path = "/compliance/modulesets/%s/modules/%s" % (options.moduleset, options.module)
        r = requests.post(self.cli.api+_path, data=data, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def set_module(self, options):
        """Update module properties (--set --moduleset --module)."""
        if options.set is None or options.moduleset is None or options.module is None:
            return
        self.set_module_autofix(options)

    def set_module_autofix(self, options):
        """Set the autofix property from --autofix/--not-autofix."""
        if options.autofix is None:
            return
        data = {
            "autofix": options.autofix,
        }
        _path = "/compliance/modulesets/%s/modules/%s" % (options.moduleset, options.module)
        r = requests.post(self.cli.api+_path, data=data, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)
class CmdVariable(Cmd):
    """
    "variable" command: manage compliance ruleset variables (list, add,
    remove, rename, copy/move between rulesets, set class and value,
    or edit the value interactively in $EDITOR).
    """
    command = "variable"
    desc = "Handle design actions on a compliance variable"
    parser = CmdOptionParser(description=desc)
    parser.add_option("--list", default=None, action="store_true", dest="list",
                      help="List variables in a ruleset")
    parser.add_option("--add", default=None, action="store_true", dest="add",
                      help="Add a variable to a ruleset")
    parser.add_option("--remove", default=None, action="store_true", dest="remove",
                      help="Remove a variable from a ruleset")
    parser.add_option("--copy", default=None, action="store_true", dest="copy",
                      help="Copy a variable to another ruleset")
    parser.add_option("--move", default=None, action="store_true", dest="move",
                      help="Move a variable to another ruleset")
    parser.add_option("--set", default=None, action="store_true", dest="set",
                      help="Set variable properties")
    parser.add_option("--rename", default=None, action="store_true", dest="rename",
                      help="Rename a variable")
    parser.add_option("--variable", default=None, action="store", dest="variable",
                      help="The name or id of the variable")
    parser.add_option("--ruleset", default=None, action="store", dest="ruleset",
                      help="The name or id of the variable's ruleset")
    parser.add_option("--to", default=None, action="store", dest="to",
                      help="With --rename, set the new variable name")
    parser.add_option("--dest-ruleset", default=None, action="store", dest="dest_ruleset",
                      help="With --copy or --move, set the name or id of the destination ruleset")
    parser.add_option("--class", default=None, action="store", dest="var_class",
                      help="With --set, set the variable class")
    parser.add_option("--value", default=None, action="store", dest="var_value",
                      help="With --set, set the variable value")
    parser.add_option("--value-edit", default=False, action="store_true", dest="var_value_edit",
                      help="With --set, spawn an editor on the variable expected data structure. Upon exit, the edited structure is saved as the variable value.")
    # Completion hints for option arguments.
    candidates_path = {
        "--ruleset": "/compliance/rulesets",
        "--dest-ruleset": "/compliance/rulesets",
        "--variable": "/compliance/rulesets//variable",
        "--to": "/compliance/rulesets//variable",
    }
    api_candidates = False

    def cmd(self, line):
        """Parse the command line and dispatch to the requested action."""
        global path
        try:
            options, args = self.parser.parse_args(args=shlex.split(line))
        except Exception as e:
            try: print(e)
            except: pass
            return
        # Each handler is a no-op unless its trigger option is set.
        self.list_variables(options)
        self.add_variable(options)
        self.remove_variable(options)
        self.set_variable(options)
        self.rename_variable(options)
        self.copy_variable(options)
        self.move_variable(options)

    def list_variables(self, options):
        """List the variables of a ruleset (--list --ruleset)."""
        if options.list is None or options.ruleset is None:
            return
        p = "/compliance/rulesets/%s/variables" % options.ruleset
        o = CmdLs().cmd(p)

    def add_variable(self, options):
        """Add a variable to a ruleset (--add --ruleset --variable)."""
        if options.add is None or options.ruleset is None or options.variable is None:
            return
        global path
        _path = "/compliance/rulesets/%s/variables" % options.ruleset
        data = {
            "var_name": options.variable,
        }
        r = requests.post(self.cli.api+_path, data=data, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def remove_variable(self, options):
        """Remove a variable from a ruleset (--remove --ruleset --variable)."""
        if options.remove is None or options.ruleset is None or options.variable is None:
            return
        global path
        _path = "/compliance/rulesets/%s/variables/%s" % (options.ruleset, options.variable)
        r = requests.delete(self.cli.api+_path, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def rename_variable(self, options):
        """Rename a variable to --to (--rename --ruleset --variable --to)."""
        if options.rename is None or options.ruleset is None or options.variable is None or options.to is None:
            return
        data = {
            "var_name": options.to,
        }
        _path = "/compliance/rulesets/%s/variables/%s" % (options.ruleset, options.variable)
        r = requests.post(self.cli.api+_path, data=data, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def copy_variable(self, options):
        """Copy a variable to --dest-ruleset (--copy)."""
        if options.copy is None or options.ruleset is None or options.variable is None or options.dest_ruleset is None:
            return
        data = {
            "action": "copy",
            "dst_ruleset": options.dest_ruleset,
        }
        _path = "/compliance/rulesets/%s/variables/%s" % (options.ruleset, options.variable)
        r = requests.put(self.cli.api+_path, data=data, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def move_variable(self, options):
        """Move a variable to --dest-ruleset (--move)."""
        if options.move is None or options.ruleset is None or options.variable is None or options.dest_ruleset is None:
            return
        data = {
            "action": "move",
            "dst_ruleset": options.dest_ruleset,
        }
        _path = "/compliance/rulesets/%s/variables/%s" % (options.ruleset, options.variable)
        r = requests.put(self.cli.api+_path, data=data, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def set_variable(self, options):
        """Update variable properties (--set --ruleset --variable)."""
        if options.set is None or options.ruleset is None or options.variable is None:
            return
        self.set_variable_class(options)
        self.set_variable_value(options)
        self.set_variable_value_edit(options)

    def set_variable_class(self, options):
        """Set the variable class from --class."""
        if options.var_class is None:
            return
        data = {
            "var_class": options.var_class,
        }
        _path = "/compliance/rulesets/%s/variables/%s" % (options.ruleset, options.variable)
        r = requests.post(self.cli.api+_path, data=data, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def set_variable_value(self, options):
        """Set the variable value from --value."""
        if options.var_value is None:
            return
        data = {
            "var_value": options.var_value,
        }
        _path = "/compliance/rulesets/%s/variables/%s" % (options.ruleset, options.variable)
        r = requests.post(self.cli.api+_path, data=data, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def set_variable_value_edit(self, options):
        """
        Spawn $EDITOR on a json skeleton of the variable value derived
        from its form definition and save the edited content as the new
        value (--value-edit).
        """
        if not options.var_value_edit:
            return
        # get variable class
        _path = "/compliance/rulesets/%s/variables/%s" % (options.ruleset, options.variable)
        r = requests.get(self.cli.api+_path, auth=self.cli.auth, verify=not self.cli.insecure)
        variable = json.loads(bdecode(r.content))["data"][0]
        variable_class = str(variable["var_class"])
        variable_value = variable["var_value"]
        # get form definition
        _path = "/forms"
        params = {
            "query": "form_name="+variable_class,
        }
        r = requests.get(self.cli.api+_path, params=params, auth=self.cli.auth, verify=not self.cli.insecure)
        form = json.loads(bdecode(r.content))["data"][0]
        form_def = form["form_definition"]
        output_format = form_def["Outputs"][0]["Format"]
        # get current value (non-raw formats are stored as json)
        if output_format != "raw":
            try:
                variable_data = json.loads(variable_value)
            except:
                variable_data = None
        else:
            variable_data = variable_value
        if variable_data is not None:
            pass
        elif output_format.endswith("dict"):
            # Build a skeleton dict from the form inputs, with a
            # "<type. help>" hint as each value.
            d = {}
            for _input in form_def["Inputs"]:
                if "Key" in _input:
                    k = _input["Key"]
                else:
                    k = _input["Id"]
                v = "<%s. %s>" % (_input.get("Type", ""), _input.get("Help", ""))
                d[k] = v
        else:
            d = ""
        # Wrap the skeleton according to the declared output format.
        if variable_data is not None:
            text_data = variable_data
        elif output_format == "raw":
            text_data = ""
        elif output_format == "list":
            text_data = [d]
        elif output_format == "list of dict":
            text_data = [d]
        elif output_format == "dict of dict":
            text_data = {"": d}
        elif output_format.startswith("dict"):
            text_data = d
        else:
            # BUGFIX: typo "unknow" in the user-facing message
            print("unknown format")
            return
        import tempfile
        # mkstemp avoids the close-then-reopen race of the previous
        # NamedTemporaryFile approach (the name could be reused between
        # close() and the re-open below).
        fd, fname = tempfile.mkstemp(prefix='variable_edit.')
        os.close(fd)
        with open(fname, "w") as f:
            f.write(json.dumps(text_data, indent=4))
        os.system(os.environ.get("EDITOR", "vi")+" "+fname)
        with open(fname, "r") as f:
            buff = f.read()
        # Unlink before parsing so the temp file is not leaked if the
        # edited content is invalid json.
        os.unlink(fname)
        new_text_data = json.loads(buff)
        if new_text_data == text_data:
            print("canceled (no change done in the editor)")
            return
        data = {
            "var_value": buff,
        }
        _path = "/compliance/rulesets/%s/variables/%s" % (options.ruleset, options.variable)
        r = requests.post(self.cli.api+_path, data=data, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)
class CmdRuleset(Cmd):
    """Design actions on compliance rulesets: list/show/create/delete,
    property setting, rename, clone, and attach/detach of related
    filtersets, groups, rulesets and modulesets."""
    command = "ruleset"
    desc = "Handle design actions on a compliance ruleset"
    parser = CmdOptionParser(description=desc)
    parser.add_option("--list", default=None, action="store_true", dest="list",
                      help="List rulesets")
    parser.add_option("--show", default=None, action="store_true", dest="show",
                      help="Show a ruleset design, with nesting")
    parser.add_option("--create", default=None, action="store_true", dest="create",
                      help="Create a ruleset")
    parser.add_option("--delete", default=None, action="store_true", dest="delete",
                      help="Delete a ruleset")
    parser.add_option("--set", default=None, action="store_true", dest="set",
                      help="Set a ruleset property")
    parser.add_option("--rename", default=None, action="store_true", dest="rename",
                      help="Rename a ruleset")
    parser.add_option("--attach", default=None, action="store_true", dest="attach",
                      help="Attach the ruleset to a filterset, a ruleset or a moduleset")
    parser.add_option("--detach", default=None, action="store_true", dest="detach",
                      help="Detach the ruleset from a filterset, a ruleset or a moduleset")
    parser.add_option("--clone", default=None, action="store_true", dest="clone",
                      help="Clone a ruleset, including variables, filterset and ruleset-ruleset relations. Reset the publication as responsible groups.")
    parser.add_option("--ruleset", default=None, action="store", dest="ruleset",
                      help="The name or id of the ruleset to manage")
    parser.add_option("--filterset", default=None, action="store", dest="filterset",
                      help="The name or id of the filterset to attach or detach")
    parser.add_option("--parent-ruleset", default=None, action="store", dest="parent_ruleset",
                      help="The name or id of the ruleset to attach to or detach from")
    parser.add_option("--parent-moduleset", default=None, action="store", dest="parent_moduleset",
                      help="The name or id of the moduleset to attach to or detach from")
    parser.add_option("--publication-group", default=None, action="store", dest="publication_group",
                      help="The name or id of the group to attach or detach as publication")
    parser.add_option("--responsible-group", default=None, action="store", dest="responsible_group",
                      help="The name or id of the group to attach or detach as responsible")
    parser.add_option("--public", default=None, action="store_true", dest="public",
                      help="With --set, set the public property to true")
    parser.add_option("--not-public", default=None, action="store_false", dest="public",
                      help="With --set, set the public property to false")
    parser.add_option("--contextual", default=None, action="store_true", dest="contextual",
                      help="With --set, set the type property to contextual")
    parser.add_option("--explicit", default=None, action="store_false", dest="explicit",
                      help="With --set, set the type property to explicit")
    parser.add_option("--to", default=None, action="store", dest="to",
                      help="with --rename, set the new ruleset name")
    # completion data source per option
    candidates_path = {
        "--ruleset": "/compliance/rulesets",
        "--parent-moduleset": "/compliance/modulesets",
        "--parent-ruleset": "/compliance/rulesets",
        "--publication-group": "/groups",
        "--responsible-group": "/groups",
        "--filterset": "/filtersets",
    }
    api_candidates = False

    def cmd(self, line):
        """Parse the line and run every action whose trigger option is set.

        Each action method is a no-op unless its option (and the options
        it requires) are present, so calling them all in sequence is safe.
        """
        try:
            options, args = self.parser.parse_args(args=shlex.split(line))
        except Exception as e:
            try: print(e)
            except: pass
            return
        self.clone_ruleset(options)
        self.list_rulesets(options)
        self.show_ruleset(options)
        self.create_ruleset(options)
        self.delete_ruleset(options)
        self.set_ruleset(options)
        self.rename_ruleset(options)
        self.attach_filterset_to_ruleset(options)
        self.detach_filterset_from_ruleset(options)
        self.attach_publication_group_to_ruleset(options)
        self.detach_publication_group_from_ruleset(options)
        self.attach_responsible_group_to_ruleset(options)
        self.detach_responsible_group_from_ruleset(options)
        self.attach_ruleset_to_ruleset(options)
        self.attach_ruleset_to_moduleset(options)
        self.detach_ruleset_from_ruleset(options)
        self.detach_ruleset_from_moduleset(options)

    def list_rulesets(self, options):
        """--list: delegate the listing to the generic ls command."""
        if options.list is None:
            return
        p = "/compliance/rulesets"
        o = CmdLs().cmd(p)

    def show_ruleset(self, options):
        """--show: print the ruleset design with nesting."""
        if options.show is None:
            return
        o = CmdShow()
        data = o.get_data("/compliance/rulesets/"+options.ruleset)
        o.print_ruleset(options.ruleset, data)

    def clone_ruleset(self, options):
        """--clone: server-side clone of the ruleset."""
        if options.clone is None or options.ruleset is None:
            return
        data = {
            "action": "clone",
        }
        _path = "/compliance/rulesets/%s" % options.ruleset
        r = requests.put(self.cli.api+_path, data=data, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def create_ruleset(self, options):
        """--create: create a new ruleset named after --ruleset."""
        if options.create is None or options.ruleset is None:
            return
        _path = "/compliance/rulesets"
        data = {
            "ruleset_name": options.ruleset,
        }
        r = requests.post(self.cli.api+_path, data=data, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def delete_ruleset(self, options):
        """--delete: delete the ruleset."""
        if options.delete is None or options.ruleset is None:
            return
        _path = "/compliance/rulesets/"+options.ruleset
        r = requests.delete(self.cli.api+_path, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def set_ruleset(self, options):
        """--set: apply the requested property changes."""
        if options.set is None or options.ruleset is None:
            return
        self.set_ruleset_public(options)
        self.set_ruleset_type(options)

    def set_ruleset_public(self, options):
        """Set the ruleset_public property from --public/--not-public."""
        if options.public is None:
            return
        data = {
            "ruleset_public": options.public,
        }
        _path = "/compliance/rulesets/%s" % options.ruleset
        r = requests.post(self.cli.api+_path, data=data, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def set_ruleset_type(self, options):
        """Set the ruleset_type property from --contextual/--explicit."""
        if options.contextual is None and options.explicit is None:
            return
        if options.contextual is not None and options.explicit is not None:
            print("don't set both --explicit and --contextual")
            return
        # --explicit stores False, so test which option was supplied
        # rather than its truth value: the original "if options.explicit:"
        # never fired, leaving 't' unbound and raising NameError.
        if options.contextual is not None:
            t = "contextual"
        else:
            t = "explicit"
        data = {
            "ruleset_type": t,
        }
        _path = "/compliance/rulesets/%s" % options.ruleset
        r = requests.post(self.cli.api+_path, data=data, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def attach_publication_group_to_ruleset(self, options):
        """--attach --publication-group: grant publication to a group."""
        if options.attach is None or options.publication_group is None or options.ruleset is None:
            return
        _path = "/compliance/rulesets/%s/publications/%s" % (options.ruleset, options.publication_group)
        r = requests.post(self.cli.api+_path, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def detach_publication_group_from_ruleset(self, options):
        """--detach --publication-group: revoke publication from a group."""
        if options.detach is None or options.publication_group is None or options.ruleset is None:
            return
        _path = "/compliance/rulesets/%s/publications/%s" % (options.ruleset, options.publication_group)
        r = requests.delete(self.cli.api+_path, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def attach_responsible_group_to_ruleset(self, options):
        """--attach --responsible-group: add a responsible group."""
        if options.attach is None or options.responsible_group is None or options.ruleset is None:
            return
        _path = "/compliance/rulesets/%s/responsibles/%s" % (options.ruleset, options.responsible_group)
        r = requests.post(self.cli.api+_path, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def detach_responsible_group_from_ruleset(self, options):
        """--detach --responsible-group: remove a responsible group."""
        if options.detach is None or options.responsible_group is None or options.ruleset is None:
            return
        _path = "/compliance/rulesets/%s/responsibles/%s" % (options.ruleset, options.responsible_group)
        r = requests.delete(self.cli.api+_path, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def attach_filterset_to_ruleset(self, options):
        """--attach --filterset: bind a filterset to the ruleset."""
        if options.attach is None or options.filterset is None or options.ruleset is None:
            return
        _path = "/compliance/rulesets/%s/filtersets/%s" % (options.ruleset, options.filterset)
        r = requests.post(self.cli.api+_path, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def detach_filterset_from_ruleset(self, options):
        """--detach --filterset: unbind a filterset from the ruleset."""
        if options.detach is None or options.filterset is None or options.ruleset is None:
            return
        _path = "/compliance/rulesets/%s/filtersets/%s" % (options.ruleset, options.filterset)
        r = requests.delete(self.cli.api+_path, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def attach_ruleset_to_ruleset(self, options):
        """--attach --parent-ruleset: nest the ruleset under another."""
        if options.attach is None or options.parent_ruleset is None or options.ruleset is None:
            return
        _path = "/compliance/rulesets/%s/rulesets/%s" % (options.parent_ruleset, options.ruleset)
        r = requests.post(self.cli.api+_path, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def attach_ruleset_to_moduleset(self, options):
        """--attach --parent-moduleset: nest the ruleset under a moduleset."""
        if options.attach is None or options.parent_moduleset is None or options.ruleset is None:
            return
        _path = "/compliance/modulesets/%s/rulesets/%s" % (options.parent_moduleset, options.ruleset)
        r = requests.post(self.cli.api+_path, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def detach_ruleset_from_ruleset(self, options):
        """--detach --parent-ruleset: un-nest the ruleset."""
        if options.detach is None or options.parent_ruleset is None or options.ruleset is None:
            return
        _path = "/compliance/rulesets/%s/rulesets/%s" % (options.parent_ruleset, options.ruleset)
        r = requests.delete(self.cli.api+_path, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def detach_ruleset_from_moduleset(self, options):
        """--detach --parent-moduleset: un-nest from a moduleset."""
        if options.detach is None or options.parent_moduleset is None or options.ruleset is None:
            return
        _path = "/compliance/modulesets/%s/rulesets/%s" % (options.parent_moduleset, options.ruleset)
        r = requests.delete(self.cli.api+_path, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)

    def rename_ruleset(self, options):
        """--rename --to: change the ruleset name."""
        if options.rename is None or options.ruleset is None or options.to is None:
            return
        data = {
            "ruleset_name": options.to,
        }
        _path = "/compliance/rulesets/%s" % options.ruleset
        r = requests.post(self.cli.api+_path, data=data, auth=self.cli.auth, verify=not self.cli.insecure)
        self.print_content(r.content)
class CmdShow(Cmd):
    """Show a moduleset, ruleset or filterset design with nesting."""
    api_candidates = True
    command = "show"
    desc = "Show a moduleset or a ruleset design and nesting."
    parser = CmdOptionParser(description=desc)

    def label(self, s):
        """Colorize a section label according to its object family."""
        s = s.rstrip(":") + ":"
        if s in ("ruleset:", "type:", "public:", "filterset:", "stats:"):
            return self.colorize(s, c=self.GREEN)
        elif s in ("moduleset:",):
            # fixed: ("moduleset:") was a bare string, turning the test
            # into a substring match instead of tuple membership
            return self.colorize(s, c=self.DARKCYAN)
        elif s in ("publication group:", "responsible group:"):
            return self.colorize(s, c=self.BLUE)
        elif s in ("variable:", "module:"):
            return self.colorize(s, c=self.RED)
        return s

    def get_data(self, _path):
        """Fetch the object's /export payload and index the returned
        rulesets, modulesets and filtersets by name for nested lookups."""
        r = requests.get(self.cli.api+_path+"/export", auth=self.cli.auth, verify=not self.cli.insecure)
        data = json.loads(bdecode(r.content))
        # load hashes
        self.rulesets = {}
        for e in data.get("rulesets", []):
            self.rulesets[e.get("ruleset_name")] = e
        self.modulesets = {}
        for e in data.get("modulesets", []):
            self.modulesets[e.get("modset_name")] = e
        self.filtersets = {}
        for e in data.get("filtersets", []):
            # fixed: filtersets were stored into self.modulesets,
            # leaving self.filtersets always empty
            self.filtersets[e.get("fset_name")] = e
        return data

    def cmd(self, line):
        """Resolve the target object from the args and print its design."""
        global path
        try:
            options, args = self.parser.parse_args(args=shlex.split(line))
        except Exception as e:
            try: print(e)
            except: pass
            return
        _path = self.args_to_path(args)
        data = self.get_data(_path)
        _path = self.factorize_dot_dot(_path)
        obj_type = _path.split("/")[-2]
        obj_id = _path.split("/")[-1]
        # the last path element may be a numeric id or a name
        try:
            obj_id = int(obj_id)
        except:
            pass
        if obj_type == "modulesets":
            self.print_moduleset(obj_id, data)
        elif obj_type == "rulesets":
            self.print_ruleset(obj_id, data)
        elif obj_type == "filtersets":
            self.print_filterset(obj_id, data)
        else:
            print("unsupported object type:", obj_type)

    def print_moduleset(self, obj_id, data):
        """Print every moduleset matching obj_id (name or numeric id)."""
        for e in data.get("modulesets", []):
            if e.get("modset_name") == obj_id or e.get("id") == obj_id:
                self.print_export_moduleset(e, data)

    def print_ruleset(self, obj_id, data):
        """Print every ruleset matching obj_id (name or numeric id)."""
        for e in data.get("rulesets", []):
            if e.get("ruleset_name") == obj_id or e.get("id") == obj_id:
                self.print_export_ruleset(e, data)

    def print_filterset(self, obj_id, data):
        """Print every filterset matching obj_id (name or numeric id)."""
        for e in data.get("filtersets", []):
            if e.get("fset_name") == obj_id or e.get("id") == obj_id:
                self.print_export_filterset(e, data)

    def iprint(self, *args, **vars):
        """print() with a leading indent of vars["lvl"] spaces."""
        lvl = vars.get("lvl", 0)
        if lvl > 0:
            args = [" "*lvl] + list(args)
        print(*args)

    def print_export_rulesets(self, data, lvl=0):
        """Print all rulesets of an export payload."""
        for e in data["rulesets"]:
            self.print_export_ruleset(e, data, lvl=lvl)

    def print_export_modulesets(self, data, lvl=0):
        """Print all modulesets of an export payload."""
        for e in data["modulesets"]:
            self.print_export_moduleset(e, data, lvl=lvl)

    def print_export_filtersets(self, data, lvl=0):
        """Print all filtersets of an export payload."""
        for e in data["filtersets"]:
            self.print_export_filterset(e, data, lvl=lvl)

    def print_export_ruleset(self, rset, data, lvl=0):
        """Recursively print a ruleset, its groups, variables and
        nested rulesets."""
        self.iprint(self.label("ruleset"), rset.get("ruleset_name"), lvl=lvl)
        self.iprint(self.label("public"), rset.get("ruleset_public"), lvl=lvl+1)
        self.iprint(self.label("type"), rset.get("ruleset_type"), lvl=lvl+1)
        if rset.get("fset_name"):
            self.iprint(self.label("filterset"), rset.get("fset_name"), lvl=lvl+1)
        for e in rset.get("publications"):
            self.iprint(self.label("publication group"), e, lvl=lvl+1)
        for e in rset.get("responsibles"):
            self.iprint(self.label("responsible group"), e, lvl=lvl+1)
        for e in rset.get("variables"):
            self.iprint(self.label("variable"), e.get("var_class"), e.get("var_name"), lvl=lvl+1)
        for e in rset.get("rulesets"):
            _e = self.rulesets.get(e)
            if _e is None:
                continue
            self.print_export_ruleset(_e, data, lvl=lvl+1)

    def print_export_moduleset(self, modset, data, lvl=0):
        """Recursively print a moduleset, its modules and nested sets."""
        self.iprint(self.label("moduleset"), modset.get("modset_name"), lvl=lvl)
        for e in modset.get("publications"):
            self.iprint(self.label("publication group"), e, lvl=lvl+1)
        for e in modset.get("responsibles"):
            self.iprint(self.label("responsible group"), e, lvl=lvl+1)
        for e in modset.get("modules"):
            autofix = e.get("autofix")
            if autofix:
                autofix = "(autofix)"
            else:
                autofix = ""
            self.iprint(self.label("module"), e.get("modset_mod_name"), autofix, lvl=lvl+1)
        for e in modset.get("rulesets"):
            _e = self.rulesets.get(e)
            if _e is None:
                continue
            self.print_export_ruleset(_e, data, lvl=lvl+1)
        for e in modset.get("modulesets"):
            _e = self.modulesets.get(e)
            if _e is None:
                continue
            self.print_export_moduleset(_e, data, lvl=lvl+1)

    def print_export_filterset(self, rset, data, lvl=0):
        """Print a filterset and its ordered filters."""
        self.iprint(self.label("filterset"), rset.get("fset_name"), lvl=lvl)
        self.iprint(self.label("stats"), rset.get("fset_stats"), lvl=lvl+1)
        for e in rset.get("filters"):
            if e.get("filterset"):
                # nested filterset reference
                self.iprint(self.colorize(str(e.get("f_order"))+":", c=self.RED),
                            e.get("f_log_op"),
                            e.get("filterset"),
                            lvl=lvl+1)
            else:
                f = e.get("filter")
                self.iprint(self.colorize(str(e.get("f_order"))+":", c=self.RED),
                            e.get("f_log_op"),
                            f.get("f_table")+"."+f.get("f_field"), f.get("f_op"), f.get("f_value"),
                            lvl=lvl+1)
class CmdGet(Cmd):
    """Generic GET passthrough: extra --key value pairs on the command
    line become request query parameters."""
    api_candidates = True
    command = "get"
    desc = "Execute a GET request on the given API handler. The parameters can be set using -- ."
    parser = CmdOptionParser(description=desc)

    def cmd(self, line):
        """Send a GET request to the handler designated by the args."""
        # register the --key options found on the line so parse_args
        # accepts arbitrary parameters
        self.set_parser_options_from_cmdline(line)
        global path
        try:
            options, args = self.parser.parse_args(args=shlex.split(line))
        except Exception as e:
            # printing the exception itself may fail; stay silent then
            try: print(e)
            except: pass
            return
        params = options.__dict__
        _path = self.args_to_path(args)
        r = requests.get(self.cli.api+_path, params=params, auth=self.cli.auth, verify=not self.cli.insecure)
        try:
            # try not to display \u0000 in the output
            d = json.loads(bdecode(r.content))
            self.print_content(json.dumps(d, ensure_ascii=False, indent=8))
        except Exception as e:
            # non-json payload: print raw
            self.print_content(r.content)
class CmdHistory(Cmd):
    """Display the interactive commands history, most recent last."""
    command = "history"
    desc = "Display the commands history"
    parser = CmdOptionParser(description=desc)
    # cap on the number of entries displayed
    max_lines = 200

    def candidates(self, p, words=None):
        # no completion for this command; accept the extra 'words'
        # argument the Completer passes to candidates()
        return []

    def cmd(self, line):
        """Print the last max_lines history entries with their index."""
        n = readline.get_current_history_length()
        m = min(n, self.max_lines)
        # readline history items are 1-indexed: the last m entries span
        # [n-m+1, n]. The original range(n-m, n) printed a None item 0
        # and skipped the most recent entry; it also left two debug
        # prints of n and m in the output.
        for i in range(n - m + 1, n + 1):
            print("%-6d %s" % (i, readline.get_history_item(i)))
class CmdCd(Cmd):
    """Change the current working directory in the API handlers tree."""
    api_candidates = True
    command = "cd"
    desc = "Change the current working directory in the API handlers tree."
    parser = CmdOptionParser(description=desc)
    # ring buffer of previously visited paths, backing "cd -"
    prev_paths = ["/"]
    max_prev_paths = 10

    def append_to_prev_paths(self, p):
        """Push the current path on the history ring, capped at
        max_prev_paths entries."""
        global path
        if path == self.prev_paths[-1]:
            return
        self.prev_paths.append(copy.copy(path))
        if len(self.prev_paths) <= self.max_prev_paths:
            return
        for i in range(len(self.prev_paths)-self.max_prev_paths):
            dump = self.prev_paths.pop(0)

    def set_new_path(self, p):
        """Record the current path in history, then switch to p."""
        global path
        self.append_to_prev_paths(p)
        path = p

    def cmd(self, line):
        """Implement cd with "..", "-", absolute and relative targets."""
        global path
        # fixed: the named group was lost to extraction garbling,
        # leaving the invalid pattern "(?P[...]" — m.group("path")
        # below requires (?P<path>...)
        m = re.match(r"^cd\s+(?P<path>[% @\-\./\w]+)$", line)
        if m is None:
            return
        p = m.group("path")
        # handle "cd -"
        if p == "-":
            new_path = self.prev_paths.pop()
            self.set_new_path(new_path)
            return
        if p != "/":
            p = p.rstrip("/")
        l = path.split("/")
        v = p.split("/")
        # consume leading ".." components, dropping the tail of cwd
        for elem in copy.copy(v):
            if elem != "..":
                break
            l.pop()
            v.pop(0)
        new_path = "/".join(l)
        if new_path == "":
            new_path = "/"
        p = "/".join(v)
        if p == "":
            # target was only ".." components
            self.set_new_path(new_path)
            return
        if p.startswith("/"):
            new_path = p
        else:
            new_path += "/" + p
        new_path = new_path.replace("//", "/")
        if self.path_match_handlers_or_parents(new_path):
            self.set_new_path(new_path)
            return
        print("path not found")
        return
def path_match_handler(p, d):
    """Return True when path p matches handler descriptor d.

    The root path always matches. A path also matches when appending
    "/foo" makes it match d["pattern"], i.e. p names the collection
    the handler serves items of.
    """
    if p == "/":
        return True
    candidate = p.rstrip("/")
    pattern = d["pattern"]
    for probe in (candidate, candidate + "/foo"):
        if re.match(pattern, probe) is not None:
            return True
    return False
def path_match_handler_or_parents(p, d):
    """Return True when p matches handler d or one of the "parent"
    patterns derived from it.

    Parent patterns are produced by repeatedly truncating the pattern
    at its right-most "/" and allowing trailing slashes; the iteration
    count is fixed by the slash count of the original pattern.
    """
    if p == "/":
        return True
    pattern = d["pattern"]
    if re.match(pattern, p) is not None:
        return True
    trimmed = pattern
    for _ in range(pattern.count("/")):
        trimmed = trimmed[:trimmed.rindex("/")] + "[/]*$"
        if re.match(trimmed, p) is not None:
            return True
    return False
class Completer(object):
    """Readline completer fed by the registered command objects.

    NOTE(review): this follows the classic PyMOTW readline completer
    recipe; block layout was reconstructed accordingly.
    """

    def __init__(self, commands):
        self.commands = commands
        self.current_candidates = []
        # index commands by their textual name for fast lookup
        self.commands_h = {}
        for c in commands:
            self.commands_h[c.command] = c
        self.base_commands = self.commands_h.keys()

    def complete(self, text, state):
        """readline entry point: return the state-th candidate or None."""
        response = None
        if state == 0:
            # This is the first time for this text, so build a match list.
            origline = readline.get_line_buffer()
            begin = readline.get_begidx()
            end = readline.get_endidx()
            being_completed = origline[begin:end]
            words = origline.split()
            self.current_candidates = sorted(self.base_commands)
            try:
                if begin == 0:
                    # first word
                    candidates = self.current_candidates
                else:
                    # later word: find the longest leading word sequence
                    # naming a known command
                    command = " ".join(words)
                    while (command != ""):
                        if command in self.commands_h:
                            c = self.commands_h[command]
                            break
                        command = " ".join(command.split()[:-1])
                    if command == "":
                        raise KeyError("command not supported")
                    candidates = []
                    candidates += c.candidates(being_completed, words)
                if being_completed:
                    # match options with portion of input
                    # being completed
                    self.current_candidates = [ w for w in candidates
                                                if w.startswith(being_completed) ]
                else:
                    # matching empty string so use all candidates
                    self.current_candidates = candidates
            except (KeyError, IndexError) as err:
                self.current_candidates = []
        try:
            response = self.current_candidates[state]
        except IndexError:
            response = None
        return response
class Cli(object):
    """Interactive and one-shot command line client for the collector
    REST API.

    Credentials and the api url come from constructor kwargs, command
    line options, or the config file, in that order of precedence.
    """

    def __init__(self, user=None, password=None, api=None):
        self.user = user
        self.password = password
        self.api = api
        self.parse_args()
        self.read_config()
        self.parse_options()
        self.refresh_api()
        # one instance per supported command; dispatch() scans this list
        self.commands = [
            CmdCd(cli=self),
            CmdLs(cli=self),
            CmdHistory(cli=self),
            CmdGet(cli=self),
            CmdPost(cli=self),
            CmdPut(cli=self),
            CmdDelete(cli=self),
            CmdShow(cli=self),
            CmdHelp(cli=self),
            CmdRuleset(cli=self),
            CmdVariable(cli=self),
            CmdModuleset(cli=self),
            CmdModule(cli=self),
            CmdFilter(cli=self),
            CmdFilterset(cli=self),
            CmdSysreport(cli=self),
            CmdSafe(cli=self),
        ]
        self.dispatch_noninteractive()

    def dispatch(self, line):
        """Route an input line to the first command whose pattern matches."""
        if line.strip() == "":
            return
        for command in self.commands:
            if not command.match(line):
                continue
            try:
                return command.cmd(line)
            except CliError as e:
                print(str(e), file=sys.stderr)
                return
        print("command not found:", line)

    def parse_options(self):
        """Resolve user/password/api from options or config, then
        normalize the api url and persist the initial config."""
        if self.user is None:
            self.user = self.set_option("user")
        if self.password is None:
            self.password = self.set_option("password")
        if self.api is None:
            self.api = self.set_option("api")
        self.insecure = self.set_option("insecure", False)
        self.auth = (self.user, self.password)
        self.config = self.options.config
        if self.insecure and InsecureRequestWarning is not None:
            requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
        # extract host from the api url
        self.host = self.api.replace("https://", "").replace("http://", "")
        if "/" in self.host:
            self.host = self.host[:self.host.index("/")]
        # normalize to the expected /init/rest/api endpoint
        if not self.api.endswith("/rest/api"):
            self.api = "https://" + self.host + "/init/rest/api"
        self.save_config()

    def save_config(self):
        """ Save options if no config file is present yet.
        """
        # NOTE(review): the password is stored in clear text; the file
        # is chmod'ed 0600 below to mitigate
        if os.path.exists(self.config):
            return
        if self.user is None or self.password is None or self.api is None or self.insecure is None:
            return
        print("initializing %s config file with provided parameters" % self.config)
        self.conf.add_section(conf_section)
        self.conf.set(conf_section, "user", self.user)
        self.conf.set(conf_section, "password", self.password)
        self.conf.set(conf_section, "api", self.api)
        self.conf.set(conf_section, "insecure", self.insecure)
        with open(self.config, 'w') as fp:
            self.conf.write(fp)
        os.chmod(self.config, 0o0600)

    def refresh_api(self):
        """Build the Api object describing the remote handlers tree."""
        try:
            self.api_o = Api(cli=self, refresh=self.options.refresh_api)
        except Exception as e:
            print(str(e), file=sys.stderr)
            sys.exit(1)

    def read_config(self):
        """Tighten the config file mode if needed, then load it."""
        if os.path.exists(self.options.config):
            s = os.stat(self.options.config)
            if s.st_mode & stat.S_IWOTH:
                print("set ", self.options.config, "mode to 0600")
                os.chmod(self.options.config, 0o0600)
        try:
            self.conf = ConfigParser.RawConfigParser()
            self.conf.read(self.options.config)
        except:
            pass

    def set_option(self, o, default=None):
        """Return option o from, in order: interactive prompt (when the
        value is "?"), command line, config file, default; exit when
        none provides a value."""
        if self.options.__dict__[o] == "?":
            if o == "password":
                import getpass
                return getpass.getpass()
            else:
                return raw_input(o+": ")
        if self.options.__dict__[o] is not None:
            return self.options.__dict__[o]
        if self.conf.has_option(conf_section, o):
            return self.conf.get(conf_section, o)
        if default is not None:
            return default
        print("missing parameter: "+o, file=sys.stderr)
        sys.exit(1)

    def parse_args(self):
        """Parse the process command line."""
        __ver = ""
        __usage = "nodemgr collector cli [options] command"
        desc = "A command line interface to manage and access data on the OpenSVC collector."
        parser = optparse.OptionParser(version=__ver, usage=__usage, description=desc)
        parser.add_option("--user", "-u", default=None,
                          action="store", dest="user",
                          help="The OpenSVC collector account user email")
        parser.add_option("--password", "-p", default=None,
                          action="store", dest="password",
                          help="The OpenSVC collector account user password. Set to ? to prompt.")
        parser.add_option("--api", "-a", default=None,
                          action="store", dest="api",
                          help="The OpenSVC collector api url")
        parser.add_option("--refresh-api", default=False,
                          action="store_true", dest="refresh_api",
                          help="The OpenSVC collector api url")
        parser.add_option("--insecure", "-i", default=None,
                          action="store_true", dest="insecure",
                          help="Ignore ssl certification verification")
        parser.add_option("--config", "-c", default=conf_f,
                          action="store", dest="config",
                          help="The file the collector credentials and access url are read from, or written to if empty. Defaults to %s" % conf_f)
        parser.add_option("--color", default="auto",
                          action="store", dest="color",
                          help="colorize output. possible values are : auto=guess based on tty presence, always|yes=always colorize, never|no=never colorize")
        parser.add_option("--format", default="table",
                          action="store", dest="format",
                          help="format data as table, json, csv or yaml")
        self.options, self.args = parser.parse_args()
        # strip the "collector cli" prefix nodemgr passes through
        if len(self.args) > 1 and self.args[0] == "collector" and self.args[1] == "cli":
            self.args.pop(0)
            self.args.pop(0)

    def dispatch_noninteractive(self):
        """When positional args remain, run them as a single command
        and exit instead of entering the interactive loop."""
        if len(self.args) > 0:
            # non interactive mode
            import subprocess
            line = subprocess.list2cmdline(self.args)
            try:
                self.dispatch(line)
            except ValueError as exc:
                print(exc)
                sys.exit(1)
            except KeyboardInterrupt:
                print("Interrupted")
                sys.exit(1)
            except Exception as e:
                import traceback
                e = sys.exc_info()
                print(e[0], e[1], traceback.print_tb(e[2]))
                sys.exit(1)
            sys.exit(0)

    def readline_setup(self):
        """Install tab completion and persistent history."""
        readline.parse_and_bind('tab: complete')
        atexit.register(readline.write_history_file, history_f)
        try:
            readline.read_history_file(history_f)
        except IOError:
            pass
        readline.set_completer(Completer(self.commands).complete)
        # keep path-ish characters as part of completed words
        delims = readline.get_completer_delims()
        delims = delims.replace("-", "").replace("/", "").replace("@", "").replace("%", "")
        readline.set_completer_delims(delims)

    def input_loop(self):
        """Interactive prompt loop; 'exit' or 'quit' terminates."""
        self.readline_setup()
        line = ''
        while line not in ('exit', 'quit'):
            try:
                line = raw_input(self.host+":"+path+' # ')
                self.dispatch(line)
            except ValueError as exc:
                print(exc)
                readline.redisplay()
                pass
            except EOFError:
                print()
                return
            except KeyboardInterrupt:
                print()
                readline.redisplay()
                pass
            except Exception as e:
                import traceback
                e = sys.exc_info()
                print(e[0], e[1], traceback.print_tb(e[2]))

    def run(self):
        """Start the interactive session."""
        self.input_loop()
# Entry point: build the Cli (parses args, reads config, may dispatch a
# non-interactive command and exit) then start the interactive loop.
if __name__ == "__main__":
    cli = Cli()
    cli.run()
opensvc-1.8~20170412/lib/rcSysReport.py 0000644 0001750 0001750 00000032460 13073467726 017707 0 ustar jkelbert jkelbert from __future__ import print_function
import os
import sys
import shutil
import glob
import json
from stat import *
from subprocess import *
import rcExceptions as ex
from rcGlobalEnv import rcEnv
from rcUtilities import which, cmdline2list
class SysReport(object):
    def __init__(self, node=None):
        """Prepare collection rules and the collect directory layout.

        node: object providing the collector proxy used to send reports.
        """
        # default items to collect; user rules from sysreport.conf.d are
        # merged in later by merge_todo()
        self.todo = [
            ('INC', os.path.join(rcEnv.pathetc, 'node.conf')),
            ('INC', os.path.join(rcEnv.pathetc, '*.conf')),
            ('INC', os.path.join(rcEnv.pathetc, 'sysreport.conf.d')),
        ]
        self.changed = []
        self.deleted = []
        self.sysreport_conf_d = os.path.join(rcEnv.pathetc, "sysreport.conf.d")
        self.sysreport_d = os.path.join(rcEnv.pathvar, "sysreport")
        self.collect_d = os.path.join(self.sysreport_d, rcEnv.nodename)
        self.collect_cmd_d = os.path.join(self.collect_d, "cmd")
        self.collect_file_d = os.path.join(self.collect_d, "file")
        self.collect_stat = os.path.join(self.collect_file_d, "stat")
        # the stat cache file is always part of the full file list
        self.full = [self.collect_stat]
        self.stat_changed = False
        self.root_uid = 0
        self.root_gid = 0
        self.node = node
        self.archive_extension = '.tar'
        self.send_rpc = "send_sysreport"
        self.lstree_rpc = "sysreport_lstree"
def init(self):
self.init_dir(self.collect_d)
self.init_dir(self.collect_cmd_d)
self.init_dir(self.collect_file_d)
self.init_dir(self.sysreport_conf_d)
self.load_stat()
self.merge_todo()
    def init_dir(self, fpath):
        """Create fpath if needed and enforce root ownership and 0600 mode."""
        self.init_collect_d(fpath)
        self.init_collect_d_ownership(fpath)
        self.init_collect_d_perms(fpath)
def init_collect_d(self, fpath):
if not os.path.exists(fpath):
print("create dir", fpath)
os.makedirs(fpath)
def init_collect_d_perms(self, fpath):
s = os.stat(fpath)
mode = s[ST_MODE]
if mode != 16768:
print("set dir", fpath, "mode to 0600")
os.chmod(fpath, 0o0600)
def init_collect_d_ownership(self, fpath):
s = os.stat(fpath)
if s.st_uid != self.root_uid or s.st_gid != self.root_gid:
print("set dir", self.collect_d, "ownership to", self.root_uid, self.root_gid)
os.chown(self.collect_d, self.root_uid, self.root_gid)
    def load_stat(self):
        """Load the cached stat info; start from an empty cache on any
        error (missing or corrupt stat file)."""
        try:
            self.stat = self._load_stat()
        except:
            self.stat = {}
def _load_stat(self):
with open(self.collect_stat, "r") as f:
buff = f.read()
l = json.loads(buff)
stat = {}
for e in l:
try:
stat[e["fpath"]] = e
except:
pass
return stat
def write_stat(self):
if not self.stat_changed:
return
self._write_stat()
def _write_stat(self):
l = []
for fpath in sorted(self.stat.keys()):
l.append(self.stat[fpath])
with open(self.collect_stat, "w") as f:
f.write(json.dumps(l, sort_keys=True, separators=[", ", ": "], indent=4))
    def merge_todo(self):
        """Merge collection rules from sysreport.conf.d into self.todo,
        then expand them into self.files / self.cmds, and compute the
        set of previously collected files to delete.
        """
        # 1/ load user rules, skipping insecure config files
        for root, dnames, fnames in os.walk(self.sysreport_conf_d):
            for fname in fnames:
                fpath = os.path.join(self.sysreport_conf_d, fname)
                s = os.stat(fpath)
                mode = s[ST_MODE]
                if mode & S_IWOTH:
                    print("skip %s config file: file mode is insecure ('other' has write permission)" % fpath)
                    continue
                if s.st_uid != self.root_uid or s.st_gid != self.root_gid:
                    print("skip %s config file: file ownership is insecure (must be owned by root)" % fpath)
                    continue
                with open(fpath, 'r') as f:
                    buff = f.read()
                for line in buff.split("\n"):
                    line = line.strip()
                    # keyword prefixes, tested in order; FILE/DIR/GLOB
                    # are synonyms of INC
                    if line.startswith("FILE"):
                        t = ("INC", line[4:].strip())
                    elif line.startswith("CMD"):
                        t = ("CMD", line[3:].strip())
                    elif line.startswith("DIR"):
                        t = ("INC", line[3:].strip())
                    elif line.startswith("GLOB"):
                        t = ("INC", line[4:].strip())
                    elif line.startswith("INC"):
                        t = ("INC", line[3:].strip())
                    elif line.startswith("EXC"):
                        t = ("EXC", line[3:].strip())
                    elif line == "":
                        continue
                    elif line.startswith("#"):
                        continue
                    elif line.startswith(";"):
                        continue
                    else:
                        print("unsupported item type:", line)
                        continue
                    if t not in self.todo:
                        self.todo.append(t)
        # expand
        inc = set([])
        exc = set([])
        self.cmds = set([])
        for mode, s in self.todo:
            if mode == "CMD":
                self.cmds.add(s)
                continue
            l = []
            # glob each INC/EXC pattern; directories are walked recursively
            for _s in glob.glob(s):
                if os.path.isdir(_s):
                    l += self.find_files(_s)
                else:
                    l.append(_s)
            if mode == "INC":
                inc |= set(l)
            elif mode == "EXC":
                exc |= set(l)
        self.files = inc - exc
        # find deleted: collected copies whose original is gone,
        # compared by path relative to collect_file_d
        dst_files = self.find_files(self.collect_file_d)
        n = len(self.collect_file_d)
        dst_files = map(lambda x: x[n:], dst_files)
        self.deleted = set(dst_files) - self.files - set(["/stat"])
        # order file lists
        self.files = sorted(list(self.files))
        self.deleted = sorted(list(self.deleted))
        # purge stat info of deleted files
        for fpath in self.deleted:
            if fpath in self.stat:
                del(self.stat[fpath])
                self.stat_changed = True
                if self.collect_stat not in self.changed:
                    self.changed.append(self.collect_stat)
                if self.collect_stat not in self.full:
                    self.full.append(self.collect_stat)
def cmdlist2fname(self, l):
fname = '(space)'.join(l)
fname = fname.replace('|','(pipe)')
fname = fname.replace('&','(amp)')
fname = fname.replace('$','(dollar)')
fname = fname.replace('^','(caret)')
fname = fname.replace('/','(slash)')
fname = fname.replace(':','(colon)')
fname = fname.replace(';','(semicolon)')
fname = fname.replace('<','(lt)')
fname = fname.replace('>','(gt)')
fname = fname.replace('=','(eq)')
fname = fname.replace('?','(question)')
fname = fname.replace('@','(at)')
fname = fname.replace('!','(excl)')
fname = fname.replace('#','(num)')
fname = fname.replace('%','(pct)')
fname = fname.replace('"','(dquote)')
fname = fname.replace("'",'(squote)')
return fname
def write(self, fpath, buff):
try:
with open(fpath, 'r') as f:
pbuff = f.read()
if buff != pbuff:
self.changed.append(fpath)
except IOError:
self.changed.append(fpath)
with open(fpath, 'w') as f:
f.write(buff)
self.full.append(fpath)
def collect_cmd(self, cmd):
l = cmdline2list(cmd)
if len(l) == 0:
print(" err: syntax error")
return
if not os.path.exists(l[0]):
return
if which(l[0]) is None:
print(" err: not executable")
return
fname = self.cmdlist2fname(l)
cmd_d = os.path.join(self.collect_cmd_d, fname)
p = Popen(l, stdout=PIPE, stderr=STDOUT, close_fds=True)
out, err = p.communicate()
if sys.version_info[0] >= 3:
out = out.decode("utf-8")
self.write(os.path.join(cmd_d), out)
def get_stat(self, fpath):
st = os.stat(fpath)
stat = {
"fpath": fpath,
"realpath": os.path.realpath(fpath),
"mode": oct(st[ST_MODE]),
"uid": st[ST_UID],
"gid": st[ST_GID],
"dev": st[ST_DEV],
"nlink": st[ST_NLINK],
"mtime": st[ST_MTIME],
"ctime": st[ST_CTIME],
}
return stat
def push_stat(self, fpath):
    """
    Refresh the cached stat information for fpath, flagging the stat
    collection as changed when the entry is new or any tracked
    property differs from the cached copy.
    """
    current = self.get_stat(fpath)
    cached = self.stat.get(fpath)
    tracked = ("realpath", "mode", "uid", "gid", "dev", "nlink",
               "mtime", "ctime")
    if cached is not None and \
       all(current[prop] == cached[prop] for prop in tracked):
        # nothing new: leave the change flags untouched
        return
    self.stat[fpath] = current
    self.stat_changed = True
    if self.collect_stat not in self.changed:
        self.changed.append(self.collect_stat)
    if self.collect_stat not in self.full:
        self.full.append(self.collect_stat)
def collect_file(self, fpath):
    # Copy fpath under the collect_file_d tree, recording the copy in
    # self.changed when its content differs from the previous copy,
    # and always in self.full. Also pushes stat information.
    if not os.path.exists(fpath):
        return
    if os.path.islink(fpath):
        # symlinks are not collected
        return
    dst_d = self.collect_file_d + os.path.dirname(fpath)
    fname = os.path.basename(fpath)
    dst_f = os.path.join(dst_d, fname)
    if os.path.isdir(dst_f):
        # the fpath has changed from dir to file. cleanup the dst tree.
        shutil.rmtree(dst_f)
    if not os.path.exists(dst_d):
        os.makedirs(dst_d)
    self.push_stat(fpath)
    try:
        with open(fpath, 'r') as f:
            buff = f.read()
        with open(dst_f, 'r') as f:
            pbuff = f.read()
        if buff != pbuff:
            self.changed.append(dst_f)
    except IOError:
        # in doubt, send ... git will know better on the collector
        self.changed.append(dst_f)
    except UnicodeDecodeError:
        # binary file: skip the content comparison only; the copy
        # below still happens
        pass
    shutil.copy2(fpath, dst_f)
    self.full.append(dst_f)
def delete_collected(self, fpaths):
    """Remove the collected copy of every path in fpaths."""
    for fpath in fpaths:
        self.delete_collected_one(fpath)

def delete_collected_one(self, fpath):
    """Remove the collected copy of a single path."""
    os.unlink(self.collect_file_d + fpath)
def find_files(self, fpath):
    """
    Recursively list the regular files under fpath, descending into
    subdirectories and skipping symlinked files. Return an empty list
    when fpath does not exist.
    """
    found = []
    if not os.path.exists(fpath):
        return found
    for name in os.listdir(fpath):
        child = os.path.join(fpath, name)
        if os.path.isdir(child):
            found.extend(self.find_files(child))
        elif not os.path.islink(child):
            found.append(child)
    return found
def sysreport(self, force=False):
    # Entry point: collect the tracked files and command outputs,
    # then send the report to the collector. Returns 1 (without
    # raising) when no collector connection is available.
    self.node.collector.init(self.send_rpc)
    if self.node.collector.proxy is None:
        print("no collector connexion. abort sysreport")
        return 1
    self.init()
    print("collect directory is", self.collect_d)
    for fpath in self.files:
        self.collect_file(fpath)
    for cmd in self.cmds:
        self.collect_cmd(cmd)
    self.delete_collected(self.deleted)
    self.write_stat()
    self.send(force)
def deleted_report(self):
    """Print the sorted list of files deleted since the last report."""
    lines = ["files deleted:"]
    for fpath in sorted(self.deleted):
        lines.append(" " + fpath)
    for line in lines:
        print(line)
def send(self, force=False):
    """
    Report collected changes to the collector.

    With force=True, send the full collected set and recompute the
    deleted entries from the collector's file tree listing; otherwise
    send only the files that changed during this run.
    """
    if force:
        to_send = self.full
        lstree_data = self.node.collector.call(self.lstree_rpc)
        if lstree_data is None:
            raise ex.excError("can not get lstree from collector")
        n = len(self.collect_d)+1
        # exclude the stat index entry itself. the previous
        # set("file/stat") built a set of single characters, which
        # excluded nothing from lstree_data.
        self.deleted = sorted(list(set(lstree_data) - set(["file/stat"]) - set(map(lambda x: x[n:], self.full))))
    else:
        to_send = self.changed
    self.changed_report()
    if len(self.deleted) > 0:
        self.deleted_report()
    if len(to_send) == 0 and len(self.deleted) == 0:
        print("no change to report")
        return
    if len(to_send) > 0:
        tmpf = self.archive(to_send)
    else:
        tmpf = None
    print("sending sysreport")
    self.node.collector.call(self.send_rpc, tmpf, self.deleted)
    if tmpf is not None:
        os.unlink(tmpf)
def archive(self, l):
    # Create an uncompressed tarball of the given paths under
    # collect_d and return its path. Members are stored relative to
    # sysreport_d, hence the temporary cwd change.
    import tarfile
    import tempfile
    # NamedTemporaryFile is only used to reserve a unique file name;
    # the file is deleted on close and recreated by tarfile.open
    f = tempfile.NamedTemporaryFile(prefix="sysreport.", suffix=self.archive_extension, dir=self.collect_d)
    tmpf = f.name
    f.close()
    cwd = os.getcwd()
    os.chdir(self.sysreport_d)
    # length of the sysreport_d prefix (plus separator) to strip
    # from each archived path
    n = len(self.sysreport_d) + 1
    print("creating tarball", tmpf)
    tar = tarfile.open(tmpf, mode="w")
    for fpath in l:
        if len(fpath) < n:
            print(" err: can not archive", fpath, "(fpath too short)")
            continue
        tar.add(fpath[n:])
    tar.close()
    os.chdir(cwd)
    return tmpf
def changed_report(self):
    """Print the list of files whose content changed, if any."""
    if len(self.changed) == 0:
        return
    print("changed files:")
    for fpath in self.changed:
        print(" ", fpath)
opensvc-1.8~20170412/lib/rcVioServer.py 0000644 0001750 0001750 00000006225 13073467726 017661 0 ustar jkelbert jkelbert from rcUtilities import justcall, which
import rcExceptions as ex
import os
import ConfigParser
from rcGlobalEnv import rcEnv
if rcEnv.pathbin not in os.environ['PATH']:
os.environ['PATH'] += ":"+rcEnv.pathbin
def rcmd(cmd, manager, username, key):
    """
    Run cmd on the remote vio server manager through ssh with the
    given identity key and username. Return (stdout, stderr); raise
    excError on non-zero exit, after echoing the command and output.
    """
    argv = ['ssh', '-i', key, '@'.join((username, manager)), cmd]
    out, err, ret = justcall(argv)
    if ret != 0:
        print(' '.join(argv))
        print(out)
        raise ex.excError("ssh command execution error")
    return out, err
class VioServers(object):
    """
    Enumerate the VioServer arrays declared in the auth configuration
    file (sections with type "vioserver"), optionally restricted to
    the section names listed in objects.
    """
    def __init__(self, objects=[]):
        # keep the requested names: the parsing loop below filters on
        # them. the previous "self.objects = []" discarded the filter
        # list, so enabling filtering excluded every section.
        self.objects = objects
        self.filtering = len(objects) > 0
        self.arrays = []
        cf = rcEnv.authconf
        if not os.path.exists(cf):
            return
        conf = ConfigParser.RawConfigParser()
        conf.read(cf)
        m = {}
        for s in conf.sections():
            if self.filtering and s not in self.objects:
                continue
            if not conf.has_option(s, "type") or \
               conf.get(s, "type") != "vioserver":
                continue
            try:
                username = conf.get(s, 'username')
                key = conf.get(s, 'key')
                m[s] = [username, key]
            except:
                # best effort: report and keep parsing other sections
                print("error parsing section", s)
        del(conf)
        for name, creds in m.items():
            username, key = creds
            self.arrays.append(VioServer(name, username, key))

    def __iter__(self):
        for array in self.arrays:
            yield(array)
class VioServer(object):
    # A single vio server array: runs inventory commands remotely
    # over ssh (see module-level rcmd) and exposes one get_* method
    # per collected key.
    def __init__(self, name, username, key):
        self.name = name
        self.username = username
        self.key = key
        # keys the collector knows how to store for this array type
        self.keys = ['lsmap', 'bootinfo', 'lsfware', 'lsdevattr', 'lsdevvpd', 'devsize']

    def rcmd(self, cmd):
        # run cmd on this vio server with this array's credentials
        return rcmd(cmd, self.name, self.username, self.key)

    def get_lsmap(self):
        # virtual scsi mappings, colon-separated
        cmd = 'ioscli lsmap -all -fmt :'
        print("%s: %s"%(self.name, cmd))
        return self.rcmd(cmd)[0]

    def get_bootinfo(self):
        # size of each backing device
        cmd = 'for i in $(ioscli lsmap -all -field backing|sed "s/Backing device//"); do echo $i $(bootinfo -s $i) ; done'
        print("%s: %s"%(self.name, cmd))
        return self.rcmd(cmd)[0]

    def get_lsfware(self):
        # firmware levels
        cmd = 'ioscli lsfware'
        print("%s: %s"%(self.name, cmd))
        return self.rcmd(cmd)[0]

    def get_lsdevattr(self):
        # per-disk world-wide names
        cmd = 'for i in $(ioscli lsdev -type disk -field name -fmt .) ; do echo $i $(ioscli lsdev -dev $i -attr|grep ww_name);done'
        print("%s: %s"%(self.name, cmd))
        return self.rcmd(cmd)[0]

    def get_lsdevvpd(self):
        # per-disk vital product data
        cmd = 'for i in $(ioscli lsdev -type disk -field name -fmt .) ; do echo $i ; ioscli lsdev -dev $i -vpd;done'
        print("%s: %s"%(self.name, cmd))
        return self.rcmd(cmd)[0]

    def get_devsize(self):
        # per-disk sizes
        cmd = 'for i in $(ioscli lsdev -type disk -field name -fmt .) ; do echo $i $(bootinfo -s $i);done'
        print("%s: %s"%(self.name, cmd))
        return self.rcmd(cmd)[0]
if __name__ == "__main__":
    # ad-hoc smoke test: dump the scsi mappings of every declared
    # vio server
    o = VioServers()
    for vioserver in o:
        # VioServer has no lsmap() method: calling it raised
        # AttributeError. the accessor is get_lsmap().
        print(vioserver.get_lsmap())
opensvc-1.8~20170412/lib/rcUtilities.py 0000644 0001750 0001750 00000050044 13073467726 017706 0 ustar jkelbert jkelbert from __future__ import print_function
import os, sys
import datetime
import logging
import socket
import re
from subprocess import *
from rcGlobalEnv import rcEnv
from functools import wraps
import lock
import json
# Mount points that must never be wiped or force-unmounted by the
# generic cleanup helpers (see protected_dir / protected_mount).
PROTECTED_DIRS = [
    '/',
    '/bin',
    '/boot',
    '/dev',
    '/dev/pts',
    '/dev/shm',
    '/home',
    '/opt',
    '/proc',
    '/sys',
    '/tmp',
    '/usr',
    '/var',
]

# Popen close_fds default used throughout this module.
# NOTE(review): presumably disabled on Windows because close_fds was
# incompatible with redirected std handles on older pythons — confirm.
if os.name == 'nt':
    close_fds = False
else:
    close_fds = True
def lazy(fn):
    """
    A decorator for on-demand initialization of a property.
    The computed value is cached on the instance under the
    '_lazy_' + name attribute.
    """
    hidden = '_lazy_' + fn.__name__

    @property
    def wrapper(self):
        if not hasattr(self, hidden):
            setattr(self, hidden, fn(self))
        return getattr(self, hidden)
    return wrapper
def lazy_initialized(self, attr):
    """
    Return True if the lazy property has been initialized
    """
    return hasattr(self, '_lazy_' + attr)
def set_lazy(self, attr, value):
    """
    Prime the cache of the lazy property attr with value.
    """
    setattr(self, '_lazy_' + attr, value)
def unset_lazy(self, attr):
    """
    Drop the cached value of the lazy property attr, forcing a
    recompute on next access. No-op when nothing is cached.
    """
    hidden = '_lazy_' + attr
    if hasattr(self, hidden):
        delattr(self, hidden)
def bdecode(buff):
    """
    Normalize a subprocess output buffer to str.

    On python2 the input is returned unchanged. On python3, bytes are
    decoded as utf-8 with an ascii fallback; str input is returned
    as-is.
    """
    if sys.version_info[0] < 3:
        return buff
    if type(buff) == str:
        return buff
    try:
        return str(buff, "utf-8")
    except Exception:
        # fallback kept from the original code; may still raise for
        # undecodable byte sequences. the unreachable trailing
        # "return buff" was dropped.
        return str(buff, "ascii")
def is_string(s):
    """ python[23] compatible
    """
    if sys.version_info[0] == 2:
        string_types = (str, unicode)
    else:
        string_types = (str,)
    return isinstance(s, string_types)
def mimport(*args, **kwargs):
    # Import the most specific module whose name is built from the
    # capitalized concatenation of args: try the os-specific variant
    # (suffixed with rcEnv.sysname) first, then the generic one, then
    # retry with the last component dropped (unless fallback=False).
    def fmt(s):
        # capitalize the first letter, lower the rest
        if len(s) > 1:
            return s[0].upper()+s[1:].lower()
        elif len(s) == 1:
            return s[0].upper()
        else:
            return ""
    mod = ""
    for i, e in enumerate(args):
        if e in ("res", "prov") and i == 0:
            # the res/prov leading components stay lowercase
            mod += e
        else:
            mod += fmt(e)
    try:
        return __import__(mod+rcEnv.sysname)
    except ImportError:
        pass
    try:
        return __import__(mod)
    except ImportError:
        pass
    if kwargs.get("fallback", True) and len(args) > 1:
        args = args[:-1]
        return mimport(*args, **kwargs)
    else:
        raise ImportError("no module found")
def ximport(base):
    """
    Import the os-specific implementation module (base + sysname)
    when available, falling back to the generic base module.
    """
    try:
        return __import__(base + rcEnv.sysname)
    except:
        return __import__(base)
def check_privs():
    """
    Exit(1) with a sudo hint when not running as root.
    Windows is exempt from the check.
    """
    if os.name == 'nt':
        return
    if os.getuid() == 0:
        return
    import copy
    argv = copy.copy(sys.argv)
    argv[0] = os.path.basename(argv[0]).replace(".py", "")
    print('Insufficient privileges. Try:\n sudo ' + ' '.join(argv))
    sys.exit(1)
def banner(text, ch='=', length=78):
    """Return text centered in a length-wide rule of ch characters."""
    return (' %s ' % text).center(length, ch)
def is_exe(fpath):
    """
    Return True when fpath exists, is not a directory, and carries an
    execute permission bit. Does not follow symlinks through isdir.
    """
    return (not os.path.isdir(fpath)) and \
        os.path.exists(fpath) and os.access(fpath, os.X_OK)
def which(program):
    """
    Locate program in PATH, honoring PATHEXT extensions (windows).
    Return the full path of the first executable candidate, or None.
    A program given with a directory part is only checked in place.
    """
    if program is None:
        return

    def candidates(base):
        yield base
        for ext in os.environ.get("PATHEXT", "").split(os.pathsep):
            yield base + ext

    head, _ = os.path.split(program)
    if head:
        if os.path.isfile(program) and is_exe(program):
            return program
        return
    for directory in os.environ["PATH"].split(os.pathsep):
        for candidate in candidates(os.path.join(directory, program)):
            if is_exe(candidate):
                return candidate
    return
def justcall(argv=['/bin/false']):
    """
    Execute argv and return the (stdout, stderr, returncode) tuple.
    Return ("", "", 1) without executing when the command is not
    found in PATH.
    """
    if which(argv[0]) is None:
        return ("", "", 1)
    proc = Popen(argv, stdout=PIPE, stderr=PIPE, close_fds=close_fds)
    out, err = proc.communicate(input=None)
    return bdecode(out), bdecode(err), proc.returncode
def empty_string(buff):
    """
    Return True when buff is empty after stripping leading/trailing
    spaces, then leading/trailing newlines.
    """
    return len(buff.strip(' ').strip('\n')) == 0
def call(argv,
         cache=False,      # serve/don't serve cmd output from cache
         log=None,         # callers should provide there own logger
                           # or we'll have to allocate a generic one
         info=False,       # False: log cmd as debug
                           # True: log cmd as info
         outlog=False,     # False: discard stdout
         errlog=True,      # False: discard stderr
                           # True: log stderr as err, warn or info
                           #       depending on err_to_warn and
                           #       err_to_info value
         outdebug=True,    # True: log.debug cmd stdout
                           # False: skip log.debug stdout
         errdebug=True,    # True: log.debug cmd stderr
                           # False: skip log.debug stderr
                           #        depending on err_to_warn and
                           #        err_to_info value
         err_to_warn=False,
         err_to_info=False,
         warn_to_info=False,
         shell=False,
         preexec_fn=None,
         cwd=None,
         env=None):
    """ return(ret, out,err)
    """
    # General-purpose command runner with optional per-process output
    # caching (keyed by the joined command line) and fine-grained
    # control of how stdout/stderr are routed to the logger.
    if log is None:
        log = logging.getLogger('CALL')
    if not argv or len(argv) == 0:
        return (0, '', '')
    # with shell=True, argv is already a single command string
    if shell:
        cmd = argv
    else:
        cmd = ' '.join(argv)
    if not shell and which(argv[0]) is None:
        log.error("%s does not exist or not in path or is not executable"%
                  argv[0])
        return (1, '', '')
    if info:
        log.info(cmd)
    else:
        log.debug(cmd)
    # the cache lives on rcEnv for the duration of the process
    if not hasattr(rcEnv, "call_cache"):
        rcEnv.call_cache = {}
    if cache and cmd not in rcEnv.call_cache:
        log.debug("cache miss for '%s'"%cmd)
    if not cache or cmd not in rcEnv.call_cache:
        process = Popen(argv, stdout=PIPE, stderr=PIPE, close_fds=close_fds, shell=shell, preexec_fn=preexec_fn, cwd=cwd, env=env)
        buff = process.communicate()
        buff = tuple(map(lambda x: bdecode(x), buff))
        ret = process.returncode
        if ret == 0:
            # successful output is cached even when cache=False, so a
            # later cached call can serve it
            log.debug("store '%s' output in cache"%cmd)
            rcEnv.call_cache[cmd] = buff
        elif cmd in rcEnv.call_cache:
            log.debug("discard '%s' output from cache because ret!=0"%cmd)
            del rcEnv.call_cache[cmd]
        elif cache:
            log.debug("skip store '%s' output in cache because ret!=0"%cmd)
    else:
        log.debug("serve '%s' output from cache"%cmd)
        buff = rcEnv.call_cache[cmd]
        ret = 0
    # route stderr to the logger per the err_to_*/errlog/errdebug flags
    if not empty_string(buff[1]):
        if err_to_info:
            log.info('stderr:\n' + buff[1])
        elif err_to_warn:
            log.warning('stderr:\n' + buff[1])
        elif errlog:
            if ret != 0:
                log.error('stderr:\n' + buff[1])
            elif warn_to_info:
                log.info('command successful but stderr:\n' + buff[1])
            else:
                log.warning('command successful but stderr:\n' + buff[1])
        elif errdebug:
            log.debug('stderr:\n' + buff[1])
    # route stdout to the logger per the outlog/outdebug flags
    if not empty_string(buff[0]):
        if outlog:
            if ret == 0:
                log.info('output:\n' + buff[0])
            elif err_to_info:
                log.info('command failed with stdout:\n' + buff[0])
            elif err_to_warn:
                log.warning('command failed with stdout:\n' + buff[0])
            else:
                log.error('command failed with stdout:\n' + buff[0])
        elif outdebug:
            log.debug('output:\n' + buff[0])
    return (ret, buff[0], buff[1])
def qcall(argv=['/bin/false']):
    """
    Launch argv discarding stdout and stderr, and return its exit
    code as an int. Returns 0 for an empty argv.
    """
    if not argv:
        # previously returned the tuple (0, ''), inconsistent with
        # the int returned on the normal path below
        return 0
    # NOTE: stdout/stderr are piped but never read; a command writing
    # more than a pipe buffer of output could block here
    process = Popen(argv, stdout=PIPE, stderr=PIPE, close_fds=close_fds)
    process.wait()
    return process.returncode
def vcall(args, **kwargs):
    """
    Verbose call(): force info-level logging of the command and its
    stdout, then delegate to call().
    """
    kwargs.update({"info": True, "outlog": True})
    return call(args, **kwargs)
def getmount(path):
    """
    Walk up from path and return the first mount point found
    (symlinks are not considered mount points). Terminates at '/'.
    """
    cursor = os.path.abspath(path)
    while cursor != os.path.sep:
        if os.path.ismount(cursor) and not os.path.islink(cursor):
            return cursor
        cursor = os.path.abspath(os.path.join(cursor, os.pardir))
    return cursor
def protected_dir(path):
    """
    Return True when path (ignoring any trailing slash) is one of the
    system directories listed in PROTECTED_DIRS.
    """
    return path.rstrip("/") in PROTECTED_DIRS
def protected_mount(path):
    """
    Return True when the mount point hosting path is a protected
    system directory.
    """
    return getmount(path) in PROTECTED_DIRS
def printplus(obj):
    """
    Pretty-prints the object passed in: dicts as one 'key: value'
    line per entry sorted by key, lists and tuples one element per
    line, anything else through plain print.
    """
    if isinstance(obj, dict):
        for key in sorted(obj):
            print("%s: %s" % (str(key), str(obj[key])))
    elif isinstance(obj, (list, tuple)):
        for element in obj:
            print(element)
    else:
        print(obj)
def cmdline2list(cmdline):
    """
    Translate a command line string into a list of arguments, using
    using the same rules as the MS C runtime:
    1) Arguments are delimited by white space, which is either a
       space or a tab.
    2) A string surrounded by double quotation marks is
       interpreted as a single argument, regardless of white space
       contained within. A quoted string can be embedded in an
       argument.
    3) A double quotation mark preceded by a backslash is
       interpreted as a literal double quotation mark.
    4) Backslashes are interpreted literally, unless they
       immediately precede a double quotation mark.
    5) If backslashes immediately precede a double quotation mark,
       every pair of backslashes is interpreted as a literal
       backslash. If the number of backslashes is odd, the last
       backslash escapes the next double quotation mark as
       described in rule 3.
    """
    # See
    # http://msdn.microsoft.com/library/en-us/vccelng/htm/progs_12.asp
    # Step 1: Translate all literal quotes into QUOTE. Justify number
    # of backspaces before quotes.
    tokens = []
    bs_buf = ""
    QUOTE = 1 # \", literal quote (sentinel: not a str, so it cannot
              # collide with any input character)
    for c in cmdline:
        if c == '\\':
            # accumulate backslashes until we know what follows
            bs_buf += c
        elif c == '"' and bs_buf:
            # A quote preceded by some number of backslashes.
            num_bs = len(bs_buf)
            tokens.extend(["\\"] * (num_bs//2))
            bs_buf = ""
            if num_bs % 2:
                # Odd. Quote should be placed literally in array
                tokens.append(QUOTE)
            else:
                # Even. This quote serves as a string delimiter
                tokens.append('"')
        else:
            # Normal character (or quote without any preceding
            # backslashes)
            if bs_buf:
                # We have backspaces in buffer. Output these.
                tokens.extend(list(bs_buf))
                bs_buf = ""
            tokens.append(c)
    # Step 2: split into arguments
    result = [] # Array of strings
    quoted = False
    arg = [] # Current argument
    # trailing space sentinel flushes the last argument
    tokens.append(" ")
    for c in tokens:
        if c == '"':
            # Toggle quote status
            quoted = not quoted
        elif c == QUOTE:
            arg.append('"')
        elif c in (' ', '\t'):
            if quoted:
                arg.append(c)
            else:
                # End of argument. Output, if anything.
                if arg:
                    result.append(''.join(arg))
                    arg = []
        else:
            # Normal character
            arg.append(c)
    return result
def try_decode(string, codecs=['utf8', 'latin1']):
    """
    Decode string with the first codec that works, returning the
    input unchanged when none applies (e.g. already-decoded str).
    """
    for codec in codecs:
        try:
            return string.decode(codec)
        except Exception:
            continue
    return string
def getaddr_cache_set(name, addr):
    """
    Store the resolved address of name in the addrinfo cache and
    return addr, creating the cache directory if needed.
    """
    cache_d = os.path.join(rcEnv.pathvar, "cache", "addrinfo")
    if not os.path.exists(cache_d):
        os.makedirs(cache_d)
    with open(os.path.join(cache_d, name), 'w') as ofile:
        ofile.write(addr)
    return addr
def getaddr_cache_get(name):
    # Return the cached address for name. Raise when the cache entry
    # is missing, older than 16 minutes, or does not look like an
    # IPv4/IPv6 address.
    cache_d = os.path.join(rcEnv.pathvar, "cache", "addrinfo")
    if not os.path.exists(cache_d):
        os.makedirs(cache_d)
    cache_f = os.path.join(cache_d, name)
    if not os.path.exists(cache_f):
        raise Exception("addrinfo cache empty for name %s" % name)
    cache_mtime = datetime.datetime.fromtimestamp(os.stat(cache_f).st_mtime)
    limit_mtime = datetime.datetime.now() - datetime.timedelta(minutes=16)
    if cache_mtime < limit_mtime:
        raise Exception("addrinfo cache expired for name %s (%s)" % (name, cache_mtime.strftime("%Y-%m-%d %H:%M:%S")))
    with open(cache_f, 'r') as f:
        addr = f.read()
    # sanity check: 3 dots for IPv4, any colon for IPv6
    if addr.count(".") != 3 and ":" not in addr:
        raise Exception("addrinfo cache corrupted for name %s: %s" % (name, addr))
    return addr
def getaddr(name, cache_fallback, log=None):
    """
    Resolve name to an address, optionally falling back to the local
    addrinfo cache when the resolution fails.
    """
    if cache_fallback:
        return getaddr_caching(name, log=log)
    return getaddr_non_caching(name)
def getaddr_non_caching(name, log=None):
    """
    Resolve name through the system resolver and refresh the local
    cache entry. Cache write errors are logged, not raised.
    """
    records = socket.getaddrinfo(name, None)
    if len(records) == 0:
        raise Exception("could not resolve name %s: empty dns request resultset" % name)
    addr = records[0][4][0]
    try:
        getaddr_cache_set(name, addr)
    except Exception as exc:
        if log:
            log.warning("failed to cache name addr %s, %s: %s" %(name, addr, str(exc)))
    return addr
def getaddr_caching(name, log=None):
    """
    Resolve name, falling back to the local addrinfo cache when the
    resolver fails.
    """
    try:
        addr = getaddr_non_caching(name)
    except Exception as exc:
        if log:
            log.warning("%s. fallback to cache." % str(exc))
        addr = getaddr_cache_get(name)
        if log:
            log.info("fetched %s address for name %s from cache" % (addr, name))
    return addr
def convert_bool(s):
    """
    Convert a boolean-ish value to a bool.

    Truthy: yes, y, true, t, 1. Falsy: no, n, false, f, 0, 0.0,
    empty, none, [], {}. Anything else raises Exception.
    """
    word = str(s).lower()
    if word in ("yes", "y", "true", "t", "1"):
        return True
    if word in ("no", "n", "false", "f", "0", "0.0", "", "none", "[]", "{}"):
        return False
    # the error path previously referenced the undefined name
    # 'value', raising NameError instead of the intended Exception
    raise Exception('Invalid value for boolean conversion: ' + str(s))
def convert_size(s, _to='', _round=1):
    # Convert a size string (e.g. '10M', '1.5GiB', 2048) to an int
    # expressed in the _to unit, rounded down to a multiple of _round.
    l = ['', 'K', 'M', 'G', 'T', 'P', 'Z', 'E']
    if type(s) in (int, float):
        s = str(s)
    s = s.strip().replace(",", ".")
    if len(s) == 0:
        return 0
    if s == '0':
        return 0
    size = s
    unit = ""
    # split the numeric part from the unit suffix
    for i, c in enumerate(s):
        if not c.isdigit() and c != '.':
            size = s[:i]
            unit = s[i:].strip()
            break
    # NOTE(review): 'i' units (KiB, MiB, ...) map to factor 1000 and
    # plain units (K, M, ...) to 1024 here, which inverts the usual
    # SI/IEC convention -- confirm this is the intended project-wide
    # convention before changing.
    if 'i' in unit:
        factor = 1000
    else:
        factor = 1024
    if len(unit) > 0:
        unit = unit[0].upper()
    size = float(size)
    try:
        start_idx = l.index(unit)
    except:
        raise Exception("unsupported unit in converted value: %s" % s)
    # scale the input down to bytes
    for i in range(start_idx):
        size *= factor
    if 'i' in _to:
        factor = 1000
    else:
        factor = 1024
    if len(_to) > 0:
        unit = _to[0].upper()
    else:
        unit = ''
    # 'B' means plain bytes, same as no unit
    if unit == 'B':
        unit = ''
    try:
        end_idx = l.index(unit)
    except:
        raise Exception("unsupported target unit: %s" % unit)
    # scale bytes up to the requested unit
    for i in range(end_idx):
        size /= factor
    size = int(size)
    # round down to a multiple of _round
    d = size % _round
    if d > 0:
        size = (size // _round) * _round
    return size
def cidr_to_dotted(s):
    """
    Convert a CIDR prefix length (str or int, 0-32) to a dotted
    netmask string, e.g. 24 -> '255.255.255.0'.
    """
    width = int(s)
    # the previous implementation reused the loop index as the prefix
    # length, producing a 33-bit string that only gave correct
    # results because the final slicing ignored the extra bit
    bits = "1" * width + "0" * (32 - width)
    octets = [str(int(bits[i:i+8], 2)) for i in range(0, 32, 8)]
    return '.'.join(octets)
def to_dotted(s):
    """
    Normalize a netmask to dotted form: dotted input is returned
    as-is, CIDR prefix length input is converted.
    """
    s = str(s)
    return s if '.' in s else cidr_to_dotted(s)
def hexmask_to_dotted(mask):
    """
    Convert a hexadecimal netmask (e.g. '0xffffff00') to dotted form.
    """
    digits = mask.replace('0x', '')
    octets = [str(int(digits[i:i+2], 16)) for i in range(0, len(digits), 2)]
    return '.'.join(octets)
def dotted_to_cidr(mask):
    """
    Convert a dotted netmask to its CIDR prefix length, returned as a
    string. Returns '' for None.
    """
    if mask is None:
        return ''
    count = 0
    for octet in mask.split("."):
        count += bin(int(octet)).count("1")
    return str(count)
def to_cidr(s):
    """
    Normalize a netmask to CIDR prefix length form. Accepts dotted,
    hexadecimal (e.g. 0xffffff00) or already-CIDR input.
    """
    if '.' in s:
        return dotted_to_cidr(s)
    if re.match("^(0x)*[0-9a-f]{8}$", s):
        # example: 0xffffff00
        return dotted_to_cidr(hexmask_to_dotted(s))
    return s
def term_width():
    """
    Best-effort terminal width detection: os.get_terminal_size
    (python 3.3+), then 'stty -a' parsing, then the COLUMNS
    environment variable, defaulting to 78 columns.
    """
    default = 78
    try:
        # python 3.3+
        return os.get_terminal_size().columns
    except:
        pass
    if rcEnv.sysname == "Windows":
        return default
    if which("stty") is None:
        return default
    out, err, ret = justcall(['stty', '-a'])
    # the named group was lost in this copy of the file: '(?P\d+)' is
    # not valid regex syntax and made re.search raise at runtime
    m = re.search(r'columns\s+(?P<columns>\d+);', out)
    if m:
        return int(m.group('columns'))
    try:
        return int(os.environ["COLUMNS"])
    except Exception:
        pass
    return default
def get_cache_d():
    # session-private cache directory (reaped by purge_cache)
    return os.path.join(rcEnv.pathvar, "cache", rcEnv.session_uuid)
def cache(sig):
    """
    Decorator caching the wrapped callable's result under the cache
    signature sig, serialized by a lock file. Objects can prefix the
    signature through their cache_sig_prefix attribute; their log
    attribute, when present, receives the cache diagnostics.
    """
    def wrapper(fn):
        @wraps(fn)
        def decorator(*args, **kwargs):
            if len(args) > 0 and hasattr(args[0], "log"):
                log = args[0].log
            else:
                log = None
            if len(args) > 0 and hasattr(args[0], "cache_sig_prefix"):
                _sig = args[0].cache_sig_prefix + sig
            else:
                _sig = sig
            fpath = cache_fpath(_sig)
            try:
                lfd = lock.lock(timeout=30, delay=0.1, lockfile=fpath+'.lock', intent="cache")
            except Exception as exc:
                # 'self' was referenced here but is not in scope of
                # this plain function: use the detected logger
                if log:
                    log.warning("cache locking error: %s. run command uncached." % str(exc))
                return fn(*args, **kwargs)
            try:
                data = cache_get(fpath, log=log)
            except Exception as exc:
                if log:
                    log.debug(str(exc))
                data = fn(*args, **kwargs)
                cache_put(fpath, data, log=log)
            lock.unlock(lfd)
            return data
        return decorator
    return wrapper
def cache_fpath(sig):
    """
    Return the cache file path for signature sig, creating the cache
    directory on demand (best effort: a concurrent creator wins).
    """
    cache_d = get_cache_d()
    if not os.path.exists(cache_d):
        try:
            os.makedirs(cache_d)
        except:
            # we run unlocked here: another process may have created
            # the directory since the exists() test
            pass
    return os.path.join(cache_d, sig)
def cache_put(fpath, data, log=None):
    """
    Serialize data as json into the cache file fpath and return data.
    On serialization error, best-effort drop the partial cache file.
    """
    if log:
        log.debug("cache PUT: %s" % fpath)
    try:
        with open(fpath, "w") as ofile:
            json.dump(data, ofile)
    except Exception:
        try:
            os.unlink(fpath)
        except:
            pass
    return data
def cache_get(fpath, log=None):
    """
    Load and return the json-serialized data cached in fpath.
    Raise Exception on cache miss or read error.
    """
    if not os.path.exists(fpath):
        raise Exception("cache MISS: %s" % fpath)
    if log:
        log.debug("cache GET: %s" % fpath)
    try:
        with open(fpath, "r") as f:
            data = json.load(f)
    except Exception as e:
        # 'ex' (rcExceptions) is not imported by this module: the
        # previous "raise ex.excError" was a NameError
        raise Exception("cache read error: %s" % str(e))
    # a stray lock.unlock(lfd) here referenced the undefined name
    # 'lfd', making every successful cache hit raise NameError;
    # locking is the caller's responsibility (the cache decorator)
    return data
def clear_cache(sig, o=None):
    # Remove the cache entry for signature sig (optionally prefixed
    # by o.cache_sig_prefix), holding the entry's lock file while
    # unlinking.
    if o and hasattr(o, "cache_sig_prefix"):
        sig = o.cache_sig_prefix + sig
    fpath = cache_fpath(sig)
    if not os.path.exists(fpath):
        return
    if o and hasattr(o, "log"):
        o.log.debug("cache CLEAR: %s" % fpath)
    lfd = lock.lock(timeout=30, delay=0.1, lockfile=fpath+'.lock')
    try:
        os.unlink(fpath)
    except:
        # best effort: a concurrent clear may have won
        pass
    lock.unlock(lfd)
def purge_cache():
    """
    Drop cache session directories older than 6 hours.
    """
    import time
    import shutil
    cache_d = os.path.join(rcEnv.pathvar, "cache")
    if not os.path.exists(cache_d) or not os.path.isdir(cache_d):
        return
    threshold = time.time() - 21600
    for entry in os.listdir(cache_d):
        entry = os.path.join(cache_d, entry)
        if not os.path.isdir(entry) or not os.stat(entry).st_ctime < threshold:
            # session more recent than 6 hours
            continue
        try:
            shutil.rmtree(entry)
        except:
            pass
if __name__ == "__main__":
    #print("call(('id','-a'))")
    #(r,output,err)=call(("/usr/bin/id","-a"))
    #print("status: ", r, "output:", output)
    # ad-hoc conversion checks
    print(convert_size("10000 KiB", _to='MiB', _round=3))
    print(convert_size("10M", _to='', _round=4096))
opensvc-1.8~20170412/lib/svcmgr.py 0000644 0001750 0001750 00000021640 13073467726 016707 0 ustar jkelbert jkelbert # coding: utf8
"""
This executable is wrapped by the opensvc shell script.
It's the entrypoint for all OpenSVC services management ops.
"""
from __future__ import print_function
from __future__ import absolute_import
import sys
import os
import svcBuilder
import rcStatus
import rcColor
from svcmgr_parser import SvcmgrOptParser
import rcExceptions as ex
from rcUtilities import ximport
from rcGlobalEnv import Storage
def get_docker_argv(argv=None):
    """
    Extract docker argv from svcmgr argv.
    svcmgr acts as a wrapper for docker, setting the service-specific
    socket if necessary. Returns (svcmgr_argv, docker_argv).
    """
    if argv is None:
        argv = sys.argv[1:]
    if len(argv) < 2 or "docker" not in argv:
        return argv, []
    pos = argv.index('docker')
    docker_argv = argv[pos+1:] if len(argv) > pos + 1 else []
    return argv[:pos+1], docker_argv
def get_minimal(action, options):
    """
    Return True if the services can be built with minimal parsing
    """
    if action in ("set", "unset", "edit_config"):
        return True
    if action == "get" and not options.eval:
        return True
    if action == "delete":
        # deleting with unprovision needs the full build
        return not options.unprovision
    for prefix in ("print_config", "json_config", "collector_"):
        if action.startswith(prefix):
            return True
    return False
def get_build_kwargs(optparser, options, action):
    """
    Return the service build function keyword arguments, deduced from
    parsed command line options.
    """
    build_kwargs = {}
    build_kwargs["minimal"] = get_minimal(action, options)
    # no explicit selector yet: honor the service link env var set by
    # the per-service wrapper symlinks
    if len(set(["svcnames", "status"]) & set(build_kwargs.keys())) == 0:
        if os.environ.get("OSVC_SERVICE_LINK"):
            build_kwargs["svcnames"] = [os.environ.get("OSVC_SERVICE_LINK")]
    if hasattr(options, "svcs") and options.svcs is not None:
        build_kwargs["svcnames"] = options.svcs.split(',')
    if hasattr(options, "status") and options.status is not None:
        build_kwargs["status"] = [rcStatus.status_value(s) for s in options.status.split(",")]
    if hasattr(options, "primary") and options.primary is not None and \
       hasattr(options, "secondary") and options.secondary is not None:
        # parser.error() exits the process
        optparser.parser.error("--onlyprimary and --onlysecondary are exclusive")
    if hasattr(options, "primary") and options.primary is not None:
        build_kwargs["onlyprimary"] = options.primary
    if hasattr(options, "secondary") and options.secondary is not None:
        build_kwargs["onlysecondary"] = options.secondary
    # don't autopush when the intent is to push explicitely
    build_kwargs["autopush"] = action != "push"
    build_kwargs["create_instance"] = action in ("create", "pull")
    return build_kwargs
def do_svcs_action_detached(argv=None):
    """
    Executes the services action in detached process mode, so that
    a term/kill signal on the parent process does not abort the action.
    Keyboard interrupts do abort the detached process though.
    """
    ret = 0
    try:
        import subprocess
        import signal
        # re-exec ourselves with --daemon appended, in a new session
        # (os.setsid) so the child survives signals sent to the
        # parent's process group
        proc = subprocess.Popen([sys.executable, __file__] + argv + ["--daemon"],
                                stdout=None, stderr=None, stdin=None,
                                close_fds=True, cwd=os.sep,
                                preexec_fn=os.setsid)
        proc.wait()
        ret = proc.returncode
    except KeyboardInterrupt as exc:
        # NOTE(review): proc is unbound here if Popen itself raised
        # before assignment -- confirm this path is acceptable
        os.killpg(os.getpgid(proc.pid), signal.SIGKILL)
        print("kill detached process")
        ret = 1
    except ex.excSignal as exc:
        print("the action, detached as pid %d, "
              "will continue executing" % proc.pid)
        ret = 1
    except Exception as exc:
        print(exc, file=sys.stderr)
        ret = 1
    return ret
def do_svcs_action(node, options, action, argv):
    """
    Execute the services action, switching between detached mode for
    stop*/shutdown/unprovision/switch, and inline mode for other
    actions.
    """
    detach = not options.daemon and (
        action.startswith("stop") or
        action in ("shutdown", "unprovision", "switch") or
        (action == "delete" and options.unprovision == True)
    )
    if detach:
        return do_svcs_action_detached(argv)
    try:
        return node.do_svcs_action(action, options)
    except ex.excError as exc:
        print(exc, file=sys.stderr)
        return 1
def do_svc_create(node, svcnames, action, options, build_kwargs):
    """
    Handle service creation command.
    """
    ret = 0
    try:
        node.install_service(svcnames, fpath=options.config,
                             template=options.template)
    except Exception as exc:
        print(str(exc), file=sys.stderr)
        ret = 1
    if options.config is None and options.template is None:
        # build the service from command line --resource definitions
        data = getattr(svcBuilder, action)(svcnames, options.resource,
                                           interactive=options.interactive,
                                           provision=options.provision)
    else:
        data = {"rid": [], "ret": 0}
    # if the user want to provision a resource defined via configuration
    # file edition, he will set --rid or --tag or --subset to point
    # the update command to it
    options.rid = ",".join(data.get("rid", []))
    # force a refresh of node.svcs
    # don't push to the collector yet
    try:
        node.rebuild_services(svcnames, build_kwargs["minimal"])
    except ex.excError as exc:
        print(exc, file=sys.stderr)
        ret = 1
    if len(node.svcs) == 1 and (options.config or options.template):
        node.svcs[0].setenv(options.env, options.interactive)
        # setenv changed the service config file
        # we need to rebuild again
        try:
            node.rebuild_services(svcnames, build_kwargs["minimal"])
        except ex.excError as exc:
            print(exc, file=sys.stderr)
            ret = 1
    if options.provision:
        if len(node.svcs) == 1 and ( \
            options.config or \
            options.template \
        ):
            node.svcs[0].action("provision", options)
    if ret != 0:
        return ret
    return data["ret"]
def prepare_options(options):
    """
    Prepare and return the options Storage() as expected by the
    Svc::action and Node::do_svcs_action methods, stripping the
    'parm_' prefix from option names.
    """
    opts = Storage()
    for key in options.__dict__:
        opts[key.replace("parm_", "")] = options.__dict__[key]
    return opts
def _main(node, argv=None):
    """
    Build the service list, full or minimal depending on the requested action.
    Execute action-specific codepaths.
    """
    build_err = False
    svcnames = []
    ret = 0
    # strip the docker passthrough arguments before option parsing
    argv, docker_argv = get_docker_argv(argv)
    optparser = SvcmgrOptParser()
    options, action = optparser.parse_args(argv)
    options = prepare_options(options)
    options.docker_argv = docker_argv
    rcColor.use_color = options.color
    try:
        node.options.format = options.format
    except AttributeError:
        pass
    node.options.single_service = options.svcs is not None and \
                                  len(options.svcs.split(",")) == 1
    build_kwargs = get_build_kwargs(optparser, options, action)
    if action != "create":
        try:
            node.build_services(**build_kwargs)
        except ex.excError as exc:
            print(exc, file=sys.stderr)
            build_err = True
    if node.svcs is not None and len(node.svcs) > 0:
        svcnames = [svc.svcname for svc in node.svcs]
    elif action == "create" and "svcnames" in build_kwargs:
        svcnames = build_kwargs["svcnames"]
    if len(svcnames) == 0:
        if not build_err:
            # NOTE(review): the '<svcname>' placeholders of this usage
            # text appear to have been stripped in this copy of the
            # file -- restore them from upstream before shipping
            sys.stderr.write("No service specified. Try:\n"
                             " svcmgr -s [,]\n"
                             " svcmgr --status [,]\n"
                             " \n")
        return 1
    if action == "create":
        return do_svc_create(node, svcnames, action, options, build_kwargs)
    node.set_rlimit()
    ret = do_svcs_action(node, options, action, argv=argv)
    # flush log handlers before returning to the wrapper
    try:
        import logging
        logging.shutdown()
    except:
        pass
    return ret
def main(argv=None):
    """
    Instanciate a Node object.
    Call the real deal making sure the node is finally freed.
    """
    node_mod = ximport('node')
    try:
        node = node_mod.Node()
    except Exception as exc:
        print(exc, file=sys.stderr)
        return 1
    try:
        ret = _main(node, argv=argv)
    except ex.excError as exc:
        print(exc, file=sys.stderr)
        return 1
    except KeyboardInterrupt:
        return 1
    finally:
        node.close()
    return 0 if ret is None else ret
if __name__ == "__main__":
    # wrapper entry point: propagate the action return code
    ret = main()
    sys.exit(ret)
opensvc-1.8~20170412/lib/resFsDarwin.py 0000644 0001750 0001750 00000015266 13073467726 017644 0 ustar jkelbert jkelbert import os
import rcMountsDarwin as rcMounts
import resFs as Res
from rcUtilities import qcall, protected_mount, getmount
from rcGlobalEnv import rcEnv
from rcLoopDarwin import file_to_loop
import rcExceptions as ex
from stat import *
def try_umount(self):
    # Umount ladder shared by the Darwin Mount resource: diskutil
    # umount, diskutil umount force, plain umount, then (unless the
    # mount point is protected) kill users of the mount point and
    # retry, finally umount -f. Returns the last command's exit code.
    cmd = ['diskutil', 'umount', self.mount_point]
    (ret, out, err) = self.vcall(cmd, err_to_info=True)
    if ret == 0:
        return 0
    cmd = ['diskutil', 'umount', 'force', self.mount_point]
    (ret, out, err) = self.vcall(cmd, err_to_info=True)
    if ret == 0:
        return 0
    cmd = ['umount', self.mount_point]
    (ret, out, err) = self.vcall(cmd, err_to_info=True)
    if ret == 0:
        return 0
    """ don't try to kill process using the source of a
    protected bind mount
    """
    if protected_mount(self.mount_point):
        return 1
    """ best effort kill of all processes that might block
    the umount operation. The priority is given to mass
    action reliability, ie don't contest oprator's will
    """
    cmd = ['sync']
    (ret, out, err) = self.vcall(cmd, err_to_info=True)
    # up to 4 kill+umount rounds; stop early when nothing was using
    # the mount point anymore
    for i in range(4):
        nb_killed = self.killfuser(self.mount_point)
        self.log.info('umount %s'%self.mount_point)
        cmd = ['umount', self.mount_point]
        ret = qcall(cmd)
        if ret == 0 or nb_killed == 0:
            break
    if ret != 0:
        self.log.info("no more process using %s, yet umount fails. try forced umount."%self.mount_point)
        cmd = ['umount', '-f', self.mount_point]
        (ret, out, err) = self.vcall(cmd, err_to_info=True)
    return ret
class Mount(Res.Mount):
""" define FreeBSD mount/umount doAction """
def __init__(self,
             rid,
             mount_point,
             device,
             fs_type,
             mount_options,
             snap_size=None,
             **kwargs):
    # lazy-filled cache of the system mounts table
    self.Mounts = None
    # loop device backing a file-backed mount, discovered in start()
    self.loopdevice = None
    self.isloop = False
    Res.Mount.__init__(self,
                       rid,
                       mount_point=mount_point,
                       device=device,
                       fs_type=fs_type,
                       mount_options=mount_options,
                       snap_size=snap_size,
                       **kwargs)
    # fsck command table, keyed by fs type
    self.fsck_h = {
        'hfs': {
            'bin': 'fsck',
            'cmd': ['diskutil', 'repairVolume', self.device]
        },
    }
def killfuser(self, dir):
    # Send SIGKILL (fuser -k) to processes using dir and return the
    # number of processes signaled, parsed from fuser's
    # 'mountpoint: pid pid ...' output.
    cmd = ['fuser', '-kmc', dir]
    (ret, out, err) = self.vcall(cmd, err_to_info=True)
    """ return the number of process we sent signal to
    """
    l = out.split(':')
    if len(l) < 2:
        return 0
    return len(l[1].split())
def is_up(self):
self.Mounts = rcMounts.Mounts()
ret = self.Mounts.has_mount(self.device, self.mount_point)
if ret:
return True
if self.fs_type not in self.netfs:
try:
st = os.stat(self.device)
mode = st[ST_MODE]
except:
self.log.debug("can not stat %s" % self.device)
return False
if S_ISREG(mode):
# might be a loopback mount
devs = file_to_loop(self.device)
for dev in devs:
ret = self.Mounts.has_mount(dev, self.mount_point)
if ret:
return True
return False
def realdev(self):
dev = None
try:
mode = os.stat(self.device)[ST_MODE]
except:
self.log.debug("can not stat %s" % self.device)
return None
if S_ISCHR(mode):
dev = self.device
else:
mnt = getmount(self.device)
if self.Mounts is None:
self.Mounts = rcMounts.Mounts()
m = self.Mounts.has_param("mnt", mnt)
if m is None:
self.log.debug("can't find dev %(dev)s mounted in %(mnt)s in mnttab"%dict(mnt=mnt, dev=self.device))
return None
dev = m.dev
return dev
def disklist(self):
dev = self.realdev()
if dev is None:
return set([])
try:
statinfo = os.stat(dev)
except:
self.log.error("can not stat %s" % dev)
raise ex.excError
return set([dev])
def can_check_writable(self):
return True
def start(self):
if self.Mounts is None:
self.Mounts = rcMounts.Mounts()
Res.Mount.start(self)
if self.fs_type in self.netfs or self.device == "none":
# TODO showmount -e
pass
else:
try:
mode = os.stat(self.device)[ST_MODE]
if S_ISREG(mode):
devs = file_to_loop(self.device)
if len(devs) > 0:
self.loopdevice = devs[0]
self.isloop = True
except:
self.log.debug("can not stat %s" % self.device)
return False
if self.is_up() is True:
self.log.info("%s is already mounted" % self.label)
return 0
if not os.path.exists(self.mount_point):
os.makedirs(self.mount_point, 0o755)
if self.isloop is True:
cmd = ['hdiutil', 'attach', '-mountpoint', self.mount_point , self.device]
(ret, out, err) = self.vcall(cmd)
else:
self.fsck()
try:
cmd = ['diskutil', 'mount', '-mount_point', self.mount_point , self.device]
(ret, out, err) = self.vcall(cmd)
except:
if self.fs_type != "":
fstype = ['-t', self.fs_type]
else:
fstype = []
if self.mount_options != "":
mntopt = ['-o', self.mount_options]
else:
mntopt = []
cmd = ['mount']+fstype+mntopt+[self.device, self.mount_point]
(ret, out, err) = self.vcall(cmd)
if ret != 0:
raise ex.excError
self.Mounts = None
self.can_rollback = True
def stop(self):
if self.Mounts is None:
self.Mounts = rcMounts.Mounts()
if self.is_up() is False:
self.log.info("%s is already umounted" % self.label)
return
for i in range(3):
ret = try_umount(self)
if ret == 0: break
if ret != 0:
self.log.error('failed to umount %s'%self.mount_point)
raise ex.excError
self.Mounts = None
if __name__ == "__main__":
    # debug helper: dump the pydoc of the classes defined in this module
    for c in (Mount,) :
        help(c)
opensvc-1.8~20170412/lib/resDiskLoopLinux.py 0000644 0001750 0001750 00000004444 13073467726 020667 0 ustar jkelbert jkelbert import os
import re
from rcGlobalEnv import *
from rcUtilities import call, which
import rcStatus
import resDiskLoop as Res
import rcExceptions as ex
from rcLoopLinux import file_to_loop
class Disk(Res.Disk):
    """Linux loopback device resource driver (losetup-based)."""

    def is_up(self):
        """Returns True if the loop group is present and activated
        """
        self.loop = file_to_loop(self.loopFile)
        if len(self.loop) == 0:
            return False
        return True

    def start(self):
        """Attach self.loopFile to a free loop device. Raises ex.excError
        when losetup fails."""
        if self.is_up():
            self.log.info("%s is already up" % self.label)
            return
        cmd = [ 'losetup', '-f', self.loopFile ]
        (ret, out, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError
        # refresh self.loop with the device(s) now backing the file
        self.loop = file_to_loop(self.loopFile)
        self.log.info("%s now loops to %s" % (', '.join(self.loop), self.loopFile))
        self.can_rollback = True

    def stop(self):
        """Detach every loop device backing self.loopFile. Raises
        ex.excError on the first losetup -d failure."""
        if not self.is_up():
            self.log.info("%s is already down" % self.label)
            return 0
        for loop in self.loop:
            cmd = [ 'losetup', '-d', loop ]
            (ret, out, err) = self.vcall(cmd)
            if ret != 0:
                raise ex.excError

    def parent_dir_handled_by_service(self):
        """Walk up from the loop file's directory and return the service
        fs resource whose mount point contains it, or None."""
        d = os.path.dirname(self.loopFile)
        mntpts = {}
        for r in self.svc.get_resources(["fs"]):
            mntpts[r.mount_point] = r
        while True:
            if d in mntpts.keys():
                return mntpts[d]
            d = os.path.dirname(d)
            if d == os.sep:
                return

    def _status(self, verbose=False):
        """Report UP/DOWN; warn about a missing loop file only when the
        hosting fs resource is up (otherwise the file is expected to be
        unreachable)."""
        r = self.parent_dir_handled_by_service()
        if not os.path.exists(self.loopFile):
            if r is None or (r and r.status() in (rcStatus.UP, rcStatus.STDBY_UP)):
                self.status_log("%s does not exist" % self.loopFile)
        if self.is_up(): return self.status_stdby(rcStatus.UP)
        else: return self.status_stdby(rcStatus.DOWN)

    def __init__(self, rid, loopFile, **kwargs):
        Res.Disk.__init__(self, rid, loopFile, **kwargs)

    def provision(self):
        # delegated to the provisioning module, imported lazily
        m = __import__("provDiskLoopLinux")
        prov = m.ProvisioningDisk(self)
        prov.provisioner()

    def unprovision(self):
        # delegated to the provisioning module, imported lazily
        m = __import__("provDiskLoopLinux")
        prov = m.ProvisioningDisk(self)
        prov.unprovisioner()
opensvc-1.8~20170412/lib/resSyncNexenta.py 0000644 0001750 0001750 00000020144 13073467726 020355 0 ustar jkelbert jkelbert import os
import logging
from rcGlobalEnv import rcEnv
import rcExceptions as ex
import rcStatus
import resSync
import datetime
from rcNexenta import Nexenta
class syncNexenta(resSync.Sync):
    """Nexenta autosync replication resource.

    Elects the local/remote heads into master/slave roles at runtime
    (get_endpoints) and drives the autosync runner over the Nexenta API.
    """

    def can_sync(self, target=None):
        """Return False when the last sync is too recent (skip_sync),
        True otherwise. Raises ex.excError when heads are unreachable."""
        try:
            self.get_endpoints()
        except ex.excError as e:
            self.log.error(str(e))
            raise ex.excError
        if self.ts is None:
            self.get_props()
        if self.skip_sync(self.ts):
            return False
        return True

    def syncswap(self):
        # only available from CLI ?
        pass

    def sync_update(self):
        """Submit an autosync runner execution when the runner is online
        and a sync is due (or forced)."""
        try:
            self.get_endpoints()
        except ex.excError as e:
            self.log.error(str(e))
            raise ex.excError
        if not self.can_sync() and not self.svc.options.force:
            return
        s = self.master.autosync_get_state(self.autosync)
        if s == "disabled":
            self.log.error("update not applicable: disabled")
            return
        if s == "running":
            self.log.info("update not applicable: transfer in progress")
            return
        if s != "online":
            self.log.error("update not applicable: %s state"%s)
            return
        self.master.autosync_execute(self.autosync)
        self.log.info("autosync runner execution submitted")

    def bind(self):
        """Create the local->remote ssh binding if it does not exist."""
        b = self.local.ssh_list_bindings()
        found = False
        for k in b:
            # binding keys are formatted user@host:port
            user, hostport = k.split('@')
            if hostport == self.remote.head:
                found = True
                break
        if found:
            self.log.info("%s head already bound"%self.remote.head)
        else:
            self.local.ssh_bind(self.remote.username, self.remote.head, self.remote.password)
            self.log.info("%s head bound"%self.remote.head)

    def unbind(self):
        """Remove every local ssh binding pointing at the remote head."""
        b = self.local.ssh_list_bindings()
        done = False
        for k in b:
            user, hostport = k.split('@')
            if hostport != self.remote.head:
                continue
            self.local.ssh_unbind(user, hostport, '1')
            self.log.info("%s head unbound"%hostport)
            done = True
        if not done:
            self.log.info("%s head already unbound"%self.remote.head)

    def sync_resync(self):
        """Re-establish replication: bind heads and enable the runner."""
        try:
            self.get_endpoints()
            self.bind()
            self.master.autosync_enable(self.autosync)
            self.log.info("autosync enable submitted")
        except ex.excError as e:
            self.log.error(str(e))
            raise ex.excError

    def sync_break(self):
        """Break replication: unbind heads, disable the runner and wait
        for the disable to be effective."""
        try:
            self.get_endpoints()
            self.unbind()
            self.master.autosync_disable(self.autosync)
            self.log.info("autosync disable submitted")
            self.wait_break()
        except ex.excError as e:
            self.log.error(str(e))
            raise ex.excError

    def wait_break(self):
        """Poll until the runner reports 'disabled' (5 polls, 2s apart),
        raising ex.excError on timeout."""
        import time
        timeout = 5
        for i in range(timeout, 0, -1):
            s = self.master.autosync_get_state(self.autosync)
            if s == "disabled":
                return
            if i > 1:
                time.sleep(2)
        self.log.error("timed out waiting for disable to finish")
        raise ex.excError

    def start(self):
        """Make the local dataset mountable (canmount=on)."""
        try:
            self.get_endpoints()
            self.local.set_can_mount(self.path)
            self.log.info("set 'canmount = on' on %s"%self.path)
        except ex.excError as e:
            self.log.error(str(e))
            raise ex.excError

    def stop(self):
        pass

    def get_props(self):
        """Fetch runner properties and compute self.ts (last sync start,
        as a datetime) and self.age (now - ts)."""
        self.props = self.master.autosync_get_props(self.autosync)
        # timestamp format : 15:34:09,May27
        now = datetime.datetime.now()
        try:
            self.ts = datetime.datetime.strptime(str(now.year)+' '+self.props['zfs/time_started'], "%Y %H:%M:%S,%b%d")
            if now < self.ts:
                # the sync started last year (no year in the timestamp)
                self.ts = datetime.datetime.strptime(str(now.year-1)+' '+self.props['zfs/time_started'], "%Y %H:%M:%S,%b%d")
        except ValueError:
            raise ex.excError("can not parse last sync date: %s"%self.props['zfs/time_started'])
        self.age = now - self.ts

    def _status(self, verbose=False):
        """Return UP when the runner is healthy and the last sync is
        recent enough, WARN otherwise, UNDEF on unexpected errors."""
        ret = rcStatus.UP
        try:
            self.get_endpoints()
            self.status_log("master head is %s"%self.master.head)
            self.get_props()
        except ex.excError as e:
            # NOTE(review): assumes excError may carry a dict payload in
            # e.value — confirm against rcExceptions
            if 'message' in e.value:
                msg = e.value['message']
            else:
                msg = str(e)
            self.status_log(msg)
            return rcStatus.WARN
        except:
            self.status_log("unexpected error")
            self.save_exc()
            return rcStatus.UNDEF
        limit = datetime.timedelta(minutes=self.sync_max_delay)
        if self.age > limit:
            self.status_log("last sync too old: %s ago"%str(self.age))
            ret = rcStatus.WARN
        s = self.master.autosync_get_state(self.autosync)
        if s not in ['online', 'running']:
            self.status_log("runner in '%s' state"%s)
            ret = rcStatus.WARN
        if ret == rcStatus.UP:
            self.status_log("last sync %s ago"%str(self.age))
        return ret

    def get_endpoints(self):
        """ determine which head is the replication master and
            which is replication slave.
        """
        if self.local is not None and self.remote is not None:
            return
        heads = list(set(self.filers.values()) - set([self.filer]))
        if len(heads) != 1:
            raise ex.excError("two heads need to be setup")
        self.local = Nexenta(self.filer, self.log)
        self.remote = Nexenta(heads[0], self.log)
        prop = 'zfs/to-host'
        try:
            localdown = False
            props = self.local.autosync_get_props(self.autosync)
            # the 'to-host' property names the slave side
            if prop in props and props[prop] == self.filer:
                self.slave = self.local
                self.master = self.remote
            else:
                self.slave = self.remote
                self.master = self.local
            return
        except ex.excError as e:
            if 'does not exist' in str(e):
                # no runner on the local head: it must at least host the
                # replicated path to qualify as slave
                path_props = self.local.get_props(self.path)
                if path_props is None:
                    raise ex.excError("path '%s' not found on local head '%s'"%(self.path, self.filer))
                self.slave = self.local
                self.master = self.remote
            else:
                # local head is down
                localdown = True
        # second chance: interrogate the remote head
        try:
            props = self.remote.autosync_get_props(self.autosync)
            if prop in props and props[prop] == self.filer:
                self.slave = self.local
                self.master = self.remote
            else:
                self.slave = self.remote
                self.master = self.local
            return
        except ex.excError as e:
            if 'does not exist' in str(e):
                path_props = self.remote.get_props(self.path)
                if path_props is None:
                    raise ex.excError("path '%s' not found on remote head '%s'"%(self.path, self.filer))
                self.slave = self.remote
                self.master = self.local
            elif localdown:
                raise ex.excError("both heads unreachable")

    def __init__(self,
                 rid=None,
                 name=None,
                 path=None,
                 filers={},
                 reversible=False,
                 **kwargs):
        resSync.Sync.__init__(self,
                              rid=rid,
                              type="sync.nexenta",
                              **kwargs)
        self.label = "nexenta autosync %s"%name
        self.autosync = name
        self.filers = filers
        self.path = path
        self.reversible = reversible
        # the head this node talks to, keyed by nodename
        self.filer = filers[rcEnv.nodename]
        self.master = None
        self.slave = None
        self.ts = None
        self.age = None
        self.props = None
        self.local = None
        self.remote = None

    def __str__(self):
        return "%s autosync=%s" % (resSync.Sync.__str__(self),\
                self.autosync)
opensvc-1.8~20170412/lib/snapJfs2AIX.py 0000644 0001750 0001750 00000010013 13073467726 017426 0 ustar jkelbert jkelbert import os
from rcGlobalEnv import rcEnv
from rcUtilities import which, qcall, protected_mount
import rcExceptions as ex
import snap
class Snap(snap.Snap):
    """AIX JFS2 snapshot driver: creates a snapshot LV sized at 10% of
    the origin LV, mounts it under /service/tmp, and tears it down."""

    def lv_exists(self, device):
        """Return True when 'lslv' knows the logical volume."""
        device = device.split("/")[-1]
        ret = qcall(['lslv', device], cache=True)
        if ret == 0:
            return True
        return False

    def lv_info(self, device):
        """Parse 'lslv' output and return (vg_name, lv_name, size_MB).
        Returns (None, None, None) when lslv fails; raises ex.excError
        when the output can not be parsed."""
        device = device.split("/")[-1]
        (ret, buff, err) = self.call(['lslv', device], cache=True)
        if ret != 0:
            return (None, None, None)
        vg_name = None
        lv_name = None
        prev = ''
        prevprev = ''
        pp_unit = ''
        pps = 0
        pp_size = 0
        # walk the word stream: values follow their "LABEL:" token
        for word in buff.split():
            if prev == "GROUP:":
                vg_name = word
            if prev == "VOLUME:":
                lv_name = word
            if prev == "SIZE:":
                pp_size = int(word)
            if prevprev == "SIZE:":
                pp_unit = word
            if prev == "PPs:" and prevprev != "STALE":
                pps = int(word)
            prevprev = prev
            prev = word
        if pps == 0 or pp_size == 0 or pp_unit == '' or vg_name is None:
            # report the partial parse results through the log instead of
            # leftover debug prints on stdout
            self.log.error("logical volume %s information fetching error "
                           "(pps=%s pp_size=%s pp_unit=%s vg_name=%s)" %
                           (device, pps, pp_size, pp_unit, vg_name))
            raise ex.excError
        # normalize the size to megabytes
        if pp_unit == 'megabyte(s)':
            mult = 1
        elif pp_unit == 'gigabyte(s)':
            mult = 1024
        elif pp_unit == 'terabyte(s)':
            mult = 1024*1024
        else:
            self.log.error("unexpected logical volume PP size unit: %s"%pp_unit)
            raise ex.excError
        return (vg_name, lv_name, pps*pp_size*mult)

    def snapcreate(self, m):
        """Create, mount and register a snapshot of mount resource 'm'.
        Raises syncNotSnapable / syncSnapExists / syncSnapCreateError /
        syncSnapMountError / ex.excError on the respective failures."""
        snap_name = ''
        snap_mnt = ''
        (vg_name, lv_name, lv_size) = self.lv_info(m.device)
        if lv_name is None:
            self.log.error("can not snap %s: not a logical volume"%m.device)
            raise ex.syncNotSnapable
        if len(lv_name) > 12:
            # 'sy_' prefix + name must fit the AIX 15-char LV name limit
            self.log.error("can not snap lv with name >12 chars")
            raise ex.excError
        snap_name = 'sy_'+os.path.basename(lv_name)
        if self.lv_exists(os.path.join(vg_name, snap_name)):
            self.log.error("snap of %s already exists"%(lv_name))
            raise ex.syncSnapExists
        # snapshot LV is sized at 10% of the origin LV
        (ret, buff, err) = self.vcall(['mklv', '-t', 'jfs2', '-y', snap_name, vg_name, str(lv_size//10)+'M'])
        if ret != 0:
            raise ex.syncSnapCreateError
        snap_mnt = '/service/tmp/osvc_sync_'+os.path.basename(vg_name)+'_'+os.path.basename(lv_name)
        if not os.path.exists(snap_mnt):
            os.makedirs(snap_mnt, 0o755)
        snap_dev = os.path.join(os.sep, 'dev', snap_name)
        (ret, buff, err) = self.vcall(['snapshot', '-o', 'snapfrom='+m.mount_point, snap_dev])
        if ret != 0:
            raise ex.syncSnapMountError
        (ret, buff, err) = self.vcall(['mount', '-o', 'snapshot', snap_dev, snap_mnt])
        if ret != 0:
            raise ex.syncSnapMountError
        self.snaps[m.mount_point] = dict(lv_name=lv_name,
                                         vg_name=vg_name,
                                         snap_name=snap_name,
                                         snap_mnt=snap_mnt,
                                         snap_dev=snap_dev)

    def snapdestroykey(self, s):
        """Unmount and destroy the snapshot registered under key 's'
        (a mount point in self.snaps)."""
        if protected_mount(self.snaps[s]['snap_mnt']):
            self.log.error("the snapshot is no longer mounted in %s. panic."%self.snaps[s]['snap_mnt'])
            raise ex.excError
        # best-effort kill of snapshot users before unmounting
        cmd = ['fuser', '-c', '-x', '-k', self.snaps[s]['snap_mnt']]
        ret = qcall(cmd)
        cmd = ['umount', self.snaps[s]['snap_mnt']]
        (ret, out, err) = self.vcall(cmd)
        cmd = ['snapshot', '-d', self.snaps[s]['snap_dev']]
        (ret, buff, err) = self.vcall(cmd)
opensvc-1.8~20170412/lib/rcAdvfs.py 0000644 0001750 0001750 00000014365 13073467726 017004 0 ustar jkelbert jkelbert import os
import glob
from rcUtilities import call
class ExInit(Exception):
    """Raised when AdvFS command output can not be parsed."""
class Fset(object):
    """One fileset parsed from 'showfsets -k' output lines, e.g.:

    stock_systemes
        Id           : 4ad8612f.000923f8.1.8001
        Clone is     : stock_systemes@osvc_sync
        Files        : 499709,  SLim=0,  HLim=0
        Blocks (1k)  : 35305996,  SLim=0,  HLim=0
    stock_systemes@osvc_sync
        Id           : 4ad8612f.000923f8.2.8001
        Clone of     : stock_systemes
        Revision     : 1

    The fileset name is the only line not starting with a tab.
    """
    def __init__(self, lines):
        # Initialize every attribute up-front: clone filesets have no
        # Files/Blocks lines and regular filesets have no Clone/Revision
        # lines, and the original code left the missing attributes unset,
        # making __str__ raise AttributeError on clone filesets.
        self.name = None
        self.fsetid = None
        self.cloneof = None
        self.cloneis = None
        self.revision = None
        self.files_count = 0
        self.files_slim = 0
        self.files_hlim = 0
        self.block_count = 0
        self.block_slim = 0
        self.block_hlim = 0
        for line in lines:
            if not line.startswith('\t'):
                self.name = line.strip()
            elif "Id" in line:
                self.fsetid = line.split(':')[-1].strip()
            elif "Clone of" in line:
                self.cloneof = line.split(':')[-1].strip()
            elif "Clone is" in line:
                self.cloneis = line.split(':')[-1].strip()
            elif "Revision" in line:
                self.revision = line.split(':')[-1].strip()
            elif "Files" in line:
                # "Files : N, SLim= X, HLim= Y" -> [N, SLim=, X, HLim=, Y]
                line = line[line.index(':')+1:]
                l = line.split()
                self.files_count = int(l[0].replace(',',''))
                self.files_slim = int(l[2].replace(',',''))
                self.files_hlim = int(l[4].replace(',',''))
            elif "Blocks" in line:
                line = line[line.index(':')+1:]
                l = line.split()
                self.block_count = int(l[0].replace(',',''))
                self.block_slim = int(l[2].replace(',',''))
                self.block_hlim = int(l[4].replace(',',''))

    def fsname(self):
        """Return the mountable 'domain#fileset' name. Requires
        self.domain to be set by Fdmn.__iadd__."""
        return "#".join((self.domain.name, self.name))

    def __str__(self):
        s = "fileset:\n"
        s += " fsetid: %s\n" % self.fsetid
        s += " name: %s\n" % self.name
        s += " fsname: %s\n" % self.fsname()
        s += " files_count: %d\n" % self.files_count
        s += " files_slim: %d\n" % self.files_slim
        s += " files_hlim: %d\n" % self.files_hlim
        s += " block_count: %d\n" % self.block_count
        s += " block_slim: %d\n" % self.block_slim
        s += " block_hlim: %d\n" % self.block_hlim
        return s
class Volume(object):
    """One volume line of 'showfdmn' output, e.g.:

    2L 62914560 21056568 67% on 256 256 /dev/disk/dsk13c

    Raises ExInit when the line does not have exactly 8 fields.
    """
    # attribute names, in the order they must appear in __str__
    _FIELDS = ("volid", "name", "size", "free", "used_pct",
               "cmode", "rblks", "wblks")

    def __init__(self, s):
        fields = s.split()
        if len(fields) != 8:
            raise ExInit()
        volid, size, free, used, cmode, rblks, wblks, name = fields
        self.volid = volid
        self.name = name
        self.cmode = cmode
        self.size = int(size)
        self.free = int(free)
        self.used_pct = int(used.replace('%', ''))
        self.rblks = int(rblks)
        self.wblks = int(wblks)

    def __str__(self):
        lines = ["volume:"]
        for attr in self._FIELDS:
            lines.append(" %s: %s" % (attr, getattr(self, attr)))
        return "\n".join(lines) + "\n"
class Fdmn(object):
    """An AdvFS domain, populated from 'showfdmn' (volumes) and
    'showfsets -k' (filesets). Raises ExInit when either command fails
    or its output is too short."""

    def __init__(self, name):
        self.used_pct = 0
        self.size = 0
        self.free = 0
        cmd = ['showfdmn', name]
        # NOTE(review): rcUtilities.call conventionally returns
        # (ret, out, err); the unpack order below looks swapped — verify,
        # as 'ret != 0' would then be testing the stderr string
        out, err, ret = call(cmd)
        if ret != 0:
            raise ExInit()
        d = {}
        """
        Id Date Created LogPgs Version Domain Name
        46a70bfd.000964b0 Wed Jul 25 10:38:21 2007 512 4 dom1
        Vol 1K-Blks Free % Used Cmode Rblks Wblks Vol Name
        2L 62914560 21056568 67% on 256 256 /dev/disk/dsk13c
        """
        lines = out.split('\n')
        if len(lines) < 5:
            raise ExInit()
        # line 2 holds the domain header fields
        header = lines[2].split()
        self.domid = header[0]
        self.name = header[-1]
        self.version = header[-2]
        self.logpgs = header[-3]
        self.vols = {}
        self.fsets = {}
        # volume lines start at line 5; non-volume lines raise ExInit
        # inside Volume() and are skipped
        for line in lines[5:]:
            try:
                v = Volume(line)
                # __iadd__ aggregates sizes and registers the volume
                self += v
            except ExInit:
                pass
        cmd = ['showfsets', '-k', name]
        out, err, ret = call(cmd)
        if ret != 0:
            raise ExInit()
        lines = out.split('\n')
        n_lines = len(lines)
        if n_lines == 0:
            return
        # group lines per fileset: a non-tab line starts a new fileset
        # block at index h; flush the previous block when the next one
        # starts or at the last line.
        # NOTE(review): 'h' is unbound if the first line starts with a
        # tab — relies on showfsets output shape
        for i, line in enumerate(lines):
            if i != 0 and not line.startswith('\t') or i == n_lines - 1:
                f = Fset(lines[h:i])
                self += f
            if not line.startswith('\t'):
                h = i

    def __iadd__(self, o):
        """Register a Volume (aggregating domain size/free/used_pct) or a
        Fset, and set its back-reference to this domain."""
        if type(o) == Volume:
            self.size += o.size
            self.free += o.free
            self.used_pct = int(100. * (self.size - self.free) / self.size)
            o.domain = self
            self.vols[o.name] = o
        elif type(o) == Fset:
            o.domain = self
            self.fsets[o.name] = o
        return self

    def __str__(self):
        s = "domain:\n"
        s += " domid: %s\n" % self.domid
        s += " name: %s\n" % self.name
        s += " version: %s\n" % self.version
        s += " logpgs: %s\n" % self.logpgs
        for v in self.vols.values():
            s += str(v)
        for v in self.fsets.values():
            s += str(v)
        return s

    def list_volnames(self):
        """Return the names of the volumes backing this domain."""
        l = []
        for v in self.vols.values():
            l.append(v.name)
        return l
class Fdmns(object):
    """Registry of AdvFS domains found under /etc/fdmns, loaded lazily."""

    def __init__(self):
        self.load_fdmns()

    def list_fdmns(self):
        """Return the known domain names."""
        return self.fdmns.keys()

    def load_fdmns(self):
        """Scan /etc/fdmns for domain directories. Each entry starts as
        an empty dict placeholder, replaced by a Fdmn object on first
        access through get_fdmn()."""
        self.fdmns = {}
        for n in glob.glob('/etc/fdmns/*'):
            n = os.path.basename(n)
            if n.startswith('.'):
                continue
            self.fdmns[n] = {}

    def load_fdmn(self, name):
        """Parse the domain and cache the resulting Fdmn object."""
        d = Fdmn(name)
        self.fdmns[name] = d

    def get_fdmn(self, name):
        """Return the Fdmn object for 'name' (loading it on first call),
        or None when the domain is unknown."""
        if name not in self.fdmns:
            return
        # Bug fix: the original tested len(self.fdmns[name]) == 0, which
        # raised TypeError on the second call for a given name because a
        # loaded Fdmn object has no __len__. Test for the placeholder
        # type instead.
        if not isinstance(self.fdmns[name], Fdmn):
            self.load_fdmn(name)
        return self.fdmns[name]
if __name__ == "__main__":
    # manual test: dump the AdvFS domains found on this host
    o = Fdmns()
    print(o.list_fdmns())
    d = o.get_fdmn('dom1')
    print(d)
opensvc-1.8~20170412/lib/rcDiskInfoAIX.py 0000644 0001750 0001750 00000004062 13073467726 020002 0 ustar jkelbert jkelbert from rcUtilities import call, justcall
import rcDiskInfo
import re
class diskInfo(rcDiskInfo.diskInfo):
    """AIX disk information provider: vendor/model from lscfg, size from
    bootinfo, wwid from the ODM (with a vscsi fallback). Results are
    cached per device in self.h."""

    def __init__(self):
        # per-device cache: lname -> dict(vid, pid, wwid, size)
        self.h = {}

    def scan(self, lname):
        """Collect and cache vendor, model, wwid and size (MB) for
        device 'lname'."""
        vid = 'unknown'
        pid = 'unknown'
        wwid = 'unknown'
        size = 'unknown'
        cmd = ['lscfg', '-vpl', lname]
        (ret, out, err) = call(cmd)
        for f in out.split('\n'):
            # lscfg VPD lines pad with dots: "Manufacturer....IBM"
            if "Manufacturer" in f:
                vid = f.split('.')[-1]
            if "Machine Type and Model" in f:
                pid = f.split('.')[-1]
        cmd = ['bootinfo', '-s', lname]
        out, err, ret = justcall(cmd)
        if ret == 0:
            size = int(out.strip())
        else:
            size = 0
        wwid = self.odmget(lname, 'ww_name').replace('0x', '')
        if wwid == 'unknown':
            # no FC ww_name in the ODM: fall back to a vscsi identifier
            wwid = self.get_vscsi_id(lname)
        self.h[lname] = dict(vid=vid, pid=pid, wwid=wwid, size=size)

    def get_vscsi_id(self, lname):
        """Derive an identifier from the device's physical location code,
        with the controller/target part stripped."""
        cmd = ['lscfg', '-l', lname]
        (ret, out, err) = call(cmd)
        if ret != 0:
            return 'unknown'
        l = out.split()
        if len(l) < 2:
            return 'unknown'
        d = l[1]
        regex = re.compile(r'-C[0-9]+-T[0-9]+')
        d = regex.sub('', d)
        return d

    def odmget(self, lname, attr):
        """Return the value of ODM attribute 'attr' for device 'lname',
        or 'unknown' when absent."""
        cmd = ['odmget', '-q', 'name='+lname+' AND attribute='+attr, 'CuAt']
        (ret, out, err) = call(cmd)
        for f in out.split('\n'):
            if "value" not in f:
                continue
            return f.split(" = ")[-1].strip('"')
        return 'unknown'

    def devkey(self, dev):
        # cache keys are device names without the /dev/ prefix
        dev = dev.replace("/dev/", "")
        return dev

    def get(self, dev, type):
        """Return cached property 'type' for 'dev', scanning on a cache
        miss. (The 'type' parameter shadows the builtin; kept for
        interface compatibility.)"""
        dev = self.devkey(dev)
        if dev not in self.h:
            self.scan(dev)
        return self.h[dev][type]

    def disk_id(self, dev):
        return self.get(dev, 'wwid')

    def disk_vendor(self, dev):
        return self.get(dev, 'vid')

    def disk_model(self, dev):
        return self.get(dev, 'pid')

    def disk_size(self, dev):
        return self.get(dev, 'size')
opensvc-1.8~20170412/lib/resFsSgHP-UX.py 0000644 0001750 0001750 00000001231 13073467726 017536 0 ustar jkelbert jkelbert from rcGlobalEnv import rcEnv
Res = __import__("resFsHP-UX")
class Mount(Res.Mount):
    """HP-UX fs resource for mounts managed by a ServiceGuard package.

    start/stop are no-ops because ServiceGuard owns the mount lifecycle;
    is_up prefers the cmviewcl monitored-resource status when available.
    """

    def __init__(self, **kwargs):
        # keep the device around: it doubles as the SG resource name
        self.sgname = kwargs.get("device", None)
        Res.Mount.__init__(self, **kwargs)

    def is_up(self):
        cmviewcl = self.svc.cmviewcl
        if 'resource' not in cmviewcl or \
           self.mon_name not in cmviewcl['resource']:
            # not monitored by the cluster: use the generic mount check
            return Res.Mount.is_up(self)
        state = cmviewcl['resource'][self.mon_name][('status', rcEnv.nodename)]
        return state == "up"

    def start(self):
        # mounting is delegated to ServiceGuard
        pass

    def stop(self):
        # unmounting is delegated to ServiceGuard
        pass
opensvc-1.8~20170412/lib/resSyncHp3par.py 0000644 0001750 0001750 00000023341 13073467726 020112 0 ustar jkelbert jkelbert import os
from rcGlobalEnv import rcEnv
import rcExceptions as ex
import rcStatus
import time
import resSync
import datetime
import rcHp3par as rc
class syncHp3par(resSync.Sync):
    """HP 3PAR Remote Copy group (rcg) replication resource.

    Drives start/stop/failover/reverse of a remote copy group through
    the array CLI (rc.Hp3pars), on the local array or, when needed, on
    the remote (target) array.
    """

    def __init__(self,
                 rid=None,
                 array=None,
                 method=None,
                 mode=None,
                 rcg_names={},
                 **kwargs):
        resSync.Sync.__init__(self,
                              rid=rid,
                              type="sync.hp3par",
                              **kwargs)
        self.array = array
        # rcg_names maps array name -> remote copy group name on it
        self.rcg_names = rcg_names
        self.rcg = rcg_names[array]
        self.mode = mode
        self.method = method
        self.label = "hp3par %s %s"%(mode, self.rcg)
        self.array_obj = None
        self.remote_array_obj = None

    def __str__(self):
        return "%s array=%s method=%s mode=%s rcg=%s" % (
            resSync.Sync.__str__(self),
            self.array,
            self.method,
            self.mode,
            self.rcg)

    def on_add(self):
        """Resolve and cache the local array handle when the resource is
        attached to its service. Raises ex.excError when the array is
        not accessible."""
        try:
            arrays = rc.Hp3pars(objects=[self.array], log=self.log)
        except Exception as e:
            raise ex.excError(str(e))
        if len(arrays.arrays) == 1:
            self.array_obj = arrays.arrays[0]
        if self.array_obj is None:
            raise ex.excError("array %s is not accessible" % self.array)
        self.array_obj.svcname = self.svc.svcname

    def get_array_obj(self, target=None, log=False):
        """Return the local array handle, or (lazily) the handle of the
        remote array named by 'target'."""
        if target is None:
            array_name = self.array
            return self.array_obj
        else:
            array_name = target
            if self.remote_array_obj is None:
                try:
                    self.remote_array_obj = rc.Hp3pars(objects=[target], log=self.log).arrays[0]
                    if self.remote_array_obj is None:
                        raise ex.excError("array %s is not accessible" % array_name)
                    self.remote_array_obj.svcname = self.svc.svcname
                    return self.remote_array_obj
                except Exception as e:
                    raise ex.excError(str(e))
            # NOTE(review): when remote_array_obj is already cached this
            # method falls through and returns None — looks like a bug;
            # callers would then invoke rcmd on None. Confirm intent.

    def _cmd(self, cmd, target=None, log=False):
        """Run an array CLI command on the local array (or 'target').
        With log=True, command and output are logged and a non-empty
        stderr raises ex.excError."""
        array_obj = self.get_array_obj(target=target, log=self.log)
        if log:
            if target is not None:
                suffix = " (on " + target + ")"
            else:
                suffix = ""
            self.log.info(cmd+suffix)
        if log:
            out, err = array_obj.rcmd(cmd, log=log)
        else:
            out, err = array_obj.rcmd(cmd)
        if not log:
            return out, err
        if len(out) > 0:
            self.log.info(out)
        if len(err) > 0:
            self.log.error(err)
            raise ex.excError()
        return out, err

    def can_sync(self, target=None, s=None):
        """Return False when the last VV sync is too recent (skip_sync)."""
        data = self.showrcopy()
        last = data['vv'][0]['LastSyncTime']
        if self.skip_sync(datetime.datetime.utcnow()-last):
            return False
        return True

    def sync_resync(self):
        self.sync_update()

    def syncswap(self):
        """Swap replication direction: only valid from the secondary
        (non-Primary) side."""
        data = self.showrcopy()
        if data['rcg']['Role'] == 'Primary':
            self.log.error("rcopy group %s role is Primary. refuse to swap")
            raise ex.excError()
        self.stoprcopygroup()
        self.setrcopygroup_reverse()
        self.startrcopygroup()

    def sync_update(self):
        self.syncrcopygroup()

    def sync_revert(self):
        self.setrcopygroup_revert()

    def sync_resume(self):
        self.startrcopygroup()

    def sync_quiesce(self):
        self.stoprcopygroup()

    def sync_break(self):
        self.stoprcopygroup()

    def start(self):
        """Take over the replica: failover when split from the target
        array, reverse roles when still joined."""
        data = self.showrcopy()
        target = data['rcg']['Target']
        if self.is_splitted(target):
            self.log.info("we are split from %s array" % target)
            self.start_splitted()
        else:
            self.log.info("we are joined with %s array" % target)
            self.start_joined()

    def start_joined(self):
        """Become Primary while replication links are up: stop the group
        then reverse roles."""
        data = self.showrcopy()
        if data['rcg']['Role'] == 'Primary':
            self.log.info("rcopy group %s role is already Primary. skip" % self.rcg)
            return
        self.stoprcopygroup()
        self.setrcopygroup_reverse()

    def start_splitted(self):
        """Become Primary-Rev while replication links are down."""
        self.setrcopygroup_failover()

    def stop(self):
        pass

    def setrcopygroup_revert(self):
        """Undo a failover (Primary-Rev -> natural roles)."""
        data = self.showrcopy()
        if data['rcg']['Role'] != 'Primary-Rev':
            self.log.error("rcopy group %s role is not Primary-Rev. refuse to setrcopygroup revert" % self.rcg)
            return
        self._cmd("setrcopygroup reverse -f -waittask -stopgroups -local -current %s" % self.rcg, log=True)
        self.clear_caches()

    def setrcopygroup_failover(self):
        data = self.showrcopy()
        if data['rcg']['Role'] == 'Primary-Rev':
            self.log.info("rcopy group %s role is already Primary-Rev. skip setrcopygroup failover" % self.rcg)
            return
        self._cmd("setrcopygroup failover -f -waittask %s" % self.rcg, log=True)
        self.clear_caches()

    def setrcopygroup_reverse(self):
        data = self.showrcopy()
        if data['rcg']['Role'] == 'Primary':
            self.log.info("rcopy group %s role is already Primary. skip setrcopygroup reverse" % self.rcg)
            return
        self._cmd("setrcopygroup reverse -f -waittask %s" % self.rcg, log=True)
        self.clear_caches()

    def syncrcopygroup(self):
        """Trigger a manual sync; only meaningful from the Primary side
        and when the group is not in Periodic (scheduled) mode."""
        data = self.showrcopy()
        if data['rcg']['Role'] != 'Primary':
            self.log.info("rcopy group %s role is not Primary. skip sync" % self.rcg)
            return
        if data['rcg']['Mode'] == 'Periodic':
            self.log.info("skip syncrcopy as group %s is in periodic mode" % self.rcg)
            return
        self._cmd("syncrcopy -w %s" % self.rcg, log=True)
        self.clear_caches()

    def startrcopygroup(self):
        """Start replication; must be run from the Primary side."""
        data = self.showrcopy()
        if data['rcg']['Status'] == "Started":
            self.log.info("rcopy group %s is already started. skip startrcopygroup" % self.rcg)
            return
        if data['rcg']['Role'] != 'Primary':
            self.log.error("rcopy group %s role is not Primary. refuse to start rcopy" % self.rcg)
            raise ex.excError()
        self._cmd("startrcopygroup %s" % self.rcg, log=True)
        self.clear_caches()

    def stoprcopygroup(self):
        """Stop replication, from the Primary side when we hold it,
        otherwise on the target array (using its own rcg name)."""
        data = self.showrcopy()
        if data['rcg']['Status'] == "Stopped":
            self.log.info("rcopy group %s is already stopped. skip stoprcopygroup" % self.rcg)
            return
        if data['rcg']['Role'] == "Primary":
            self._cmd("stoprcopygroup -f %s" % self.rcg, log=True)
        else:
            target = data['rcg']['Target']
            self._cmd("stoprcopygroup -f %s" % self.rcg_names[target], target=target, log=True)
        self.clear_caches()

    def is_splitted(self, target):
        """Return True when no replication link to 'target' is Up."""
        data = self.showrcopy_links()
        for h in data:
            if h['Target'] != target:
                continue
            if h['Status'] == "Up":
                return False
        return True

    def showrcopy_links(self):
        """
        Target,Node,Address,Status,Options
        baie-pra,0:2:4,20240002AC00992B,Down,
        baie-pra,1:2:3,21230002AC00992B,Down,
        receive,0:2:4,20240002AC00992B,Up,
        receive,1:2:3,21230002AC00992B,Up,
        """
        out, err = self._cmd("showrcopy links")
        cols = ["Target", "Node", "Address", "Status", "Options"]
        lines = out.split('\n')
        data = []
        for line in lines:
            v = line.strip().split(",")
            if len(v) != len(cols):
                continue
            h = {}
            for a, b in zip(cols, v):
                h[a] = b
            data.append(h)
        return data

    def clear_caches(self):
        # invalidate the showrcopy cache after any state change
        self.array_obj.clear_showrcopy_cache()

    def showrcopy(self):
        return self.array_obj.showrcopy(self.rcg)

    def _status(self, verbose=False):
        """Return UP when the group is Started, in the expected mode,
        fully synced and recent enough; WARN otherwise; UNDEF when the
        array can not be interrogated."""
        if self.array_obj is None:
            self.status_log("array %s is not accessible" % self.array)
            return rcStatus.WARN
        try:
            data = self.showrcopy()
        except ex.excError as e:
            self.status_log(str(e))
            return rcStatus.UNDEF
        # oldest acceptable LastSyncTime
        elapsed = datetime.datetime.utcnow() - datetime.timedelta(minutes=self.sync_max_delay)
        r = None
        if data['rcg']['Status'] != "Started":
            self.status_log("rcopy group status is not Started (%s)"%data['rcg']['Status'])
            r = rcStatus.WARN
        if self.mode == "async" and data['rcg']['Mode'] != "Periodic":
            self.status_log("rcopy group mode is not Periodic (%s)"%data['rcg']['Mode'])
            r = rcStatus.WARN
        if self.mode == "sync" and data['rcg']['Mode'] != "Sync":
            self.status_log("rcopy group mode is not Sync (%s)"%data['rcg']['Mode'])
            r = rcStatus.WARN
        if self.mode == "async":
            l = [o for o in data['rcg']['Options'] if o.startswith('Period ')]
            if len(l) == 0:
                self.status_log("rcopy group period option is not set")
                r = rcStatus.WARN
            if 'auto_recover' not in data['rcg']['Options']:
                self.status_log("rcopy group auto_recover option is not set")
                r = rcStatus.WARN
        for vv in data['vv']:
            if vv['SyncStatus'] != 'Synced':
                self.status_log("vv %s SyncStatus is not Synced (%s)"%(vv['LocalVV'], vv['SyncStatus']))
                r = rcStatus.WARN
            if vv['LastSyncTime'] < elapsed:
                self.status_log("vv %s last sync too old (%s)"%(vv['LocalVV'], vv['LastSyncTime'].strftime("%Y-%m-%d %H:%M")))
                r = rcStatus.WARN
        if r is not None:
            return r
        return rcStatus.UP
opensvc-1.8~20170412/lib/resScsiReservHP-UX.py 0000644 0001750 0001750 00000011734 13073467726 020775 0 ustar jkelbert jkelbert import resources as Res
import uuid
import re
import os
import time
import rcStatus
import rcExceptions as ex
from rcUtilities import which
from subprocess import *
import resScsiReserv
def mpath_to_path(disks):
    """Translate HP-UX device paths to raw legacy paths.

    /dev/dsk paths are rewritten to /dev/rdsk directly; existing agile
    paths (/dev/disk, /dev/rdisk) are mapped to their legacy device
    files via 'ioscan -F -m dsf', rewritten to /dev/rdsk. Anything else
    is dropped.
    """
    raw_paths = []
    for disk in disks:
        if "/dev/dsk" in disk:
            raw_paths.append(disk.replace("/dev/dsk", "/dev/rdsk"))
            continue
        if "/dev/disk" not in disk and "/dev/rdisk" not in disk:
            continue
        if not os.path.exists(disk):
            continue
        proc = Popen(['ioscan', '-F', '-m', 'dsf', disk],
                     stderr=None, stdout=PIPE, close_fds=True)
        out = proc.communicate()[0]
        if proc.returncode != 0:
            continue
        # expected output: "agile_path:legacy_path [legacy_path ...]"
        parts = out.split(':')
        if len(parts) != 2:
            continue
        for legacy in parts[1].split():
            raw_paths.append(legacy.replace("/dev/dsk", "/dev/rdsk"))
    return raw_paths
class ScsiReserv(resScsiReserv.ScsiReserv):
def __init__(self,
rid=None,
peer_resource=None,
no_preempt_abort=False,
**kwargs):
resScsiReserv.ScsiReserv.__init__(self,
rid=rid,
peer_resource=peer_resource,
no_preempt_abort=no_preempt_abort,
**kwargs)
self.prtype = 'wero'
self.leg_mpath_disable()
def get_disks(self):
if len(self.disks) > 0:
return
self.disks = mpath_to_path(self.peer_resource.disklist())
def scsireserv_supported(self):
if which('scu') is None:
return False
return True
def leg_mpath_disable(self):
cmd = ['scsimgr', 'get_attr', '-p', '-a', 'leg_mpath_enable']
p = Popen(cmd, stderr=None, stdout=PIPE, close_fds=True)
buff = p.communicate()
ret = p.returncode
if ret != 0:
self.log.error("can not fetch 'leg_mpath_enable' value")
raise ex.excError
if 'false' in buff[0]:
return
cmd = ['scsimgr', 'save_attr', '-a', 'leg_mpath_enable=false']
self.log.info(' '.join(cmd))
p = Popen(cmd, stderr=None, stdout=PIPE, close_fds=True)
buff = p.communicate()
ret = p.returncode
if ret != 0:
self.log.error("can not set 'leg_mpath_enable' value")
raise ex.excError
def ack_unit_attention(self, d):
return 0
def disk_registered(self, disk):
cmd = [ 'scu', '-f', disk, 'show', 'keys' ]
(ret, out, err) = self.call(cmd)
if ret != 0:
self.log.error("failed to read registrations for disk %s" % disk)
if self.hostid in out:
return True
return False
def disk_register(self, disk):
cmd = [ 'scu', '-f', disk, 'preserve', 'register', 'skey', self.hostid ]
(ret, out, err) = self.vcall(cmd)
if ret != 0:
self.log.error("failed to register key %s with disk %s" % (self.hostid, disk))
return ret
def disk_unregister(self, disk):
cmd = [ 'scu', '-f', disk, 'preserve', 'register', 'skey', '0', 'key', self.hostid ]
(ret, out, err) = self.vcall(cmd)
if ret != 0:
self.log.error("failed to unregister key %s with disk %s" % (self.hostid, disk))
return ret
def get_reservation_key(self, disk):
cmd = [ 'scu', '-f', disk, 'show', 'reservation' ]
(ret, out, err) = self.call(cmd)
if ret != 0:
self.log.error("failed to list reservation for disk %s" % disk)
if 'Reservation Key' not in out:
return None
for line in out.split('\n'):
if 'Reservation Key' in line:
return line.split()[-1]
raise Exception()
def disk_reserved(self, disk):
cmd = [ 'scu', '-f', disk, 'show', 'reservation' ]
(ret, out, err) = self.call(cmd)
if ret != 0:
self.log.error("failed to read reservation for disk %s" % disk)
if self.hostid in out:
return True
return False
def disk_release(self, disk):
cmd = [ 'scu', '-f', disk, 'preserve', 'release', 'key', self.hostid, 'type', self.prtype ]
(ret, out, err) = self.vcall(cmd)
if ret != 0:
self.log.error("failed to release disk %s" % disk)
return ret
def disk_reserve(self, disk):
    """Acquire a reservation on a disk with our key; return the command status."""
    ret = self.vcall(
        ['scu', '-f', disk, 'preserve', 'reserve',
         'key', self.hostid, 'type', self.prtype]
    )[0]
    if ret:
        self.log.error("failed to reserve disk %s" % disk)
    return ret
def _disk_preempt_reservation(self, disk, oldkey):
    """Preempt another host's reservation (oldkey) with our own key."""
    ret = self.vcall(
        ['scu', '-f', disk, 'preserve', 'preempt',
         'key', self.hostid, 'skey', oldkey, 'type', self.prtype]
    )[0]
    if ret:
        self.log.error("failed to preempt reservation for disk %s" % disk)
    return ret
opensvc-1.8~20170412/lib/checkZpoolFreeBSD.py 0000777 0001750 0001750 00000000000 13073467726 024237 2checkZpoolSunOS.py ustar jkelbert jkelbert opensvc-1.8~20170412/lib/rcAssetOSF1.py 0000644 0001750 0001750 00000025242 13073467726 017445 0 ustar jkelbert jkelbert import os
from rcUtilities import justcall, which
import rcAsset
from distutils.version import LooseVersion as V
import re
# Set to True to feed canned command outputs from _justcall() instead of
# executing the real platform commands (useful for offline testing).
sim = False

# Matches hwmgr listing lines that start with a numeric hardware id,
# e.g. " 53: emx0 ...". Raw string avoids the invalid-escape warning
# the original non-raw "\W" produced.
regex = re.compile(r"^\W*[0-9]*:")
class Asset(rcAsset.Asset):
    """Tru64/OSF1 (Digital UNIX) asset inventory collector.

    Every probe shells out through the module-level _justcall() wrapper,
    which can be switched to canned outputs via the module 'sim' flag.
    """

    def __init__(self, node):
        rcAsset.Asset.__init__(self, node)

    def convert(self, s, unit):
        """Convert a size string with unit 'M' or 'G' to integer MB (0 if unknown unit)."""
        if unit == "M":
            return int(float(s))
        elif unit == "G":
            return 1024 * int(float(s))
        else:
            return 0

    def _get_mem_bytes(self):
        """Return physical memory in MB as a string, parsed from 'vmstat -P'."""
        # MB
        cmd = ['vmstat', '-P']
        out, err, ret = _justcall(cmd)
        for line in out.split('\n'):
            if 'Total' not in line:
                continue
            l = line.split()
            s = l[-2]
            unit = l[-1]
            mem = self.convert(s, unit)
            return str(mem)
        return '0'

    def _get_mem_banks(self):
        # not discoverable on this platform
        return '0'

    def _get_mem_slots(self):
        # not discoverable on this platform
        return '0'

    def _get_os_vendor(self):
        return 'HP'

    def _get_os_release(self):
        """Return the OS release tokens from 'uname -a' (e.g. 'V5.1 2650')."""
        cmd = ['uname', '-a']
        out, err, ret = _justcall(cmd)
        l = out.split()
        return ' '.join(l[2:4])

    def _get_os_kernel(self):
        """Return the most recent installed patch kit base name, or 'Unknown'."""
        cmd = ['dupatch', '-track', '-type', 'kit', '-nolog']
        out, err, ret = _justcall(cmd)
        l = []
        for line in out.split('\n'):
            line = line.strip()
            if not line.startswith('- T64') or 'OSF' not in line:
                continue
            l.append(line.split()[1])
        if len(l) == 0:
            return 'Unknown'
        # Fix: the original called l.sort() with a boolean-returning cmp
        # function, which does not order correctly on python2 (cmp must
        # return -1/0/1) and is a TypeError on python3. Sorting on the
        # parsed version as a key is the correct equivalent.
        l.sort(key=V)
        return l[-1].split('-')[0]

    def _get_os_arch(self):
        cmd = ['uname', '-a']
        out, err, ret = _justcall(cmd)
        l = out.split()
        return l[-1]

    def _get_cpu_freq(self):
        """Return the CPU frequency in MHz parsed from 'psrinfo -v', or '0'."""
        cmd = ['psrinfo', '-v']
        out, err, ret = _justcall(cmd)
        for line in out.split('\n'):
            if 'operates at' not in line:
                continue
            l = line.split()
            if len(l) < 2:
                continue
            return l[-2]
        return '0'

    def _get_cpu_cores(self):
        # one core per die on this platform
        return self._get_cpu_dies()

    def _get_cpu_dies(self):
        """Return the processor count as a string (one 'psrinfo' line per cpu)."""
        cmd = ['psrinfo']
        out, err, ret = _justcall(cmd)
        return str(len(out.split('\n')) - 1)

    def _get_cpu_model(self):
        """Return the CPU model token from 'psrinfo -v', or 'Unknown'."""
        cmd = ['psrinfo', '-v']
        out, err, ret = _justcall(cmd)
        for line in out.split('\n'):
            if 'operates at' not in line:
                continue
            l = line.split()
            if len(l) < 3:
                continue
            return l[2]
        return 'Unknown'

    def _get_serial(self):
        """Return the chassis serial from the 'sys_serial_num' console variable."""
        cmd = ['consvar', '-g', 'sys_serial_num']
        out, err, ret = _justcall(cmd)
        l = out.split('=')
        if len(l) == 2:
            return l[1].strip()
        return 'Unknown'

    def _get_model(self):
        """Return the hardware platform name from 'hwmgr -v h', or 'Unknown'."""
        cmd = ["hwmgr", "-v", "h"]
        out, err, ret = _justcall(cmd)
        for line in out.split('\n'):
            if "platform" not in line:
                continue
            l = line.split("platform")
            if len(l) != 2:
                continue
            return l[1].strip()
        return 'Unknown'

    def is_id(self, line):
        """True if the line starts with a numeric hardware id (e.g. ' 53:')."""
        if regex.match(line) is None:
            return False
        return True

    def __get_hba(self):
        """Map adapter name to (port wwn, 'fc') from 'hwmgr -show fibre -ada'."""
        # fc / fcoe
        cmd = ['hwmgr', '-show', 'fibre', '-ada']
        out, err, ret = _justcall(cmd)
        hba = {}
        for line in out.split('\n'):
            if self.is_id(line):
                l = line.split()
                hba_name = l[1]
            elif 'WWPN' in line:
                # NOTE(review): assumes a WWPN line is always preceded by an
                # id line that set hba_name -- confirm against hwmgr output.
                l = line.split()
                hba_portname = l[1].replace('-', '').lower()
                hba[hba_name] = (hba_portname, 'fc')
        return hba

    def _get_hba(self):
        hba = self.__get_hba()
        return hba.values()

    def _get_targets(self):
        """Return (hba wwpn, target wwpn) pairs from 'hwmgr -show fibre -topo'."""
        # fc / fcoe
        cmd = ['hwmgr', '-show', 'fibre', '-topo']
        out, err, ret = _justcall(cmd)
        tgt = []
        hba = self.__get_hba()
        for line in out.split('\n'):
            if self.is_id(line):
                l = line.split()
                hba_name = l[1]
            elif line.strip().startswith('0x'):
                l = line.split()
                if l[1].startswith('-'):
                    # negative target ids are fabric/management ports
                    continue
                tgt_portname = l[2].replace('-', '').lower()
                hba_portname = hba[hba_name][0]
                tgt.append((hba_portname, tgt_portname))
        return tgt
def _justcall(cmd):
    """Simulation-aware wrapper over rcUtilities.justcall().

    When the module-level 'sim' flag is True, return canned command
    outputs keyed by the command argument tuple instead of executing
    anything. Returns the usual (out, err, ret) triple.
    """
    if not sim:
        return justcall(cmd)
    # canned outputs captured from a real Tru64 host ("wrus01")
    data = {}
    data[('hwmgr', '-show', 'fibre', '-ada')] = """
ADAPTER LINK LINK FABRIC SCSI CARD
HWID: NAME STATE TYPE STATE BUS MODEL
--------------------------------------------------------------------------------
53: emx0 up point-to-point attached scsi3 KGPSA-CA
Revisions: driver 2.17 firmware 3.93A0
FC Address: 0x1ece00
TARGET: -1
WWPN/WWNN: 1000-0000-c922-585c 2000-0000-c922-585c
ADAPTER LINK LINK FABRIC SCSI CARD
HWID: NAME STATE TYPE STATE BUS MODEL
--------------------------------------------------------------------------------
61: emx1 up point-to-point attached scsi4 KGPSA-CA
Revisions: driver 2.17 firmware 3.93A0
FC Address: 0x1cce00
TARGET: -1
WWPN/WWNN: 1000-0000-c924-a43d 2000-0000-c924-a43d
"""
    data[('hwmgr', '-show', 'fibre', '-topo')] = """
ADAPTER LINK LINK FABRIC SCSI CARD
HWID: NAME STATE TYPE STATE BUS MODEL
--------------------------------------------------------------------------------
53: emx0 up point-to-point attached scsi3 KGPSA-CA
FC DID TARGET WWPN WWNN lfd LSIT
------------------------------------------------------------------------
0x382200 2 5000-1fe1-5012-9d49 5000-1fe1-5012-9d40 l-- L--T
0x381200 3 5000-1fe1-5012-9d4f 5000-1fe1-5012-9d40 l-- L--T
0x380200 0 5000-1fe1-5012-9d4d 5000-1fe1-5012-9d40 l-- L--T
0x383200 1 5000-1fe1-5012-9d4b 5000-1fe1-5012-9d40 l-- L--T
0xfffffc -1 21fc-0005-1e36-2110 1000-0005-1e36-2110 l-d ----
0xfffffe -1 20ce-0005-1e36-2110 1000-0005-1e36-2110 lf- ----
ADAPTER LINK LINK FABRIC SCSI CARD
HWID: NAME STATE TYPE STATE BUS MODEL
--------------------------------------------------------------------------------
61: emx1 up point-to-point attached scsi4 KGPSA-CA
FC DID TARGET WWPN WWNN lfd LSIT
------------------------------------------------------------------------
0xef1200 2 5000-1fe1-5012-9d4e 5000-1fe1-5012-9d40 l-- L--T
0xef3200 0 5000-1fe1-5012-9d4a 5000-1fe1-5012-9d40 l-- L--T
0xef0200 3 5000-1fe1-5012-9d4c 5000-1fe1-5012-9d40 l-- L--T
0xef2200 1 5000-1fe1-5012-9d48 5000-1fe1-5012-9d40 l-- L--T
0xfffffc -1 21fc-0005-1e36-1eee 1000-0005-1e36-1eee l-d ----
0xfffffe -1 20ce-0005-1e36-1eee 1000-0005-1e36-1eee lf- ----
"""
    data[('hwmgr', '-show', 'scsi', '-full', '-id', '83', '-nowrap')] = """
SCSI DEVICE DEVICE DRIVER NUM DEVICE FIRST
HWID: DEVICEID HOSTNAME TYPE SUBTYPE OWNER PATH FILE VALID PATH
-------------------------------------------------------------------------
83: 16 wrus01 disk none 2 8 dsk13 [3/2/1]
WWID:01000010:6005-08b4-000b-440a-0000-f000-1325-0000
BUS TARGET LUN PATH STATE
---------------------------------
3 2 1 valid
3 3 1 valid
3 1 1 valid
3 0 1 valid
4 3 1 valid
4 2 1 valid
4 1 1 valid
4 0 1 valid
"""
    data[('uname', '-a')] = """OSF1 wrus01 V5.1 2650 alpha
"""
    data[('dupatch', '-track', '-type', 'kit', '-nolog')] = """
Gathering details of relevant patches, this may take a bit of time
Patches installed on the system came from following software kits:
------------------------------------------------------------------
- T64V51BB24AS0003-20030929 OSF540
- T64V51BB26AS0005-20050502 IOS540
- T64V51BB26AS0005-20050502 OSF540
================
kernelver
NOTE
When a patch kit is listed, it does not necessarily mean
all patches on that kit are installed on your system.
"""
    data[('psrinfo',)] = """0 on-line since 05/19/2012 16:00:50
"""
    data[('psrinfo', '-v')] = """Status of processor 0 as of: 06/15/12 18:35:40
Processor has been on-line since 05/19/2012 16:00:50
The alpha EV6.8CB (21264C) processor operates at 1000 MHz,
has a cache size of 8388608 bytes,
and has an alpha internal floating point processor.
"""
    data[('consvar', '-g', 'sys_serial_num')] = """ sys_serial_num = AY14610125
"""
    data[('hwmgr', '-v', 'h')] = """HWID: hardware hierarchy
-------------------------------------------------------------------------------
1: platform AlphaServer ES45 Model 2
2: cpu CPU0
6: bus iop0
7: bus hose0
"""
    data[('ifconfig', '-a')] = """ee0: flags=c63
inet 10.6.65.37 netmask fffffc00 broadcast 10.6.67.255 ipmtu 1500
inet 10.6.66.160 netmask fffffc00 broadcast 10.6.67.255 ipmtu 1500
ee1: flags=c63
inet 10.40.32.241 netmask fffffc00 broadcast 10.40.35.255 ipmtu 1500
lo0: flags=100c89
inet 127.0.0.1 netmask ff000000 ipmtu 4096
sl0: flags=10
tun0: flags=80
tun1: flags=80
"""
    data[('vmstat', '-P')] = """Total Physical Memory = 1024.00 M
"""
    # a KeyError here means no canned output was recorded for this command
    return data[tuple(cmd)], '', 0
if __name__ == "__main__":
    # Ad-hoc smoke test: probe and print every asset attribute.
    # NOTE(review): expects to run on a Tru64 host unless the module-level
    # 'sim' flag is toggled to True.
    o = Asset("wrus01")
    print(o._get_mem_bytes())
    print(o._get_os_release())
    print(o._get_os_kernel())
    print(o._get_os_arch())
    print(o._get_cpu_freq())
    print(o._get_cpu_cores())
    print(o._get_cpu_dies())
    print(o._get_cpu_model())
    print(o._get_serial())
    print(o._get_model())
    print(o._get_hba())
    print(o._get_targets())
opensvc-1.8~20170412/lib/checkFmFmadm.py 0000644 0001750 0001750 00000002023 13073467726 017705 0 ustar jkelbert jkelbert import checks
import os
from rcUtilities import justcall, which
from rcGlobalEnv import rcEnv
class check(checks.check):
    """Solaris fault manager (fmadm) checker."""

    prefixes = [os.path.join(os.sep, "usr", "sbin")]
    fmadm = "fmadm"
    chk_type = "fm"
    chk_name = "Solaris fmadm"

    def find_fmadm(self):
        """Locate the fmadm binary via PATH first, then the known prefixes."""
        if which(self.fmadm):
            return self.fmadm
        candidates = (os.path.join(p, self.fmadm) for p in self.prefixes)
        for candidate in candidates:
            if os.path.exists(candidate):
                return candidate
        return

    def do_check(self):
        return self.do_check_ldpdinfo()

    def do_check_ldpdinfo(self):
        """Run 'fmadm faulty' and report a single instance, or undef."""
        fmadm = self.find_fmadm()
        if fmadm is None:
            return self.undef
        # chdir to the tmp dir so any side files land somewhere writable
        os.chdir(rcEnv.pathtmp)
        out, err, ret = justcall([fmadm, 'faulty'])
        if ret != 0:
            return self.undef
        # NOTE(review): len(out) is the *character* count of the command
        # output, not a fault count -- looks suspicious but preserved as-is.
        return [{
            'chk_instance': 'faults ',
            'chk_value': str(len(out)),
            'chk_svcname': '',
        }]
opensvc-1.8~20170412/lib/resSyncHp3parSnap.py 0000644 0001750 0001750 00000006455 13073467726 020743 0 ustar jkelbert jkelbert import os
from rcGlobalEnv import rcEnv
import rcExceptions as ex
import rcStatus
from rcUtilities import justcall
import resSync
import datetime
import rcHp3par as rc
class syncHp3parSnap(resSync.Sync):
    """HP 3PAR snapshot (virtual copy) sync resource.

    Refreshes a set of snapshot virtual volumes (vv_names) on a given
    array, and derives resource status from their creation timestamps.
    """

    def __init__(self,
                 rid=None,
                 array=None,
                 vv_names=None,
                 **kwargs):
        resSync.Sync.__init__(self,
                              rid=rid,
                              type="sync.hp3parsnap",
                              **kwargs)
        self.array = array
        # Fix: the original used a mutable default argument (vv_names=[]),
        # shared across every instance constructed without the argument.
        self.vv_names = vv_names if vv_names is not None else []
        self.label = "hp3parsnap %s" % ", ".join(self.vv_names)
        if len(self.label) > 50:
            self.label = self.label[:47] + "..."
        self.default_schedule = "@0"

    def __str__(self):
        return self.label

    def on_add(self):
        """Resolve the array object matching self.array; log if not found."""
        try:
            arrays = rc.Hp3pars(objects=[self.array], log=self.log)
        except Exception as e:
            raise ex.excInitError(str(e))
        if len(arrays.arrays) == 1:
            self.array_obj = arrays.arrays[0]
        else:
            self.array_obj = None
        if self.array_obj is None:
            self.log.error("no 3par array object")
            return
        self.array_obj.svcname = self.svc.svcname

    def can_sync(self, target=None, s=None):
        """True if a sync_update may run now (vvs present, schedule due,
        'sync_update' requirements satisfied)."""
        data = self.showvv()
        if len(data) < len(self.vv_names):
            return False
        # NOTE(review): only the first vv's CreationTime decides schedule
        # eligibility -- confirm this is intended.
        last = self.lastsync_s_to_datetime(data[0]['CreationTime'])
        if self.skip_sync(datetime.datetime.utcnow()-last):
            return False
        try:
            self.check_requires("sync_update")
        except ex.excError:
            return False
        return True

    def updatevv(self):
        self.array_obj.updatevv(vvnames=self.vv_names, log=self.log)

    def sync_update(self):
        """Refresh the snapshots, then drop cached array data."""
        self.updatevv()
        self.array_obj.clear_caches()

    def lastsync_s_to_datetime(self, s):
        """Parse a 3par CreationTime string into a datetime.

        NOTE(review): shells out to 'date --utc --date=...', which is a
        GNU coreutils extension -- non-portable to BSD date.
        """
        out, err, ret = justcall(["date", "--utc", "--date=%s" % s, '+%Y-%m-%d %H:%M:%S'])
        d = datetime.datetime.strptime(out.strip(), "%Y-%m-%d %H:%M:%S")
        return d

    def showvv(self):
        return self.array_obj.showvv(vvprov="snp", vvnames=self.vv_names, cols=["Name", "CreationTime"])

    def _status(self, verbose=False):
        """WARN on unreachable array, missing license, missing vvs or stale
        snapshots; UP otherwise."""
        if self.array_obj is None:
            self.status_log("array %s is not accessible" % self.array)
            return rcStatus.WARN
        if not self.array_obj.has_virtualcopy():
            self.status_log("array %s has no virtual copy license" % self.array)
            return rcStatus.WARN
        try:
            data = self.showvv()
        except ex.excError as e:
            self.status_log(str(e))
            return rcStatus.WARN
        r = None
        if len(data) < len(self.vv_names):
            missing = set(self.vv_names) - set([d["Name"] for d in data])
            for m in missing:
                self.status_log("missing vv: %s" % m)
            r = rcStatus.WARN
        elapsed = datetime.datetime.utcnow() - datetime.timedelta(minutes=self.sync_max_delay)
        for vv in data:
            if self.lastsync_s_to_datetime(vv['CreationTime']) < elapsed:
                self.status_log("vv %s last sync too old (%s)"%(vv['Name'], vv['CreationTime']))
                r = rcStatus.WARN
        if r is not None:
            return r
        return rcStatus.UP
opensvc-1.8~20170412/lib/rcNecIsm.py 0000644 0001750 0001750 00000024372 13073467726 017116 0 ustar jkelbert jkelbert from rcUtilities import which, justcall
import rcExceptions as ex
import os
class Nec(object):
    """Shared helpers for driving the NEC iSM storage CLI tools.

    Binaries are located lazily on first use and cached as class
    attributes; commands are run through justcall-based wrappers and
    their text output parsed into dicts.
    """
    # NOTE(review): class-level mutable list, shared by all Nec
    # subclasses/instances -- NecIsms.get_arrays() appends to it.
    arrays = []
    view_bin = None
    sc_query_bin = None
    sc_linkinfo_bin = None
    sc_unlink_bin = None
    sc_link_bin = None
    sc_create_bin = None

    def get_bin(self, bin_attr, candidates):
        # Resolve and cache the first candidate binary found in PATH;
        # raise if none is available.
        if getattr(self, bin_attr) is not None:
            return
        for bin in candidates:
            if which(bin) is not None:
                setattr(self, bin_attr, bin)
                break
        if getattr(self, bin_attr) is None:
            raise ex.excError('Can not find %s program in PATH' % ' or '.join(candidates))

    def get_view_bin(self):
        self.get_bin('view_bin', ['iSMcc_view', 'iSMview'])

    def get_sc_query_bin(self):
        self.get_bin('sc_query_bin', ['iSMsc_query'])

    def get_sc_linkinfo_bin(self):
        self.get_bin('sc_linkinfo_bin', ['iSMsc_linkinfo'])

    def get_sc_link_bin(self):
        self.get_bin('sc_link_bin', ['iSMsc_link'])

    def get_sc_create_bin(self):
        self.get_bin('sc_create_bin', ['iSMsc_create'])

    def get_sc_unlink_bin(self):
        self.get_bin('sc_unlink_bin', ['iSMsc_unlink'])

    def view_cmd(self, cmd, on_array=True):
        # Run an iSM view command; append the array name unless
        # on_array=False (e.g. for global listings).
        self.get_view_bin()
        cmd = [self.view_bin] + cmd
        if on_array:
            cmd += [self.name]
        return justcall(cmd)

    def sc_query_cmd(self, cmd):
        self.get_sc_query_bin()
        cmd = [self.sc_query_bin] + cmd
        return justcall(cmd)

    def sc_linkinfo_cmd(self, cmd):
        self.get_sc_linkinfo_bin()
        cmd = [self.sc_linkinfo_bin] + cmd
        return justcall(cmd)

    def sc_unlink_cmd(self, cmd):
        # mutating command: log before executing
        self.get_sc_unlink_bin()
        cmd = [self.sc_unlink_bin] + cmd
        self.log.info(' '.join(cmd))
        return justcall(cmd)

    def sc_link_cmd(self, cmd):
        # mutating command: log before executing
        self.get_sc_link_bin()
        cmd = [self.sc_link_bin] + cmd
        self.log.info(' '.join(cmd))
        return justcall(cmd)

    def sc_create_cmd(self, cmd):
        # mutating command: log before executing
        self.get_sc_create_bin()
        cmd = [self.sc_create_bin] + cmd
        self.log.info(' '.join(cmd))
        return justcall(cmd)

    def sc_create_ld(self, bv, sv):
        """Create a snapshot pair: base volume bv -> snapshot volume sv."""
        cmd = ['-bv', bv, '-sv', sv, '-bvflg', 'ld', '-svflg', 'ld']
        out, err, ret = self.sc_create_cmd(cmd)
        self.log.info(out)
        if ret != 0:
            raise ex.excError(err)

    def sc_unlink_ld(self, ld):
        """Unlink a link volume, identified by logical disk name."""
        cmd = ['-lv', ld, '-lvflg', 'ld']
        out, err, ret = self.sc_unlink_cmd(cmd)
        self.log.info(out)
        if ret != 0:
            raise ex.excError(err)

    def sc_link_ld(self, sv, ld):
        """Link snapshot volume sv to link volume ld."""
        cmd = ['-lv', ld, '-sv', sv, '-lvflg', 'ld', '-svflg', 'ld']
        out, err, ret = self.sc_link_cmd(cmd)
        self.log.info(out)
        if ret != 0:
            raise ex.excError(err)

    def get_arrays(self):
        """Populate self.arrays with a NecIsm per discovered array.

        Retries once after refreshing the volume list if the first
        listing fails.
        NOTE(review): refresh_vollist() is defined on NecIsms only --
        this method is expected to run on that subclass.
        """
        cmd = ['-d']
        out, err, ret = self.view_cmd(cmd, on_array=False)
        if ret != 0:
            self.refresh_vollist()
            out, err, ret = self.view_cmd(cmd, on_array=False)
            if ret != 0:
                raise ex.excError(err)
        """
        --- Disk Array List ---
        Product ID Disk Array Name Resource State Monitoring
        D1-10 D1_10 ready running
        --- Disk Array List ---
        Product ID Disk Array Name Resource State
        Optima3600 Optima7_LMW ready
        """
        lines = out.split('\n')
        for line in lines:
            if len(line) == 0:
                continue
            if '---' in line:
                continue
            if 'Product ID' in line:
                continue
            l = line.split()
            if len(l) < 3:
                continue
            # l[1] is the disk array name; honor the optional filter
            if self.filtering and l[1] not in self.objects:
                continue
            self.arrays.append(NecIsm(l[1]))

    def sc_linkinfo_ld(self, vol):
        """Return link info for a volume as a dict; 'dst' lists link volumes.

        Sample iSMsc_linkinfo output:
        Specified Volume Information
        SV:LD Name : test_src_0000_SV0014
        Type : LX
        Special File : -
        State : link (test_src_0000_LV0064)
        Mode : nr
        Destination Volume Information
        LV:test_src_0000_LV0050 LX link (test_src_0000_SV0016) rw
        LV:test_src_0000_LV005A LX link (test_src_0000_SV0015) rw
        LV:test_src_0000_LV0064 LX link (test_src_0000_SV0014) rw
        """
        cmd = ['-vol', vol, '-volflg', 'ld', '-lcl']
        out, err, ret = self.sc_linkinfo_cmd(cmd)
        if ret != 0:
            raise ex.excError(err)
        data = {'dst': []}
        for line in out.split('\n'):
            if line.startswith('SV:LD Name'):
                data['SV:LD Name'] = line.split(': ')[1]
            elif line.strip().startswith('Type'):
                data['Type'] = line.split(': ')[1]
            elif line.strip().startswith('Special File'):
                data['Special File'] = line.split(': ')[1]
            elif line.strip().startswith('State'):
                data['State'] = line.split(': ')[1]
            elif line.strip().startswith('Mode'):
                data['Mode'] = line.split(': ')[1]
            elif line.strip().startswith('LV:'):
                data['dst'].append(line.split(':')[1])
        return data

    def sc_query_ld(self, sv):
        """Return base volume info for a snapshot volume; 'sv' lists snapshots.

        Sample iSMsc_query output:
        BV Information
        LD Name : test_src_0000
        Type : LX
        Special File : /dev/sdc
        State : normal
        Reserve Area : -
        SV Information
        LX:test_src_0000_SV0014 ( -1) snap/active [2014/03/24 11:16:16] link
        """
        cmd = ['-sv', sv, '-svflg', 'ld']
        out, err, ret = self.sc_query_cmd(cmd)
        if ret != 0:
            raise ex.excError(err)
        data = {'sv': []}
        for line in out.split('\n'):
            if line.strip().startswith('LD Name'):
                data['LD Name'] = line.split(': ')[1]
            elif line.strip().startswith('Type'):
                data['Type'] = line.split(': ')[1]
            elif line.strip().startswith('Special File'):
                data['Special File'] = line.split(': ')[1]
            elif line.strip().startswith('State'):
                data['State'] = line.split(': ')[1]
            elif line.strip().startswith('Reserve Area'):
                data['Reserve Area'] = line.split(': ')[1]
            elif line.strip().startswith('LX:'):
                data['sv'].append(line[line.index(':')+1:])
        return data

    def sc_query_bv_detail(self, bv):
        """Return detailed pair info for a base volume.

        Parsed as a small state machine over sections; result maps
        'sv' and 'lv' logical disk names to their attribute dicts.

        Sample iSMsc_query -detail output:
        BV Information
        LD Name : xxxxxxxxxxx_00CC
        Type : LX
        Special File : /dev/sdaq
        State : normal
        Reserve Area : -
        Pair Information
        SV:LD Name : xxxxxxxxxxx_00cc_SV00ce
        Type : LX
        Generation(Attribute): -1(normal)
        Snap State : snap/active [2014/09/09 17:27:45]
        Create Start Time : 2014/09/09 17:27:45
        Processing Data Size : -
        Snapshot Data Size : 47.4GB
        SV Guard : off
        LV Link Status : link
        LV:LD Name : xxxxxxxxxxx_00cc_LV00cf
        Type : LX
        Special File : /dev/sdar
        LV Access : rw
        SV:LD Name : xxxxxxxxxxx_00cc_SV00cd
        Type : LX
        Generation(Attribute): -2(normal)
        Snap State : snap/active [2014/09/09 17:19:12]
        Create Start Time : 2014/09/09 17:19:12
        Processing Data Size : -
        Snapshot Data Size : 11.6GB
        SV Guard : off
        LV Link Status : link
        LV:LD Name : xxxxxxxxxxx_00cc_LV00d0
        Type : LX
        LV Access : rw
        """
        cmd = ['-bv', bv, '-bvflg', 'ld', '-detail']
        out, err, ret = self.sc_query_cmd(cmd)
        if ret != 0:
            raise ex.excError(err)
        data = {
            'sv': {},
            'lv': {}
        }
        section = ""
        for line in out.split('\n'):
            line = line.strip()
            if line.startswith("BV Information"):
                section = "bvinfo"
                continue
            elif line.startswith("Pair Information"):
                section = "pairinfo"
                continue
            if section == "bvinfo" and line.startswith("LD Name"):
                data['LD Name'] = line.split(': ')[1]
            elif section == "bvinfo" and line.startswith("State"):
                data['State'] = line.split(': ')[1]
            elif section.startswith("pairinfo") and line.startswith("SV:LD Name"):
                ld_name = line.split(': ')[1]
                if ld_name not in data['sv']:
                    data['sv'][ld_name] = {}
                section = "pairinfo_sv"
            elif section == "pairinfo_sv" and line.startswith("Snap State"):
                data['sv'][ld_name]["Snap State"] = line.split(': ')[1]
            elif section.startswith("pairinfo") and line.startswith("LV:LD Name"):
                ld_name = line.split(': ')[1]
                if ld_name not in data['lv']:
                    data['lv'][ld_name] = {}
                section = "pairinfo_lv"
            elif section == "pairinfo_lv" and line.startswith("Type"):
                data['lv'][ld_name]["Type"] = line.split(': ')[1]
        return data
class NecIsms(Nec):
    """Enumerator over NEC iSM-managed arrays.

    objects: optional list of array names to restrict discovery to.
    """

    def __init__(self, objects=None):
        # Fix: the original used a mutable default argument (objects=[]),
        # shared across every instance constructed without the argument.
        if objects is None:
            objects = []
        self.objects = objects
        # only filter discovered arrays when an explicit list was given
        self.filtering = len(objects) > 0
        self.get_arrays()

    def __iter__(self):
        for array in self.arrays:
            yield(array)

    def refresh_vollist(self):
        """Refresh the iSM volume list; silently skip if the tool is absent."""
        if which('iSMvollist') is None:
            return
        cmd = ['iSMvollist', '-r']
        out, err, ret = justcall(cmd)
class NecIsm(Nec):
    """A single NEC iSM-managed disk array, identified by name."""

    def __init__(self, name):
        self.name = name
        self.keys = ['all']

    def get_all(self):
        """Return the raw 'view -all <name>' output for this array."""
        out, _err, _ret = self.view_cmd(['-all'])
        return out
if __name__ == "__main__":
    # Ad-hoc smoke test: dump the full view of every discovered array.
    o = NecIsms()
    for necism in o:
        print(necism.get_all())
    #print(o.sc_linkinfo_ld("test_src_0000_SV0014"))
    #print(o.sc_query_ld("test_src_0000_SV0014"))
opensvc-1.8~20170412/lib/resDiskGce.py 0000644 0001750 0001750 00000014017 13073467726 017431 0 ustar jkelbert jkelbert import resDisk
import os
import json
import rcStatus
import rcExceptions as ex
from rcGlobalEnv import *
from rcUtilities import justcall
import rcGce
class Disk(resDisk.Disk, rcGce.Gce):
    """Google Compute Engine disk resource.

    Attaches and detaches a set of named GCE persistent disks to/from
    this node's instance using the gcloud CLI. Disk and attachment
    listings are cached on the service object and refreshed on demand.
    """

    def __init__(self,
                 rid=None,
                 type="disk.gce",
                 names=None,
                 gce_zone=None,
                 **kwargs):
        resDisk.Disk.__init__(self,
                              rid=rid,
                              type=type,
                              **kwargs)
        # Fix: the original used a shared mutable default (names=set([])),
        # the same object across every instance constructed without an
        # explicit argument.
        # NOTE(review): fmt_disk_devname() calls self.names.index(), which
        # requires an ordered sequence (list), not a set -- confirm what
        # callers actually pass.
        self.names = names if names is not None else set()
        self.gce_zone = gce_zone
        self.label = self.fmt_label()

    def get_disk_names(self, refresh=False):
        """Return the names of all project disks in our zone."""
        data = self.get_disks(refresh=refresh)
        return [d["name"] for d in data]

    def get_attached_disk_names(self, refresh=False):
        """Return the names of disks currently attached to this instance."""
        data = self.get_attached_disks(refresh=refresh)
        return [d["name"] for d in data]

    def get_attached_disks(self, refresh=False):
        """Return (and cache on the service) this instance's attached disks."""
        if hasattr(self.svc, "gce_attached_disks") and not refresh:
            return self.svc.gce_attached_disks
        self.wait_gce_auth()
        cmd = ["gcloud", "compute", "instances", "describe", rcEnv.nodename, "--format", "json", "--zone", self.gce_zone]
        out, err, ret = justcall(cmd)
        data = json.loads(out)
        data = data.get("disks", [])
        for i, d in enumerate(data):
            # the disk name is the last component of the source url
            data[i]["name"] = d["source"].split("/")[-1]
        self.svc.gce_attached_disks = data
        return self.svc.gce_attached_disks

    def get_disks(self, refresh=False):
        """Return (and cache on the service) all disks in our zone."""
        if hasattr(self.svc, "gce_disks") and not refresh:
            return self.svc.gce_disks
        self.wait_gce_auth()
        cmd = ["gcloud", "compute", "disks", "list", "--format", "json", "--zone", self.gce_zone]
        out, err, ret = justcall(cmd)
        data = json.loads(out)
        self.svc.gce_disks = data
        return data

    def fmt_label(self):
        s = "gce volumes "
        s += ", ".join(self.names)
        return s

    def has_it(self, name):
        """True if the named disk is attached to this instance."""
        data = self.get_attached_disks()
        disk_names = [d.get("name") for d in data]
        if name in disk_names:
            return True
        return False

    def up_count(self):
        """Return the subset of our names that are currently attached."""
        data = self.get_attached_disks()
        disk_names = [d.get("name") for d in data]
        l = []
        for name in self.names:
            if name in disk_names:
                l.append(name)
        return l

    def validate_volumes(self):
        """Raise if any of our names is not an allocated GCE disk."""
        existing = [d.get("name") for d in self.get_disks()]
        non_exist = set(self.names) - set(existing)
        if len(non_exist) > 0:
            raise Exception("non allocated volumes: %s" % ', '.join(non_exist))

    def _status(self, verbose=False):
        """UP if all named disks are attached, DOWN if none, WARN otherwise."""
        try:
            self.validate_volumes()
        except Exception as e:
            self.status_log(str(e))
            return rcStatus.WARN
        l = self.up_count()
        n = len(l)
        unattached = sorted(list(set(self.names) - set(l)))
        if n == len(self.names):
            if rcEnv.nodename in self.always_on:
                return rcStatus.STDBY_UP
            return rcStatus.UP
        elif n == 0:
            if rcEnv.nodename in self.always_on:
                return rcStatus.STDBY_DOWN
            return rcStatus.DOWN
        else:
            self.status_log("unattached: "+", ".join(unattached))
            return rcStatus.WARN

    def detach_other(self, name):
        """Detach the named disk from any instance other than this node."""
        existing = self.get_disks()
        for d in existing:
            if d["name"] != name:
                continue
            for user in d.get("users", []):
                instance = user.split('/')[-1]
                if instance != rcEnv.nodename:
                    self.vcall([
                        "gcloud", "compute", "instances", "detach-disk", "-q",
                        instance,
                        "--disk", name,
                        "--zone", self.gce_zone
                    ])

    def do_start_one(self, name):
        """Attach one disk to this instance, detaching it elsewhere first."""
        existing = self.get_disk_names()
        if name not in existing:
            self.log.info(name+" does not exist")
            return
        attached = self.get_attached_disk_names()
        if name in attached:
            self.log.info(name+" is already attached")
            return
        self.detach_other(name)
        self.vcall([
            "gcloud", "compute", "instances", "attach-disk", "-q",
            rcEnv.nodename,
            "--disk", name,
            "--zone", self.gce_zone,
            "--device-name", self.fmt_disk_devname(name),
        ])
        self.can_rollback = True

    def do_start(self):
        for name in self.names:
            self.do_start_one(name)
        self.get_attached_disks(refresh=True)

    def do_stop_one(self, name):
        """Detach one disk from this instance if currently attached."""
        existing = self.get_disk_names()
        if name not in existing:
            self.log.info(name+" does not exist")
            return
        attached = self.get_attached_disk_names()
        if name not in attached:
            self.log.info(name+" is already detached")
            return
        self.vcall([
            "gcloud", "compute", "instances", "detach-disk", "-q",
            rcEnv.nodename,
            "--disk", name,
            "--zone", self.gce_zone
        ])

    def do_stop(self):
        for name in self.names:
            self.do_stop_one(name)
        self.get_attached_disks(refresh=True)

    def fmt_disk_devname(self, name):
        """Build the GCE device-name: <svcname>.<rid with '#'->'.'>.<index>."""
        index = self.names.index(name)
        return ".".join([self.svc.svcname, self.rid.replace("#", "."), str(index)])

    def devlist(self):
        attached = self.get_attached_disks()
        return set(["/dev/disk/by-id/google-"+d["deviceName"] for d in attached if d["name"] in self.names])

    def disklist(self):
        attached = self.get_attached_disks()
        return set([d["deviceName"] for d in attached if d["name"] in self.names])

    def provision(self):
        m = __import__("provDiskGce")
        prov = getattr(m, "ProvisioningDisk")(self)
        prov.provisioner()

    def unprovision(self):
        m = __import__("provDiskGce")
        prov = getattr(m, "ProvisioningDisk")(self)
        prov.unprovisioner()
opensvc-1.8~20170412/lib/rcDevTree.py 0000644 0001750 0001750 00000024014 13073467726 017267 0 ustar jkelbert jkelbert """
Top devices are bare disks or multipath paths.
Bottom devices are formatted devices or devices given to
applications like raw database devices.
A relation describes a parent-child link. A 'used' size can
be arbitrarily set on a relation : DevRelation.set_used()
A logical volume lv0 with segments on pv1 pv2 has two parent
relations : lv0-pv1 and lv0-pv2
"""
try:
    from hashlib import md5

    def hash(s):
        """Return the hex md5 digest of s.

        Fix: hashlib digests require bytes; encode text input so this
        also works on python3 (python2 str is already bytes and is
        passed through unchanged).
        """
        if not isinstance(s, bytes):
            s = s.encode("utf-8")
        o = md5()
        o.update(s)
        return o.hexdigest()
except ImportError:
    # fallback for pythons without hashlib
    from rcMd5 import md5

    def hash(s):
        return md5(s).digest().encode('hex')
class DevRelation(object):
    """A parent-child link between two devices in a DevTree.

    A 'used' size can be pinned explicitly with set_used(); otherwise it
    is derived from the child device's size and raid type.
    """

    def __init__(self, parent, child, used=0):
        self.child = child
        self.parent = parent
        self.used = used
        self.used_set = False

    def set_used(self, used):
        """Explicitly pin the 'used' size carried by this relation."""
        self.used_set = True
        self.used = used

    def get_used(self, used):
        #
        # logical volumes and concatset need to set explicitly
        # the 'used' size the child consumes on the parent.
        #
        if self.used_set:
            return self.used
        child = self.tree.get_dev(self.child)
        if used == 0:
            used = child.size
        if child.devtype in ("multipath", "linear", "partition", "extent"):
            return used
        # Fix: the original tested membership against bare strings, e.g.
        # 'in ("raid0")', which is a substring test rather than tuple
        # membership. Use real one-element tuples. Integer division (//)
        # keeps the python2 integral semantics on python3.
        elif child.devtype in ("raid0",):
            n = len(child.parents)
            return used // n
        elif child.devtype in ("raid1", "raid10"):
            n = len(child.parents)
            return used * 2 // n
        elif child.devtype in ("raid5",):
            n = len(child.parents)
            return used // (n - 1)
        elif child.devtype in ("raid6",):
            n = len(child.parents)
            return used // (n - 2)
        raise Exception("unknown devtype %s for %s" % (child.devtype, child.devname))

    def get_size(self, chain):
        """Resolve the used size along a relation chain (memoized in self.used)."""
        if self.used_set:
            return self.used
        if len(chain) < 2:
            self.used = self.tree.get_dev(chain[-1].child).size
        else:
            self.used = chain[-2].used
        return self.used
class Dev(object):
    """A device node in a DevTree (disk, partition, lv, multipath, ...).

    Parent/child links are DevRelation objects stored on both ends; the
    owning DevTree sets self.tree when the device is registered.
    """

    def __init__(self, devname, size, devtype):
        self.devname = devname
        self.devpath = []
        self.alias = devname
        self.size = size
        self.devtype = devtype
        # list of relations
        self.parents = []
        self.children = []
        self.removed = False

    def __iadd__(self, o):
        # NOTE(review): returning None from __iadd__ makes 'dev += x'
        # rebind dev to None -- looks unused/suspicious, preserved as-is.
        pass

    def remove(self, r):
        # to implement for each os
        r.log.info("remove method not implemented for device %s"%self.alias)

    def set_alias(self, alias):
        self.alias = alias

    def get_dev(self, devname):
        return self.tree.get_dev(devname)

    def get_size(self):
        return self.size

    def set_devtype(self, devtype):
        self.devtype = devtype

    def set_devpath(self, devpath):
        # keep devpath entries unique, in insertion order
        if devpath not in self.devpath:
            self.devpath.append(devpath)

    def print_dev(self, level=0, relation=None):
        """Render this device and its children as an indented usage tree."""
        if relation is None:
            parent_size = self.size
        else:
            parent_size = self.get_dev(relation.parent).get_size()
        if parent_size == 0:
            pct = 0
        else:
            pct = 100*self.size//parent_size
        s = "%s %s %d %d%%\n" % (self.alias, self.devtype, self.size, pct)
        for r in self.children:
            d = self.get_dev(r.child)
            for i in range(level+1):
                s += " "
            if d is None:
                s += "unknown (dev %s not added)\n"%r.child
            else:
                s += d.print_dev(level=level+1, relation=r)
        return s

    def get_child(self, devname):
        for r in self.children:
            if r.parent == devname:
                return r
        return None

    def get_parent(self, devname):
        for r in self.parents:
            if r.child == devname:
                return r
        return None

    def add_child(self, devname, size=0, devtype=None):
        """Link a child device, creating/reusing the relation and the Dev."""
        r = self.get_child(devname)
        if r is None:
            r = self.tree.get_relation(self.devname, devname)
        if r is None:
            r = DevRelation(parent=self.devname, child=devname, used=size)
            r.tree = self.tree
        self.children.append(r)
        self.tree.add_dev(devname, size, devtype)
        return r

    def add_parent(self, devname, size=0, devtype=None):
        """Link a parent device, creating/reusing the relation and the Dev."""
        r = self.get_parent(devname)
        if r is None:
            r = self.tree.get_relation(devname, self.devname)
        if r is None:
            r = DevRelation(parent=devname, child=self.devname, used=size)
            r.tree = self.tree
        self.parents.append(r)
        self.tree.add_dev(devname, size, devtype)
        return r

    def is_parent(self, devname):
        """True if devname appears anywhere in this device's descendants."""
        for r in self.children:
            if r.child == devname:
                return True
            d = self.get_dev(r.child)
            if d.is_parent(devname):
                return True
        return False

    def get_top_devs(self):
        """Return the set of root ancestors (or self if root/multipath)."""
        if len(self.parents) == 0 or self.devtype == "multipath":
            return set([self])
        d = set([])
        for parent in self.parents:
            dev = self.get_dev(parent.parent)
            d |= dev.get_top_devs()
        return d

    def get_top_devs_chain(self, chain=[]):
        # NOTE(review): 'chain' default is mutable but never mutated in
        # place (always passed as 'chain+[parent]'), so it is safe here.
        if len(self.parents) == 0:
            return [[self, chain]]
        d = []
        for parent in self.parents:
            dev = self.get_dev(parent.parent)
            d += dev.get_top_devs_chain(chain+[parent])
        return d

    def print_dev_bottom_up(self, level=0, chain=[]):
        """Print this device then recurse into its parents (bottom-up view)."""
        if len(chain) == 0:
            used = self.size
        else:
            used = chain[-1].get_size(chain)
        s = ""
        for i in range(level+1):
            s += " "
        s += "%s %s %d %d"%(self.alias, self.devtype, self.size, used)
        s += " %s"%self.devpath
        print(s)
        for parent in self.parents:
            dev = self.get_dev(parent.parent)
            #print(map(lambda x: (x.parent, x.child, x.used, x.get_size(chain+[parent]), x.used), chain+[parent]))
            dev.print_dev_bottom_up(level+1, chain+[parent])

    def get_parents_bottom_up(self, l=None):
        """Accumulate all ancestor devices depth-first, bottom-up.

        Fix: the original used a mutable default list ('l=[]') which
        persisted across calls, so repeated invocations returned
        accumulated stale results.
        """
        if l is None:
            l = []
        for parent in self.parents:
            dev = self.get_dev(parent.parent)
            l.append(dev)
            l = dev.get_parents_bottom_up(l)
        return l

    def get_children_bottom_up(self):
        l = self.get_children_top_down()
        l.reverse()
        return l

    def get_children_top_down(self):
        l = []
        for child in self.children:
            dev = self.get_dev(child.child)
            l.append(dev)
            l += dev.get_children_top_down()
        return l
class DevTree(object):
    """Graph of devices linked by DevRelation objects.

    Top devices are bare disks or multipath paths; bottom devices are
    the leaves handed to applications. The tree is fed through
    add_dev() and Dev.add_child()/add_parent().
    """
    dev_class = Dev

    def __init__(self):
        self.dev = {}
        # root node of the relation tree
        self.root = []

    def __iadd__(self, o):
        # register a Dev; devices with no known relations are anchored
        # as roots of the relation tree
        if isinstance(o, Dev):
            o.tree = self
            self.dev[o.devname] = o
            if not self.has_relations(o.devname):
                r = DevRelation(parent=None, child=o.devname, used=o.size)
                r.tree = self
                self.root.append(r)
        return self

    def __str__(self):
        s = ""
        for r in self.root:
            s += self.dev[r.child].print_dev()
        return s

    def print_tree_bottom_up(self):
        for dev in self.get_bottom_devs():
            dev.print_dev_bottom_up()

    def has_relations(self, devname):
        # True if devname is already present anywhere in the relation tree
        l = []
        for r in self.root:
            if r.child == devname:
                return True
            d = self.get_dev(r.child)
            if d.is_parent(devname):
                return True
        return False

    def get_dev(self, devname):
        if devname not in self.dev:
            return None
        return self.dev[devname]

    def get_dev_by_devpath(self, devpath):
        for dev in self.dev.values():
            if devpath in dev.devpath:
                return dev
        return None

    def blacklist(self, dev):
        """ overload this fn with os specific implementation
        """
        return False

    def add_dev(self, devname, size=0, devtype=None):
        # return the existing Dev, or create and register a new one,
        # unless the name is blacklisted by the os-specific subclass
        if devname in self.dev:
            return self.dev[devname]
        if self.blacklist(devname):
            return
        d = self.dev_class(devname, size, devtype)
        self += d
        return d

    def set_relation_used(self, parent, child, used):
        for d in self.dev.values():
            for r in d.children + d.parents:
                if parent == r.parent and child == r.child:
                    r.set_used(used)

    def get_relation(self, parent, child):
        for d in self.dev.values():
            for r in d.children + d.parents:
                if parent == r.parent and child == r.child:
                    return r
        return None

    def get_bottom_devs(self):
        # devices with no children: the leaves consumed by applications
        return [self.dev[devname] for devname in self.dev if len(self.dev[devname].children) == 0]

    def get_top_devs(self):
        d = set([])
        for dev in self.get_bottom_devs():
            d |= dev.get_top_devs()
        return list(d)

    def get_used(self, chain):
        # fold the 'used' size along a relation chain, top to bottom
        used = 0
        for rel in chain:
            used = rel.get_used(used)
        return used

    def get_top_devs_usage_for_devpath(self, devpath):
        """Return (top devpath, used size, region id) triples for each top
        device above the given device path. The region id is an md5 of
        the first chain element's alias, to disambiguate segments."""
        dev = self.get_dev_by_devpath(devpath)
        if dev is None:
            return []
        l = []
        for d, chain in dev.get_top_devs_chain():
            if len(chain) == 0:
                used = d.size
                region = 0
            else:
                used = self.get_used(chain)
                ref = self.get_dev(chain[0].child).alias
                region = hash(ref)
            l.append((d.devpath[0], used, region))
        return l
# Self-test: build a small sample tree and print it top-down.
if __name__ == "__main__":
    tree = DevTree()
    d = tree.add_dev('/dev/sdb', 10000)
    d.add_child('/dev/sdb1', 8000)
    d.add_child('/dev/sdb2', 2000)
    d = tree.add_dev('/dev/sdc', 20000)
    d.add_child('/dev/mapper/vg01-foo', 1000)
    d = tree.get_dev('/dev/sdb2')
    d.add_child('/dev/mapper/vg01-foo', 1000)
    d = tree.get_dev('/dev/mapper/vg01-foo')
    d.add_child('foo.vmdk', 500)
    print(tree)
opensvc-1.8~20170412/lib/rcDevTreeLinux.py 0000644 0001750 0001750 00000046774 13073467726 020330 0 ustar jkelbert jkelbert import rcDevTree
import glob
import os
import re
import math
from subprocess import *
from rcUtilities import which
from rcGlobalEnv import rcEnv
import rcDevTreeVeritas
import rcExceptions as ex
class Dev(rcDevTree.Dev):
    """Linux device node, adding teardown helpers for loop and dm devices."""

    def remove_loop(self, r):
        """Detach the loop device backing this node."""
        command = ["losetup", "-d", self.devpath[0]]
        retcode, _, stderr = r.vcall(command)
        if retcode != 0:
            raise ex.excError(stderr)
        self.removed = True

    def remove_dm(self, r):
        """Remove the device-mapper map backing this node."""
        command = ["dmsetup", "remove", self.alias]
        retcode, _, stderr = r.vcall(command)
        if retcode != 0:
            raise ex.excError(stderr)
        self.removed = True

    def remove(self, r):
        """Dispatch removal based on the device naming scheme."""
        if self.removed:
            return
        if self.devname.startswith("loop"):
            return self.remove_loop(r)
        if self.devname.startswith("dm-"):
            return self.remove_dm(r)
class DevTree(rcDevTreeVeritas.DevTreeVeritas, rcDevTree.DevTree):
di = None
dev_h = {}
dev_class = Dev
def get_size(self, devpath):
size = 0
try:
with open(devpath+'/size', 'r') as f:
size = int(f.read().strip()) // 2048
except:
pass
return size
    def get_dm(self):
        """Return {mapname: dm-<minor>} for /dev/mapper entries, cached on self.dm_h.

        Also builds the reverse map self._dm_h: {dm-<minor>: mapname}.
        """
        if hasattr(self, 'dm_h'):
            return self.dm_h
        self.dm_h = {}
        if not os.path.exists("/dev/mapper"):
            return self.dm_h
        try:
            cmd = ['dmsetup', 'mknodes']
            p = Popen(cmd, stdout=PIPE, stderr=PIPE)
            p.communicate()
        except:
            # best effort
            pass
        devpaths = glob.glob("/dev/mapper/*")
        if '/dev/mapper/control' in devpaths:
            devpaths.remove('/dev/mapper/control')
        for devpath in devpaths:
            try:
                s = os.stat(devpath)
            except OSError:
                continue
            minor = os.minor(s.st_rdev)
            self.dm_h[devpath.replace("/dev/mapper/", "")] = "dm-%d"%minor
        # reverse hash
        self._dm_h = {}
        for mapname, devname in self.dm_h.items():
            self._dm_h[devname] = mapname
        return self.dm_h

    def get_map_wwid(self, map):
        """Return the wwid of multipath map <map> parsed from 'multipath -l'.

        Returns None if multipath is not installed or the map is not listed.
        The 'multipath -l' output is cached on self.multipath_l.
        """
        if not which("multipath"):
            return None
        if not hasattr(self, 'multipath_l'):
            self.multipath_l = []
            cmd = ['multipath', '-l']
            p = Popen(cmd, stdout=PIPE, stderr=PIPE)
            out, err = p.communicate()
            if p.returncode != 0:
                return None
            self.multipath_l = out.decode().splitlines()
        for line in self.multipath_l:
            if not line.startswith(map):
                continue
            try:
                # NOTE(review): index('(')+2 skips '(' plus one extra character,
                # presumably a type-digit prefix in the parenthesized id -- confirm
                wwid = line[line.index('(')+2:line.index(')')]
            except ValueError:
                wwid = line.split()[0]
            return wwid
        return None

    def get_wwid(self):
        """Return {devname: wwid}, merging native multipath and powerpath data.

        Cached on self.wwid_h.
        """
        if hasattr(self, 'wwid_h'):
            return self.wwid_h
        self.wwid_h = {}
        self.wwid_h.update(self.get_wwid_native())
        self.wwid_h.update(self.get_mp_powerpath())
        return self.wwid_h

    def get_wwid_native(self):
        """Fill self.wwid_h with {dm-<minor>: wwid} parsed from 'multipath -l'."""
        if not which("multipath"):
            return self.wwid_h
        cmd = ['multipath', '-l']
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        if p.returncode != 0:
            return self.wwid_h
        for line in out.decode().splitlines():
            if 'dm-' not in line:
                continue
            devname = line[line.index('dm-'):].split()[0]
            try:
                wwid = line[line.index('(')+2:line.index(')')]
            except ValueError:
                # no parenthesized id: first field minus its leading character
                wwid = line.split()[0][1:]
            self.wwid_h[devname] = wwid
        return self.wwid_h

    def get_mp(self):
        """Return {devname: mapname} for multipath devices, native and powerpath.

        Cached on self.mp_h.
        """
        if hasattr(self, 'mp_h'):
            return self.mp_h
        self.mp_h = {}
        self.mp_h.update(self.get_mp_native())
        self.mp_h.update(self.get_mp_powerpath())
        return self.mp_h
def get_mp_powerpath(self):
self.powerpath = {}
if not which("powermt"):
return {}
cmd = ['powermt', 'display', 'dev=all']
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if p.returncode != 0:
return {}
lines = out.decode().splitlines()
if len(lines) < 1:
return {}
dev = None
paths = []
mp_h = {}
for line in lines:
if len(line) == 0:
# new mpath
# - store previous
# - reset path counter
if dev is not None:
if len(paths) > 0:
did = self.di.disk_id(paths[0])
mp_h[name] = did
self.powerpath[name] = paths
dev = None
paths = []
if 'Pseudo name' in line:
l = line.split('=')
if len(l) != 2:
continue
name = l[1]
dev = "/dev/"+name
else:
l = line.split()
if len(l) < 3:
continue
if l[2].startswith("sd"):
paths.append("/dev/"+l[2])
return mp_h
def get_mp_native(self):
if not which('dmsetup'):
return {}
cmd = ['dmsetup', 'ls', '--target', 'multipath']
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if p.returncode != 0:
return {}
mp_h = {}
for line in out.decode().splitlines():
l = line.split()
if len(l) == 0:
continue
mapname = l[0]
major = l[1].strip('(,')
minor = l[2].strip(' )')
mp_h['dm-'+minor] = mapname
return mp_h
    def get_md(self):
        """Return {md name: personality} parsed from /proc/mdstat, cached on self.md_h."""
        if hasattr(self, 'md_h'):
            return self.md_h
        fpath = "/proc/mdstat"
        self.md_h = {}
        try:
            with open(fpath, 'r') as f:
                buff = f.read()
        except:
            # no md driver loaded: empty map
            return self.md_h
        for line in buff.split('\n'):
            if line.startswith("Personalities"):
                continue
            if len(line) == 0 or line[0] == " ":
                continue
            l = line.split()
            if len(l) < 4:
                continue
            # l[0] is the md device name, l[3] its raid personality
            self.md_h[l[0]] = l[3]
        return self.md_h
def load_dm_dev_t(self):
table = {}
if not which('dmsetup'):
return
cmd = ['dmsetup', 'ls']
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if p.returncode != 0:
return
for line in out.decode().splitlines():
l = line.split()
if len(l) == 0:
continue
mapname = l[0]
major = l[1].strip('(,')
minor = l[2].strip(' )')
dev_t = ':'.join((major, minor))
self.dev_h[dev_t] = mapname
    def load_dm(self):
        """Add device-mapper maps and their parent/child relations to the tree."""
        table = {}
        self.load_dm_dev_t()
        if not which('dmsetup'):
            return
        cmd = ['dmsetup', 'table']
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        if p.returncode != 0:
            return
        for line in out.decode().splitlines():
            l = line.split()
            if len(l) < 5:
                continue
            mapname = l[0].strip(':')
            # table length is in 512-byte sectors; store MB, rounded up
            size = int(math.ceil(1.*int(l[2])*512/1024/1024))
            maptype = l[3]
            # exclude gatekeeper/vcmdb-sized multipath maps
            if maptype == "multipath" and size in [0, 2, 3, 30, 45]:
                continue
            for w in l[4:]:
                if ':' not in w:
                    continue
                if mapname not in table:
                    table[mapname] = {"devs": [], "size": 0, "type": "linear"}
                table[mapname]["devs"].append(w)
                table[mapname]["size"] += size
                table[mapname]["type"] = maptype
        for mapname in table:
            d = self.add_dev(mapname, table[mapname]["size"])
            d.set_devtype(table[mapname]["type"])
            d.set_devpath('/dev/mapper/'+mapname)
            # lvm mapnames encode vg-lv ('--' escapes '-'): derive /dev/vg/lv
            s = mapname.replace('--', ':').replace('-', '/').replace(':','-')
            if "/" in s:
                d.set_devpath('/dev/'+s)
            wwid = self.get_map_wwid(mapname)
            if wwid is not None:
                d.set_alias(wwid)
            for dev in table[mapname]["devs"]:
                if dev not in self.dev_h:
                    continue
                d.add_parent(self.dev_h[dev])
                parentdev = self.get_dev(self.dev_h[dev])
                parentdev.add_child(mapname)
def set_udev_symlink(self, d, name):
if not which("udevadm"):
return
cmd = ["/sbin/udevadm", "info", "-q", "symlink", "--name", name]
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if p.returncode != 0:
return
for s in out.decode().split():
d.set_devpath("/dev/"+s)
def get_lv_linear(self):
if hasattr(self, 'lv_linear'):
return self.lv_linear
self.lv_linear = {}
if not which('dmsetup'):
return self.lv_linear
cmd = ['dmsetup', 'table', '--target', 'linear']
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if p.returncode != 0:
return self.lv_linear
for line in out.decode().splitlines():
l = line.split(':')
if len(l) < 2:
continue
mapname = l[0]
line = line[line.index(':')+1:]
l = line.split()
if len(l) < 3:
continue
length = int(l[1])*512/1024/1024
devt = l[3]
if mapname in self.lv_linear:
self.lv_linear[mapname].append((devt, length))
else:
self.lv_linear[mapname] = [(devt, length)]
return self.lv_linear
def is_cdrom(self, devname):
p = '/sys/block/%s/device/media'%devname
if not os.path.exists(p):
return False
with open(p, 'r') as f:
buff = f.read()
if buff.strip() == "cdrom":
return True
return False
    def get_loop(self):
        """Build self.loop: {loop devname: backing file path} from losetup output."""
        self.loop = {}
        cmd = ["losetup"]
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        if p.returncode != 0:
            return
        for line in out.decode().splitlines():
            if not line.startswith("/"):
                continue
            l = line.split()
            if len(l) < 2:
                continue
            loop = l[0].replace("/dev/", "")
            # the backing file path is the last field
            fpath = l[-1]
            self.loop[loop] = fpath
def dev_type(self, devname):
t = "linear"
md_h = self.get_md()
mp_h = self.get_mp()
if devname in md_h:
return md_h[devname]
if devname in mp_h:
return "multipath"
return t
    def add_loop_relations(self):
        """Link loop devices to the devices hosting their backing files."""
        self.get_loop()
        from rcMountsLinux import Mounts
        m = Mounts()
        for devname, fpath in self.loop.items():
            if fpath == "(deleted)":
                # backing file was removed: no relation to draw
                continue
            parentpath = m.get_fpath_dev(fpath)
            if parentpath is None:
                continue
            d = self.get_dev_by_devpath(parentpath)
            if d is None:
                continue
            d.add_child(devname)
            c = self.get_dev(devname)
            c.add_parent(d.devname)
def add_drbd_relations(self):
if not which("drbdadm") or not os.path.exists('/proc/drbd'):
return
cmd = ["drbdadm", "dump-xml"]
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if p.returncode != 0:
return
from xml.etree import ElementTree as etree
tree = etree.fromstring(out.decode())
for res in tree.getiterator('resource'):
for host in res.findall('host'):
if host.attrib['name'] != rcEnv.nodename:
continue
edisk = host.find('disk')
edev = host.find('device')
if edisk is None or edev is None:
edisk = host.find('volume/disk')
edev = host.find('volume/device')
if edisk is None or edev is None:
continue
devname = 'drbd'+edev.attrib['minor']
parentpath = edisk.text
d = self.get_dev_by_devpath(parentpath)
if d is None:
continue
d.add_child(devname)
c = self.get_dev(devname)
c.add_parent(d.devname)
    def load_dev(self, devname, devpath):
        """Build a Dev from a sysfs block entry, wiring holders/slaves relations.

        Returns the Dev, or None if skipped (cdrom, gatekeeper sizes, blacklist).
        """
        if self.is_cdrom(devname):
            return
        mp_h = self.get_mp()
        wwid_h = self.get_wwid()
        size = self.get_size(devpath)
        # exclude 0-sized md, Symmetrix gatekeeper and vcmdb
        if devname in self.mp_h and size in (0, 2, 30, 45):
            return
        devtype = self.dev_type(devname)
        d = self.add_dev(devname, size, devtype)
        if d is None:
            return
        self.set_udev_symlink(d, devname)
        self.get_dm()
        if 'cciss' in devname:
            # sysfs encodes '/' as '!' in cciss device names
            d.set_devpath('/dev/'+devname.replace('!', '/'))
        elif devname in self.mp_h:
            if devname in self._dm_h:
                d.set_devpath('/dev/mpath/'+self._dm_h[devname])
            d.set_devpath('/dev/'+devname)
        else:
            d.set_devpath('/dev/'+devname)
        # store devt
        try:
            with open("%s/dev"%devpath, 'r') as f:
                devt = f.read().strip()
                self.dev_h[devt] = devname
        except IOError:
            pass
        # add holders
        for holderpath in glob.glob("%s/holders/*"%devpath):
            holdername = os.path.basename(holderpath)
            if not os.path.exists(holderpath):
                # broken symlink
                continue
            size = self.get_size(holderpath)
            devtype = self.dev_type(holdername)
            d.add_child(holdername, size, devtype)
        # add lv aliases
        if devname in self._dm_h:
            alias = self._dm_h[devname]
            d.set_alias(alias)
            d.set_devpath('/dev/mapper/'+alias)
            # lvm mapnames encode vg-lv ('--' escapes '-'): derive /dev/vg/lv
            s = alias.replace('--', ':').replace('-', '/').replace(':','-')
            d.set_devpath('/dev/'+s)
        # add slaves
        for slavepath in glob.glob("%s/slaves/*"%devpath):
            slavename = os.path.basename(slavepath)
            if not os.path.exists(slavepath):
                # broken symlink
                continue
            size = self.get_size(slavepath)
            devtype = self.dev_type(slavename)
            d.add_parent(slavename, size, devtype)
        if devname in wwid_h:
            wwid = wwid_h[devname]
            d.set_alias(wwid)
            try:
                p = glob.glob('/dev/mpath/?'+wwid)[0]
                d.set_devpath(p)
            except:
                pass
        return d

    def get_dev_t(self, dev):
        """Return "major:minor" for a device path, "0:0" on stat failure."""
        major, minor = self._get_dev_t(dev)
        return ":".join((str(major), str(minor)))

    def _get_dev_t(self, dev):
        """Return the (major, minor) pair of a device node, (0, 0) on failure."""
        try:
            s = os.stat(dev)
            minor = os.minor(s.st_rdev)
            major = os.major(s.st_rdev)
        except:
            # not a device node, or not accessible
            return 0, 0
        return major, minor
    def load_fdisk(self):
        """Populate the tree by parsing 'fdisk -l' (fallback when sysfs lacks slaves)."""
        self.get_wwid()
        os.environ["LANG"] = "C"
        p = Popen(["fdisk", "-l"], stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        if p.returncode != 0:
            return
        for line in out.decode().splitlines():
            if line.startswith('/dev/dm-'):
                continue
            elif line.startswith("Disk "):
                # disk
                devpath = line.split()[1].strip(':')
                if devpath.startswith('/dev/dm-'):
                    continue
                # NOTE(review): true division yields a float size under python3
                size = int(line.split()[-2]) / 1024 / 1024
                if size in [2, 3, 30, 45]:
                    continue
                devname = devpath.replace('/dev/','').replace("/","!")
                devtype = self.dev_type(devname)
                dev_t = self.get_dev_t(devpath)
                self.dev_h[dev_t] = devname
                d = self.add_dev(devname, size, devtype)
                if d is None:
                    continue
                d.set_devpath(devpath)
                if devname.startswith('emc') and devname in self.wwid_h:
                    d.set_alias(self.wwid_h[devname])
                    for path in self.powerpath[devname]:
                        p = self.add_dev(path.replace('/dev/',''), size, "linear")
                        p.set_devpath(path)
                        p.add_child(devname)
                        d.add_parent(path.replace('/dev/',''))
            elif line.startswith('Unit'):
                unit = int(line.split()[-2])
            elif line.startswith('/dev/'):
                # partition
                # NOTE(review): assumes Disk and Unit lines were seen first;
                # 'd' and 'unit' would be unbound otherwise -- confirm fdisk
                # always prints them before partition lines
                line = line.replace('*', '')
                _l = line.split()
                partpath = _l[0]
                partend = int(_l[2])
                partstart = int(_l[1])
                partsize = (partend - partstart) * unit / 1024/1024
                partname = partpath.replace('/dev/','').replace("/","!")
                dev_t = self.get_dev_t(partpath)
                self.dev_h[dev_t] = partname
                p = self.add_dev(partname, partsize, "linear")
                if p is None:
                    continue
                p.set_devpath(partpath)
                d.add_child(partname)
                p.add_parent(devname)

    def load_sysfs(self):
        """Populate the tree from /sys/block entries and their partitions."""
        for devpath in glob.glob("/sys/block/*"):
            devname = os.path.basename(devpath)
            if devname.startswith("Vx"):
                # veritas devices are loaded by the DevTreeVeritas mixin
                continue
            d = self.load_dev(devname, devpath)
            if d is None:
                continue
            # add parts
            for partpath in glob.glob("%s/%s*"%(devpath, devname)):
                partname = os.path.basename(partpath)
                p = self.load_dev(partname, partpath)
                if p is None:
                    continue
                d.add_child(partname)
                p.add_parent(devname)

    def tune_lv_relations(self):
        """Set the used size on lv-to-pv relations from dmsetup linear tables."""
        dm_h = self.get_dm()
        for lv, segments in self.get_lv_linear().items():
            for devt, length in segments:
                if devt not in self.dev_h:
                    continue
                if lv not in dm_h:
                    continue
                child = dm_h[lv]
                parent = self.dev_h[devt]
                r = self.get_relation(parent, child)
                if r is not None:
                    r.set_used(length)

    def load(self, di=None):
        """Discover the full device tree, using sysfs when available, else fdisk."""
        if di is not None:
            self.di = di
        if self.di is None:
            from rcDiskInfoLinux import diskInfo
            self.di = diskInfo()
        if len(glob.glob("/sys/block/*/slaves")) == 0:
            # old kernels without sysfs slaves/holders information
            self.load_fdisk()
            self.load_dm()
        else:
            self.load_sysfs()
        self.tune_lv_relations()
        self.load_vx_dmp()
        self.load_vx_vm()
        self.add_drbd_relations()
        self.add_loop_relations()
def blacklist(self, devname):
bl = [r'^ram[0-9]*.*', r'^scd[0-9]*', r'^sr[0-9]*']
for b in bl:
if re.match(b, devname):
return True
return False
# Self-test: load the local device tree and print it bottom-up.
if __name__ == "__main__":
    tree = DevTree()
    tree.load()
    #print(tree)
    tree.print_tree_bottom_up()
    #print(map(lambda x: x.alias, tree.get_top_devs()))
opensvc-1.8~20170412/lib/checkLagSunOS.py 0000644 0001750 0001750 00000015271 13073467726 020042 0 ustar jkelbert jkelbert import checks
import os
from rcUtilities import justcall, which
from rcUtilitiesSunOS import get_solaris_version
from rcGlobalEnv import rcEnv
"""
Solaris 10
key: 1 (0x0001) policy: L4 address: 0:15:17:bb:82:d2 (auto)
device address speed duplex link state
e1000g0 0:15:17:bb:82:d2 1000 Mbps full up attached
bnx0 0:24:e8:35:61:3b 1000 Mbps full up attached
Solaris 11
# dladm show-aggr
LINK POLICY ADDRPOLICY LACPACTIVITY LACPTIMER FLAGS
aggr0 L4 auto off short -----
aggrbck0 L4 auto off short -----
aggrpriv0 L4 auto off short -----
# dladm show-phys
LINK MEDIA STATE SPEED DUPLEX DEVICE
net4 Ethernet up 10 full usbecm0
net1 Ethernet up 1000 full ixgbe1
net0 Ethernet up 1000 full ixgbe0
net2 Ethernet up 1000 full ixgbe2
net3 Ethernet up 1000 full ixgbe3
# dladm show-link
LINK CLASS MTU STATE OVER
net4 phys 1500 up --
net1 phys 1500 up --
net2 phys 1500 up --
net0 phys 1500 up --
aggrbck0 aggr 1500 up net1
aggrpriv0 aggr 1500 up net3
bckg0 vnic 1500 up aggrbck0
zrac1_a0 vnic 1500 up aggr0
zrac3_p_a0 vnic 1500 up aggrpriv0
"""
class check(checks.check):
chk_type = "lag"
chk_name = "Solaris network interface lag"
    def do_check(self):
        """Collect lag member data via dladm, then run the per-metric checks.

        Solaris 11 uses the parseable show-phys/show-aggr/show-link outputs;
        Solaris 10 parses the legacy show-aggr format.
        """
        if not which("dladm"):
            return self.undef
        self.osver = get_solaris_version()
        if self.osver >= 11:
            cmd = ['dladm', 'show-phys', '-p', '-o', 'link,state,speed,duplex']
            out, err, ret = justcall(cmd)
            if ret != 0:
                return self.undef
            self.phys = out.split('\n')
            cmd = ['dladm', 'show-aggr', '-p', '-o', 'link']
        else:
            cmd = ['dladm', 'show-aggr']
        out, err, ret = justcall(cmd)
        if ret != 0:
            return self.undef
        if self.osver >= 11:
            self.aggs = out.split('\n')
            if len(self.aggs) == 0:
                return self.undef
            # map each aggregate to its member links via show-link
            self.listaggs = {}
            cmd = ['dladm', 'show-link', '-p', '-o', 'link,over']
            out, err, ret = justcall(cmd)
            if ret != 0:
                return self.undef
            lines = out.split('\n')
            for line in lines:
                if len(line) == 0:
                    break
                l = line.split(':')
                if l[0] in self.aggs:
                    self.listaggs[l[0]] = l[1]
        else:
            self.lines = out.split('\n')
            if len(self.lines) == 0:
                return self.undef
        r = []
        r += self.do_check_speed()
        r += self.do_check_duplex()
        r += self.do_check_link()
        r += self.do_check_attach()
        return r
    def do_check_speed(self):
        """Report the negotiated speed of each lag member as a check instance."""
        r = []
        lag = ""
        i = 0
        if self.osver >= 11:
            for lag in self.aggs:
                if len(lag) == 0:
                    break
                nets = self.listaggs[lag].split(' ')
                for net in nets:
                    if len(net) == 0:
                        break
                    for phy in self.phys:
                        if phy.startswith(net+':'):
                            # parseable show-phys: link:state:speed:duplex
                            l = phy.split(':')
                            val = l[2]
                            r.append({
                              'chk_instance': '%s.%s.speed'%(lag, net),
                              'chk_value': str(val),
                              'chk_svcname': '',
                             })
            return r
        for line in self.lines:
            l = line.split()
            if len(l) < 4:
                continue
            elif line.startswith('key'):
                # legacy format: a 'key' line starts a new lag section
                lag = l[1]
                i = 0
                continue
            elif l[0] == 'device':
                continue
            val = l[2]
            r.append({
              'chk_instance': '%s.%d.speed'%(lag, i),
              'chk_value': str(val),
              'chk_svcname': '',
             })
            i += 1
        return r
    def do_check_duplex(self):
        """Verify each lag member runs full duplex."""
        return self._do_check("duplex", "full", 4)

    def do_check_link(self):
        """Verify each lag member link state is up."""
        return self._do_check("link", "up", 5)

    def do_check_attach(self):
        """Verify each lag member is attached (solaris 10 only)."""
        return self._do_check("attach", "attached", 6)

    def _do_check(self, key, target, col):
        """Generic member check: value 0 when column <col> equals <target>, else 1."""
        r = []
        lag = ""
        i = 0
        if self.osver >= 11:
            # solaris 11 parseable outputs use different column indexes
            if key == "duplex":
                col = 3
            if key == "attach":
                # no attach state is reported on solaris 11
                return r
            if key == "link":
                col = 1
            for lag in self.aggs:
                if len(lag) == 0:
                    break
                nets = self.listaggs[lag].split(' ')
                for net in nets:
                    if len(net) == 0:
                        break
                    for phy in self.phys:
                        if phy.startswith(net+':'):
                            l = phy.split(':')
                            if l[col] != target:
                                val = 1
                            else:
                                val = 0
                            r.append({
                              'chk_instance': '%s.%s.%s'%(lag, net, key),
                              'chk_value': str(val),
                              'chk_svcname': '',
                             })
            return r
        for line in self.lines:
            l = line.split()
            if len(l) < col+1:
                continue
            elif line.startswith('key'):
                # legacy format: a 'key' line starts a new lag section
                lag = l[1]
                i = 0
                continue
            elif l[0] == 'device':
                continue
            else:
                if l[col] != target:
                    val = 1
                else:
                    val = 0
                r.append({
                  'chk_instance': '%s.%d.%s'%(lag, i, key),
                  'chk_value': str(val),
                  'chk_svcname': '',
                 })
                i += 1
        return r
opensvc-1.8~20170412/lib/provIpAmazon.py 0000644 0001750 0001750 00000006424 13073467726 020036 0 ustar jkelbert jkelbert from provisioning import Provisioning
import rcExceptions as ex
from svcBuilder import conf_get_string_scope
class ProvisioningIp(Provisioning):
    def __init__(self, r):
        Provisioning.__init__(self, r)

    def provisioner(self):
        """Allocate private and public addresses, propagate them, then start
        the wrapped ip resource."""
        self.provisioner_private()
        self.provisioner_public()
        self.provisioner_docker_ip()
        self.cascade_allocation()
        self.r.log.info("provisioned")
        self.r.start()
        return True
def cascade_allocation(self):
if not self.r.svc.config.has_option(self.r.rid, "cascade_allocation"):
return
cascade = self.r.svc.config.get(self.r.rid, "cascade_allocation").split()
need_write = False
for e in cascade:
try:
rid, param = e.split(".")
except:
self.r.log.warning("misformatted cascade entry: %s (expected . [@])" % e)
continue
if not self.r.svc.config.has_section(rid):
self.r.log.warning("misformatted cascade entry: %s (rid does not exist)" % e)
continue
need_write = True
self.r.log.info("cascade %s to %s" % (self.r.ipname, e))
self.r.svc.config.set(rid, param, self.r.ipname)
self.r.svc.resources_by_id[rid].ipname = conf_get_string_scope(self.r.svc, self.r.svc.config, rid, param)
self.r.svc.resources_by_id[rid].addr = self.r.svc.resources_by_id[rid].ipname
if need_write:
self.r.svc.write_config()
    def provisioner_docker_ip(self):
        """If docker_daemon_ip is set on the resource, bind the docker daemon
        to the provisioned ip by appending --ip to docker_daemon_args."""
        if not self.r.svc.config.has_option(self.r.rid, "docker_daemon_ip"):
            return
        if not self.r.svc.config.get(self.r.rid, "docker_daemon_ip"):
            return
        try:
            args = self.r.svc.config.get("DEFAULT", "docker_daemon_args")
        except:
            args = ""
        args += " --ip "+self.r.ipname
        self.r.svc.config.set("DEFAULT", "docker_daemon_args", args)
        self.r.svc.write_config()
        for r in self.r.svc.get_resources("container.docker"):
            # reload docker daemon args
            r.on_add()
def provisioner_private(self):
if self.r.ipname != "":
self.r.log.info("private ip already provisioned")
return
eni = self.r.get_network_interface()
if eni is None:
raise ex.excError("could not find ec2 network interface for %s" % self.ipdev)
ips1 = set(self.r.get_instance_private_addresses())
data = self.r.aws([
"ec2", "assign-private-ip-addresses",
"--network-interface-id", eni,
"--secondary-private-ip-address-count", "1"
])
ips2 = set(self.r.get_instance_private_addresses())
new_ip = list(ips2 - ips1)[0]
self.r.svc.config.set(self.r.rid, "ipname", new_ip)
self.r.svc.write_config()
self.r.ipname = new_ip
    def provisioner_public(self):
        """Allocate an ec2 elastic ip and persist it as the resource eip."""
        if self.r.eip != "":
            self.r.log.info("public ip already provisioned")
            return
        data = self.r.aws([
          "ec2", "allocate-address",
          "--domain", "vpc",
        ])
        self.r.svc.config.set(self.r.rid, "eip", data["PublicIp"])
        self.r.svc.write_config()
        self.r.eip = data["PublicIp"]
opensvc-1.8~20170412/lib/rcOs.py 0000644 0001750 0001750 00000000200 13073467726 016301 0 ustar jkelbert jkelbert class Os(object):
    def reboot(self):
        """Reboot the node; overridden by os-specific subclasses."""
        print("not implemented")
    def crash(self):
        """Hard-crash the node; overridden by os-specific subclasses."""
        print("not implemented")
opensvc-1.8~20170412/lib/resFsHP-UX.py 0000644 0001750 0001750 00000006050 13073467726 017250 0 ustar jkelbert jkelbert import os
import rcStatus
from rcGlobalEnv import rcEnv
rcMounts = __import__('rcMounts'+rcEnv.sysname)
import resFs as Res
from rcUtilities import qcall, protected_mount
import rcExceptions as ex
def try_umount(self):
    """Umount self.mount_point, best-effort killing blocking processes.

    Returns the last umount exit code (0 on success, non-zero on failure).
    """
    cmd = ['umount', self.mount_point]
    (ret, out, err) = self.vcall(cmd, err_to_info=True)
    if ret == 0:
        return 0
    """ don't try to kill process using the source of a
        protected bind mount
    """
    if protected_mount(self.mount_point):
        return 1
    """ best effort kill of all processes that might block
        the umount operation. The priority is given to mass
        action reliability, ie don't contest operator's will
    """
    cmd = ['sync']
    (ret, out, err) = self.vcall(cmd, err_to_info=True)
    for i in range(4):
        cmd = ['fuser', '-kc', self.mount_point]
        (ret, out, err) = self.vcall(cmd, err_to_info=True)
        self.log.info('umount %s'%self.mount_point)
        cmd = ['umount', self.mount_point]
        ret = qcall(cmd)
        if ret == 0:
            break
    return ret
class Mount(Res.Mount):
""" define HP-UX mount/umount doAction """
    def __init__(self,
                 rid,
                 mount_point,
                 device,
                 fs_type,
                 mount_options,
                 snap_size=None,
                 **kwargs):
        Res.Mount.__init__(self,
                           rid,
                           mount_point=mount_point,
                           device=device,
                           fs_type=fs_type,
                           mount_options=mount_options,
                           snap_size=snap_size,
                           **kwargs)
        # fsck command set per supported fs type
        self.fsck_h = {
            'vxfs': {
                'bin': 'fsck',
                'cmd': ['fsck', '-F', 'vxfs', '-y', self.device]
            },
        }
    def is_up(self):
        """Return True if the fs is mounted at the expected mount point."""
        return rcMounts.Mounts().has_mount(self.device, self.mount_point)
    def start(self):
        """Fsck then mount the fs, creating the mount point if needed."""
        Res.Mount.start(self)
        if self.is_up() is True:
            self.log.info("%s is already mounted" % self.label)
            return 0
        self.fsck()
        if not os.path.exists(self.mount_point):
            os.makedirs(self.mount_point, 0o755)
        if self.fs_type != "":
            fstype = ['-F', self.fs_type]
        else:
            fstype = []
        if self.mount_options != "":
            mntopt = ['-o', self.mount_options]
        else:
            mntopt = []
        cmd = ['mount']+fstype+mntopt+[self.device, self.mount_point]
        (ret, out, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError
        self.can_rollback = True
    def stop(self):
        """Umount the fs, retrying up to 3 times; raise on failure."""
        if self.is_up() is False:
            self.log.info("%s is already umounted" % self.label)
            return 0
        for i in range(3):
            ret = try_umount(self)
            if ret == 0: break
        if ret != 0:
            self.log.error('failed to umount %s'%self.mount_point)
            raise ex.excError
# Print the driver class documentation when run as a script.
if __name__ == "__main__":
    for c in (Mount,) :
        help(c)
opensvc-1.8~20170412/lib/rcBrocade.py 0000644 0001750 0001750 00000010115 13073467726 017265 0 ustar jkelbert jkelbert from rcUtilities import justcall, which
import rcExceptions as ex
import os
import ConfigParser
import telnetlib
from rcGlobalEnv import rcEnv
if rcEnv.pathbin not in os.environ['PATH']:
os.environ['PATH'] += ":"+rcEnv.pathbin
def brocadetelnetcmd(cmd, switch, username, password):
    """Run <cmd> on <switch> over telnet, returning (out, err, ret) like justcall.

    NOTE(review): telnetlib expects bytes under python3; this code passes str
    and presumably predates python3 support -- confirm before reuse.
    """
    tn = telnetlib.Telnet(switch)
    tn.read_until("login: ")
    tn.write(username + '\n')
    tn.read_until("Password: ")
    tn.write(password + '\n')
    tn.read_until("> ")
    tn.write(cmd + '\n')
    tn.write('exit\n')
    out = tn.read_all()
    return out, "", 0
def brocadecmd(cmd, switch, username, key):
    """Run <cmd> on <switch> over ssh with key auth.

    Retries with a 'bash --login -c' wrapper on firmwares whose default shell
    does not expose the commands directly. Raises excError on failure.
    """
    _cmd = ['ssh', '-o', 'StrictHostKeyChecking=no',
            '-o', 'ForwardX11=no',
            '-o', 'ConnectTimeout=5',
            '-o', 'PasswordAuthentication=no',
            '-l', username, '-i', key, switch, cmd]
    out, err, ret = justcall(_cmd)
    if "command not found" in err:
        # bogus firmware syntax
        _cmd = ['ssh', '-o', 'StrictHostKeyChecking=no',
                '-o', 'ForwardX11=no',
                '-o', 'ConnectTimeout=5',
                '-o', 'PasswordAuthentication=no',
                '-l', username, '-i', key, switch, 'bash --login -c '+cmd]
        out, err, ret = justcall(_cmd)
    if ret != 0:
        raise ex.excError("brocade command execution error")
    return out, err, ret
class Brocades(object):
    """Enumerate the brocade switches declared in the auth config file.

    Sections of type 'brocade' are instantiated as Brocade objects, optionally
    filtered by the 'objects' name list.
    """
    switchs = []
    def __init__(self, objects=[]):
        self.objects = objects
        if len(objects) > 0:
            self.filtering = True
        else:
            self.filtering = False
        cf = rcEnv.authconf
        if not os.path.exists(cf):
            return
        conf = ConfigParser.RawConfigParser()
        conf.read(cf)
        m = []
        for s in conf.sections():
            if self.filtering and s not in self.objects:
                continue
            try:
                stype = conf.get(s, 'type')
            except:
                continue
            if stype != "brocade":
                continue
            name = s
            key = None
            password = None
            try:
                username = conf.get(s, 'username')
            except:
                print("no 'username' parameter in %s section %s"%(cf, s))
                continue
            try:
                key = conf.get(s, 'key')
            except:
                pass
            try:
                password = conf.get(s, 'password')
            except:
                pass
            # at least one auth method (ssh key or telnet password) is required
            if key is None and password is None:
                print("no 'key' nor 'password' parameter in %s section %s"%(cf, s))
                continue
            m.append([name, username, key, password])
        del(conf)
        for name, username, key, password in m:
            self.switchs.append(Brocade(name, username, key, password))
    def __iter__(self):
        for switch in self.switchs:
            yield(switch)
class Brocade(object):
    """A single brocade switch accessor, using ssh (key) or telnet (password).

    self.keys lists the collector methods exposed to the stats framework.
    """
    def __init__(self, name, username, key, password):
        self.name = name
        self.username = username
        self.password = password
        self.key = key
        self.keys = ['brocadeswitchshow', 'brocadensshow', 'brocadezoneshow']

    def brocadecmd(self, cmd):
        """Run a command on the switch with the configured transport."""
        if self.key is not None:
            return brocadecmd(cmd, self.name, self.username, self.key)
        elif self.password is not None:
            return brocadetelnetcmd(cmd, self.name, self.username, self.password)
        else:
            raise Exception("ssh nor telnet method available")

    def _show(self, cmd):
        # common implementation of the get_brocade*show collectors:
        # announce the command, run it, return its stdout
        print("%s: %s"%(self.name, cmd))
        buff = self.brocadecmd(cmd)[0]
        return buff

    def get_brocadeswitchshow(self):
        """Return the raw 'switchshow' output."""
        return self._show('switchshow')

    def get_brocadensshow(self):
        """Return the raw 'nsshow' output."""
        return self._show('nsshow')

    def get_brocadezoneshow(self):
        """Return the raw 'zoneshow' output."""
        return self._show('zoneshow')
# Self-test: dump switchshow from every configured brocade switch.
if __name__ == "__main__":
    o = Brocades()
    for brocade in o:
        print(brocade.get_brocadeswitchshow())
opensvc-1.8~20170412/lib/resFsSunOS.py 0000644 0001750 0001750 00000013661 13073467726 017424 0 ustar jkelbert jkelbert import os
import time
import re
import rcStatus
import rcMountsSunOS as rcMounts
import resFs as Res
import rcExceptions as ex
from rcZfs import zfs_getprop, zfs_setprop
from rcUtilities import justcall
class Mount(Res.Mount):
"""
SunOS fs resource driver.
"""
    def __init__(self,
                 rid,
                 mount_point,
                 device,
                 fs_type,
                 mount_options,
                 snap_size=None,
                 **kwargs):
        # raw device path, derived from the block device path
        self.rdevice = device.replace('/dsk/','/rdsk/',1)
        self.Mounts = rcMounts.Mounts()
        Res.Mount.__init__(self,
                           rid=rid,
                           mount_point=mount_point,
                           device=device,
                           fs_type=fs_type,
                           mount_options=mount_options,
                           snap_size=snap_size,
                           **kwargs)
        # fsck command set per fs type; reportclean lists exit codes meaning clean
        self.fsck_h = {
            'ufs': {
                'bin': 'fsck',
                'cmd': ['fsck', '-F', 'ufs', '-y', self.rdevice],
                'reportcmd': ['fsck', '-F', 'ufs', '-m', self.rdevice],
                'reportclean': [ 32 ],
            },
            'vxfs': {
                'bin': 'fsck',
                'cmd': ['fsck', '-F', 'vxfs', '-y', self.rdevice],
                'reportcmd': ['fsck', '-F', 'vxfs', '-m', self.rdevice],
                'reportclean': [ 32 ],
            },
        }
    def is_up(self):
        """Return True if the fs is mounted at the expected mount point."""
        self.Mounts = rcMounts.Mounts()
        return self.Mounts.has_mount(self.device, self.mount_point)
    def start(self):
        """Mount the fs, handling zone path substitution and zfs specifics."""
        self.Mounts = None
        Res.Mount.start(self)
        m = re.match("<(\w+)>", self.mount_point)
        if m:
            # the zone was not created when the service was built. now it should,
            # so try the redetect the zonepath
            zone = m.group(1)
            for r in self.svc.get_resources("container.zone"):
                if r.name == zone:
                    zonepath = r.get_zonepath()
                    self.mount_point = re.sub("<\w+>", zonepath, self.mount_point)
        if self.fs_type == 'zfs' :
            if 'noaction' not in self.tags and zfs_getprop(self.device, 'canmount' ) != 'noauto' :
                self.log.info("%s should be set to canmount=noauto (zfs set canmount=noauto %s)"%(self.label, self.device))
        if self.is_up() is True:
            self.log.info("%s is already mounted" % self.label)
            return
        if self.fs_type == 'zfs' :
            # align the dataset zoned/mountpoint properties before mounting
            if 'encap' not in self.tags and not self.svc.config.has_option(self.rid, 'zone') and zfs_getprop(self.device, 'zoned') != 'off':
                if zfs_setprop(self.device, 'zoned', 'off'):
                    raise ex.excError
            if zfs_getprop(self.device, 'mountpoint') != self.mount_point:
                if not zfs_setprop(self.device, 'mountpoint', self.mount_point):
                    raise ex.excError
            self.Mounts = None
            if self.is_up() is True:
                return
            # remove the placeholder file, then mount, falling back to -O (overlay)
            (stdout,stderr,returncode)= justcall(['rm', self.mount_point+"/.opensvc" ])
            ret, out, err = self.vcall(['zfs', 'mount', self.device ])
            if ret != 0:
                ret, out, err = self.vcall(['zfs', 'mount', '-O', self.device ])
                if ret != 0:
                    raise ex.excError
            return
        elif self.fs_type != "":
            fstype = ['-F', self.fs_type]
            self.fsck()
        else:
            fstype = []
        if self.mount_options != "":
            mntopt = ['-o', self.mount_options]
        else:
            mntopt = []
        if not os.path.exists(self.mount_point):
            os.makedirs(self.mount_point, 0o755)
        for i in range(3):
            ret = self.try_mount(fstype, mntopt)
            if ret == 0: break
            time.sleep(1)
        self.Mounts = None
        if ret != 0:
            raise ex.excError
        self.can_rollback = True
def can_check_writable(self):
if self.fs_type != 'zfs':
return True
pool = self.device.split("/")[0]
cmd = ["zpool", "status", pool]
out, err, ret = justcall(cmd)
if "state: SUSPENDED" in out:
self.status_log("pool %s is suspended")
return False
return True
def try_mount(self, fstype, mntopt):
cmd = ['mount'] + fstype + mntopt + [self.device, self.mount_point]
ret, out, err = self.vcall(cmd)
return ret
    def try_umount(self):
        """Umount the fs: zfs umount (then forced) for zfs, else plain umount
        with fuser-kill retries and a final forced umount for non-lofs."""
        if self.fs_type == 'zfs' :
            ret, out, err = self.vcall(['zfs', 'umount', self.device ], err_to_info=True)
            if ret != 0 :
                ret, out, err = self.vcall(['zfs', 'umount', '-f', self.device ], err_to_info=True)
                if ret != 0 :
                    raise ex.excError
            return
        (ret, out, err) = self.vcall(['umount', self.mount_point], err_to_info=True)
        if ret == 0 :
            return
        for i in range(4):
            # best-effort kill of the processes blocking the umount
            (ret, out, err) = self.vcall(['fuser', '-ck', self.mount_point],
                                         err_to_info=True)
            (ret, out, err) = self.vcall(['umount', self.mount_point],
                                         err_to_info=True)
            if ret == 0 :
                return
            if self.fs_type != 'lofs' :
                (ret, out, err) = self.vcall(['umount', '-f', self.mount_point],
                                             err_to_info=True)
                if ret == 0 :
                    return
        raise ex.excError
    def stop(self):
        """Umount the fs if mounted, raising excError on failure."""
        self.Mounts = None
        if self.is_up() is False:
            self.log.info("%s is already umounted" % self.label)
            return
        try:
            self.try_umount()
        except:
            self.Mounts = None
            self.log.error("failed")
            raise ex.excError
        self.Mounts = None
# Print the driver class documentation when run as a script.
if __name__ == "__main__":
    for c in (Mount,) :
        help(c)
opensvc-1.8~20170412/lib/resContainerJail.py 0000644 0001750 0001750 00000006403 13073467726 020642 0 ustar jkelbert jkelbert import os
from datetime import datetime
import rcStatus
import resources as Res
from rcUtilitiesFreeBSD import check_ping
from rcUtilities import qcall
import resContainer
import rcExceptions as ex
class Jail(resContainer.Container):
""" jail -c name=jail1
path=/usr/local/opt/jail1.opensvc.com
host.hostname=jail1.opensvc.com
ip4.addr=192.168.0.208
command=/bin/sh /etc/rc
"""
    def files_to_sync(self):
        """No jail-specific files need syncing to peer nodes."""
        return []
    def operational(self):
        """The jail is considered operational as soon as it is up."""
        return True
def install_drp_flag(self):
rootfs = self.jailroot
flag = os.path.join(rootfs, ".drp_flag")
self.log.info("install drp flag in container : %s"%flag)
with open(flag, 'w') as f:
f.write(' ')
f.close()
def container_start(self):
cmd = ['jail', '-c', 'name='+self.basename, 'path='+self.jailroot,
'host.hostname='+self.name]
if len(self.ips) > 0:
cmd += ['ip4.addr='+','.join(self.ips)]
if len(self.ip6s) > 0:
cmd += ['ip6.addr='+','.join(self.ip6s)]
cmd += ['command=/bin/sh', '/etc/rc']
self.log.info(' '.join(cmd))
ret = qcall(cmd)
if ret != 0:
raise ex.excError
def container_stop(self):
cmd = ['jail', '-r', self.basename]
(ret, out, err) = self.vcall(cmd)
if ret != 0:
raise ex.excError
def container_forcestop(self):
""" no harder way to stop a lxc container, raise to signal our
helplessness
"""
self.log.error("no forced stop method")
raise ex.excError
def ping(self):
return check_ping(self.addr, timeout=1)
def is_up_on(self, nodename):
return self.is_up(nodename)
def is_up(self, nodename=None):
cmd = ['jls']
if nodename is not None:
cmd = rcEnv.rsh.split() + [nodename] + cmd
(ret, out, err) = self.call(cmd)
if ret != 0:
raise ex.excError
for line in out.split('\n'):
l = line.split()
if len(l) < 4:
continue
if l[2] == self.name:
return True
return False
def get_container_info(self):
print("TODO: get_container_info()")
return {'vcpus': '0', 'vmem': '0'}
def _status(self, verbose=False):
if self.is_up():
return rcStatus.UP
else:
return rcStatus.DOWN
def __init__(self,
rid,
name,
guestos="FreeBSD",
jailroot="/tmp",
ips=[],
ip6s=[],
osvc_root_path=None,
**kwargs):
resContainer.Container.__init__(self,
rid=rid,
name=name,
guestos=guestos,
type="container.jail",
osvc_root_path=osvc_root_path,
**kwargs)
self.jailroot = jailroot
self.ips = ips
self.ip6s = ip6s
def __str__(self):
return "%s name=%s" % (Res.Resource.__str__(self), self.name)
opensvc-1.8~20170412/lib/rcPasswdLinux.py 0000644 0001750 0001750 00000000541 13073467726 020211 0 ustar jkelbert jkelbert from subprocess import *
from rcUtilities import which
def change_root_pw(pw):
    """Set the root account password to <pw>.

    Prefers chpasswd(8), falling back to 'passwd --stdin root'.
    Returns the command's exit code.
    """
    if which('chpasswd') is not None:
        cmd = ['chpasswd']
        _input = "root:"+pw+"\n"
    else:
        cmd = ['passwd', '--stdin', 'root']
        _input = pw
    p = Popen(cmd, stdin=PIPE)
    if not isinstance(_input, bytes):
        # bug fix: on python3 a pipe opened in binary mode (the default)
        # rejects str; encode before writing
        _input = _input.encode()
    # feed the password through communicate() so stdin is written,
    # flushed and closed without risking a pipe deadlock
    p.communicate(input=_input)
    return p.returncode
opensvc-1.8~20170412/lib/freezer.py 0000644 0001750 0001750 00000003110 13073467726 017040 0 ustar jkelbert jkelbert """
Define the Freezer class, instanciated as a Svc lazy attribute,
providing the methods to freeze, thaw a service and to test if
a service is frozen.
"""
import os
from rcGlobalEnv import rcEnv
class Freezer(object):
    """
    The freezer class, instanciated as a Svc lazy attribute.
    Provides methods to freeze, thaw a service and to test if
    the service is frozen.
    """
    flag_dir = rcEnv.pathvar
    base_flag = os.path.join(flag_dir, 'FROZEN')
    flag = base_flag

    @staticmethod
    def _dummy():
        """
        A no-op stand-in for freeze/thaw/frozen, installed when the
        service configuration file does not exist.
        """
        pass

    def _frozen(self):
        """
        Tell whether the service-level or node-level frozen flag file
        is present.
        """
        return os.path.exists(self.flag) or os.path.exists(self.base_flag)

    def _freeze(self):
        """
        Create the service frozen flag file.
        """
        with open(self.flag, 'w'):
            pass

    def _thaw(self):
        """
        Remove the service frozen flag file, leaving the node-level
        flag untouched.
        """
        if self.flag == self.base_flag:
            return
        if os.path.exists(self.flag):
            os.unlink(self.flag)

    def __init__(self, name):
        cfpath = os.path.join(rcEnv.pathetc, name)
        if os.path.exists(cfpath):
            self.flag = self.flag + "." + name
            self.freeze = self._freeze
            self.thaw = self._thaw
            self.frozen = self._frozen
        else:
            self.freeze = self._dummy
            self.thaw = self._dummy
            self.frozen = self._dummy
opensvc-1.8~20170412/lib/ipaddress.py 0000644 0001750 0001750 00000234040 13073467726 017364 0 ustar jkelbert jkelbert # Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
This library is used to create/poke/manipulate IPv4 and IPv6 addresses
and networks.
"""
from __future__ import unicode_literals
import itertools
import struct
__version__ = '1.0.16'
# Compatibility functions
# (shims letting this vendored module run unmodified on python 2 and 3)

# integer types: on python 2, 'long' also qualifies
_compat_int_types = (int,)
try:
    _compat_int_types = (int, long)
except NameError:
    pass

# text type: 'unicode' on python 2, 'str' on python 3
try:
    _compat_str = unicode
except NameError:
    _compat_str = str
    assert bytes != str

if b'\0'[0] == 0:  # Python 3 semantics
    def _compat_bytes_to_byte_vals(byt):
        # py3: indexing bytes already yields ints
        return byt
else:
    def _compat_bytes_to_byte_vals(byt):
        # py2: indexing bytes yields 1-char strings; unpack each to an int
        return [struct.unpack(b'!B', b)[0] for b in byt]

# int.from_bytes exists on py3 only; emulate it elsewhere
try:
    _compat_int_from_byte_vals = int.from_bytes
except AttributeError:
    def _compat_int_from_byte_vals(bytvals, endianess):
        assert endianess == 'big'
        res = 0
        for bv in bytvals:
            assert isinstance(bv, _compat_int_types)
            res = (res << 8) + bv
        return res
def _compat_to_bytes(intval, length, endianess):
    """Pack the non-negative integer *intval* into *length* big-endian
    bytes. Only lengths 4 (IPv4) and 16 (IPv6) are supported.

    Raises struct.error when the value does not fit, and
    NotImplementedError for any other length.
    """
    assert isinstance(intval, _compat_int_types)
    assert endianess == 'big'
    if length == 4:
        if not 0 <= intval < 2 ** 32:
            raise struct.error("integer out of range for 'I' format code")
        return struct.pack(b'!I', intval)
    if length == 16:
        if not 0 <= intval < 2 ** 128:
            raise struct.error("integer out of range for 'QQ' format code")
        high, low = divmod(intval, 1 << 64)
        return struct.pack(b'!QQ', high, low)
    raise NotImplementedError()
# bit_length shim: native on py2.7+/py3, manual fallback elsewhere
if hasattr(int, 'bit_length'):
    # Not int.bit_length , since that won't work in 2.7 where long exists
    def _compat_bit_length(i):
        # number of bits required to represent i (0 -> 0)
        return i.bit_length()
else:
    def _compat_bit_length(i):
        # the first right-shift amount that clears every bit is the length
        for res in itertools.count():
            if i >> res == 0:
                return res
def _compat_range(start, end, step=1):
assert step > 0
i = start
while i < end:
yield i
i += step
class _TotalOrderingMixin(object):
__slots__ = ()
# Helper that derives the other comparison operations from
# __lt__ and __eq__
# We avoid functools.total_ordering because it doesn't handle
# NotImplemented correctly yet (http://bugs.python.org/issue10042)
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not equal
def __lt__(self, other):
raise NotImplementedError
def __le__(self, other):
less = self.__lt__(other)
if less is NotImplemented or not less:
return self.__eq__(other)
return less
def __gt__(self, other):
less = self.__lt__(other)
if less is NotImplemented:
return NotImplemented
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not (less or equal)
def __ge__(self, other):
less = self.__lt__(other)
if less is NotImplemented:
return NotImplemented
return not less
# Address widths, in bits, of the two families.
IPV4LENGTH = 32
IPV6LENGTH = 128


class AddressValueError(ValueError):
    """A Value Error related to the address."""


class NetmaskValueError(ValueError):
    """A Value Error related to the netmask."""
def ip_address(address):
    """Take an IP string/int and return an object of the correct type.

    Args:
        address: A string or integer, the IP address.  Either IPv4 or
          IPv6 addresses may be supplied; integers less than 2**32 will
          be considered to be IPv4 by default.

    Returns:
        An IPv4Address or IPv6Address object.

    Raises:
        ValueError: if the *address* passed isn't either a v4 or a v6
          address

    """
    # probe the v4 then the v6 constructor; first one to accept wins
    for address_class in (IPv4Address, IPv6Address):
        try:
            return address_class(address)
        except (AddressValueError, NetmaskValueError):
            pass

    if isinstance(address, bytes):
        raise AddressValueError(
            '%r does not appear to be an IPv4 or IPv6 address. '
            'Did you pass in a bytes (str in Python 2) instead of'
            ' a unicode object?' % address)

    raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
                     address)
def ip_network(address, strict=True):
    """Take an IP string/int and return an object of the correct type.

    Args:
        address: A string or integer, the IP network.  Either IPv4 or
          IPv6 networks may be supplied; integers less than 2**32 will
          be considered to be IPv4 by default.

    Returns:
        An IPv4Network or IPv6Network object.

    Raises:
        ValueError: if the string passed isn't either a v4 or a v6
          address. Or if the network has host bits set.

    """
    # probe the v4 then the v6 constructor; first one to accept wins
    for network_class in (IPv4Network, IPv6Network):
        try:
            return network_class(address, strict)
        except (AddressValueError, NetmaskValueError):
            pass

    if isinstance(address, bytes):
        raise AddressValueError(
            '%r does not appear to be an IPv4 or IPv6 network. '
            'Did you pass in a bytes (str in Python 2) instead of'
            ' a unicode object?' % address)

    raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
                     address)
def ip_interface(address):
    """Take an IP string/int and return an object of the correct type.

    Args:
        address: A string or integer, the IP address.  Either IPv4 or
          IPv6 addresses may be supplied; integers less than 2**32 will
          be considered to be IPv4 by default.

    Returns:
        An IPv4Interface or IPv6Interface object.

    Raises:
        ValueError: if the string passed isn't either a v4 or a v6
          address.

    Notes:
        The IPv?Interface classes describe an Address on a particular
        Network, so they're basically a combination of both the Address
        and Network classes.

    """
    # probe the v4 then the v6 constructor; first one to accept wins
    for interface_class in (IPv4Interface, IPv6Interface):
        try:
            return interface_class(address)
        except (AddressValueError, NetmaskValueError):
            pass

    raise ValueError('%r does not appear to be an IPv4 or IPv6 interface' %
                     address)
def v4_int_to_packed(address):
    """Represent an address as 4 packed bytes in network (big-endian) order.

    Args:
        address: An integer representation of an IPv4 IP address.

    Returns:
        The integer address packed as 4 bytes in network (big-endian) order.

    Raises:
        ValueError: If the integer is negative or too large to be an
          IPv4 IP address.

    """
    try:
        return _compat_to_bytes(address, 4, 'big')
    except (struct.error, OverflowError):
        # normalize low-level packing failures to the documented ValueError
        raise ValueError("Address negative or too large for IPv4")
def v6_int_to_packed(address):
    """Represent an address as 16 packed bytes in network (big-endian) order.

    Args:
        address: An integer representation of an IPv6 IP address.

    Returns:
        The integer address packed as 16 bytes in network (big-endian) order.

    Raises:
        ValueError: If the integer is negative or too large to be an
          IPv6 IP address.

    """
    try:
        return _compat_to_bytes(address, 16, 'big')
    except (struct.error, OverflowError):
        # normalize low-level packing failures to the documented ValueError
        raise ValueError("Address negative or too large for IPv6")
def _split_optional_netmask(address):
    """Split an 'address/netmask' string on '/'.

    Returns a one- or two-element list; raises AddressValueError when
    more than one '/' is present.
    """
    parts = _compat_str(address).split('/')
    if len(parts) > 2:
        raise AddressValueError("Only one '/' permitted in %r" % address)
    return parts
def _find_address_range(addresses):
"""Find a sequence of sorted deduplicated IPv#Address.
Args:
addresses: a list of IPv#Address objects.
Yields:
A tuple containing the first and last IP addresses in the sequence.
"""
it = iter(addresses)
first = last = next(it)
for ip in it:
if ip._ip != last._ip + 1:
yield first, last
first = ip
last = ip
yield first, last
def _count_righthand_zero_bits(number, bits):
    """Count the number of zero bits on the right hand side.

    Args:
        number: an integer.
        bits: maximum number of bits to count.

    Returns:
        The number of zero bits on the right hand side of the number.

    """
    if number == 0:
        # all of the low 'bits' bits are zero
        return bits
    # ~number & (number - 1) turns exactly the trailing zero bits into ones
    return min(bits, _compat_bit_length(~number & (number - 1)))
def summarize_address_range(first, last):
    """Summarize a network range given the first and last IP addresses.

    Example:
        >>> list(summarize_address_range(IPv4Address('192.0.2.0'),
        ...                              IPv4Address('192.0.2.130')))
        ...                                #doctest: +NORMALIZE_WHITESPACE
        [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'),
         IPv4Network('192.0.2.130/32')]

    Args:
        first: the first IPv4Address or IPv6Address in the range.
        last: the last IPv4Address or IPv6Address in the range.

    Returns:
        An iterator of the summarized IPv(4|6) network objects.

    Raise:
        TypeError:
            If the first and last objects are not IP addresses.
            If the first and last objects are not the same version.
        ValueError:
            If the last object is not greater than the first.
            If the version of the first address is not 4 or 6.

    """
    if (not (isinstance(first, _BaseAddress) and
             isinstance(last, _BaseAddress))):
        raise TypeError('first and last must be IP addresses, not networks')
    if first.version != last.version:
        raise TypeError("%s and %s are not of the same version" % (
            first, last))
    if first > last:
        raise ValueError('last IP address must be greater than first')

    if first.version == 4:
        ip = IPv4Network
    elif first.version == 6:
        ip = IPv6Network
    else:
        raise ValueError('unknown IP version')

    ip_bits = first._max_prefixlen
    first_int = first._ip
    last_int = last._ip
    while first_int <= last_int:
        # widest block aligned on first_int that does not overshoot
        # last_int: limited by both alignment (trailing zero bits) and
        # the size of the remaining range
        nbits = min(_count_righthand_zero_bits(first_int, ip_bits),
                    _compat_bit_length(last_int - first_int + 1) - 1)
        net = ip((first_int, ip_bits - nbits))
        yield net
        first_int += 1 << nbits
        # stop before wrapping past the all-ones address
        if first_int - 1 == ip._ALL_ONES:
            break
def _collapse_addresses_internal(addresses):
    """Loops through the addresses, collapsing concurrent netblocks.

    Example:

        ip1 = IPv4Network('192.0.2.0/26')
        ip2 = IPv4Network('192.0.2.64/26')
        ip3 = IPv4Network('192.0.2.128/26')
        ip4 = IPv4Network('192.0.2.192/26')

        _collapse_addresses_internal([ip1, ip2, ip3, ip4]) ->
          [IPv4Network('192.0.2.0/24')]

        This shouldn't be called directly; it is called via
          collapse_addresses([]).

    Args:
        addresses: A list of IPv4Network's or IPv6Network's

    Returns:
        A list of IPv4Network's or IPv6Network's depending on what we were
        passed.

    """
    # First merge
    to_merge = list(addresses)
    subnets = {}
    while to_merge:
        net = to_merge.pop()
        supernet = net.supernet()
        existing = subnets.get(supernet)
        if existing is None:
            # park the net under its supernet; if its sibling shows up
            # later, the pair is merged into the supernet below
            subnets[supernet] = net
        elif existing != net:
            # Merge consecutive subnets
            del subnets[supernet]
            to_merge.append(supernet)
    # Then iterate over resulting networks, skipping subsumed subnets
    last = None
    for net in sorted(subnets.values()):
        if last is not None:
            # Since they are sorted,
            # last.network_address <= net.network_address is a given.
            if last.broadcast_address >= net.broadcast_address:
                continue
        yield net
        last = net
def collapse_addresses(addresses):
    """Collapse a list of IP objects.

    Example:
        collapse_addresses([IPv4Network('192.0.2.0/25'),
                            IPv4Network('192.0.2.128/25')]) ->
                           [IPv4Network('192.0.2.0/24')]

    Args:
        addresses: An iterator of IPv4Network or IPv6Network objects.

    Returns:
        An iterator of the collapsed IPv(4|6)Network objects.

    Raises:
        TypeError: If passed a list of mixed version objects.

    """
    addrs = []
    ips = []
    nets = []

    # split IP addresses and networks
    for ip in addresses:
        if isinstance(ip, _BaseAddress):
            if ips and ips[-1]._version != ip._version:
                raise TypeError("%s and %s are not of the same version" % (
                    ip, ips[-1]))
            ips.append(ip)
        elif ip._prefixlen == ip._max_prefixlen:
            # a /32 or /128 network is treated as a single address
            if ips and ips[-1]._version != ip._version:
                raise TypeError("%s and %s are not of the same version" % (
                    ip, ips[-1]))
            try:
                ips.append(ip.ip)
            except AttributeError:
                ips.append(ip.network_address)
        else:
            if nets and nets[-1]._version != ip._version:
                raise TypeError("%s and %s are not of the same version" % (
                    ip, nets[-1]))
            nets.append(ip)

    # sort and dedup
    ips = sorted(set(ips))

    # find consecutive address ranges in the sorted sequence and summarize them
    if ips:
        for first, last in _find_address_range(ips):
            addrs.extend(summarize_address_range(first, last))

    return _collapse_addresses_internal(addrs + nets)
def get_mixed_type_key(obj):
    """Return a key suitable for sorting between networks and addresses.

    Address and Network objects are not sortable by default; they're
    fundamentally different, so the expression

        IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24')

    doesn't make any sense.  There are some times however, where you may
    wish to have ipaddress sort these anyway.  If you need to do this,
    you can use this function as the key= argument to sorted().

    Args:
        obj: either a Network or Address object.

    Returns:
        appropriate key.

    """
    if isinstance(obj, _BaseNetwork):
        return obj._get_networks_key()
    if isinstance(obj, _BaseAddress):
        return obj._get_address_key()
    return NotImplemented
class _IPAddressBase(_TotalOrderingMixin):

    """The mother class.

    Common behaviour shared by all address, interface and network
    classes: string renderings, integer validation and netmask/prefix
    conversions.
    """

    __slots__ = ()

    @property
    def exploded(self):
        """Return the longhand version of the IP address as a string."""
        return self._explode_shorthand_ip_string()

    @property
    def compressed(self):
        """Return the shorthand version of the IP address as a string."""
        return _compat_str(self)

    @property
    def reverse_pointer(self):
        """The name of the reverse DNS pointer for the IP address, e.g.:
            >>> ipaddress.ip_address("127.0.0.1").reverse_pointer
            '1.0.0.127.in-addr.arpa'
            >>> ipaddress.ip_address("2001:db8::1").reverse_pointer
            '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'

        """
        return self._reverse_pointer()

    @property
    def version(self):
        # concrete subclasses override this with 4 or 6
        msg = '%200s has no version specified' % (type(self),)
        raise NotImplementedError(msg)

    def _check_int_address(self, address):
        # reject negative or too-wide integer addresses
        if address < 0:
            msg = "%d (< 0) is not permitted as an IPv%d address"
            raise AddressValueError(msg % (address, self._version))
        if address > self._ALL_ONES:
            msg = "%d (>= 2**%d) is not permitted as an IPv%d address"
            raise AddressValueError(msg % (address, self._max_prefixlen,
                                           self._version))

    def _check_packed_address(self, address, expected_len):
        # validate the byte length of a packed (bytes) address
        address_len = len(address)
        if address_len != expected_len:
            msg = (
                '%r (len %d != %d) is not permitted as an IPv%d address. '
                'Did you pass in a bytes (str in Python 2) instead of'
                ' a unicode object?'
            )
            raise AddressValueError(msg % (address, address_len,
                                           expected_len, self._version))

    @classmethod
    def _ip_int_from_prefix(cls, prefixlen):
        """Turn the prefix length into a bitwise netmask

        Args:
            prefixlen: An integer, the prefix length.

        Returns:
            An integer.

        """
        return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen)

    @classmethod
    def _prefix_from_ip_int(cls, ip_int):
        """Return prefix length from the bitwise netmask.

        Args:
            ip_int: An integer, the netmask in expanded bitwise format

        Returns:
            An integer, the prefix length.

        Raises:
            ValueError: If the input intermingles zeroes & ones
        """
        trailing_zeroes = _count_righthand_zero_bits(ip_int,
                                                     cls._max_prefixlen)
        prefixlen = cls._max_prefixlen - trailing_zeroes
        # a valid netmask is all ones followed by all zeroes; verify the
        # leading part is solid ones
        leading_ones = ip_int >> trailing_zeroes
        all_ones = (1 << prefixlen) - 1
        if leading_ones != all_ones:
            byteslen = cls._max_prefixlen // 8
            details = _compat_to_bytes(ip_int, byteslen, 'big')
            msg = 'Netmask pattern %r mixes zeroes & ones'
            raise ValueError(msg % details)
        return prefixlen

    @classmethod
    def _report_invalid_netmask(cls, netmask_str):
        msg = '%r is not a valid netmask' % netmask_str
        raise NetmaskValueError(msg)

    @classmethod
    def _prefix_from_prefix_string(cls, prefixlen_str):
        """Return prefix length from a numeric string

        Args:
            prefixlen_str: The string to be converted

        Returns:
            An integer, the prefix length.

        Raises:
            NetmaskValueError: If the input is not a valid netmask
        """
        # int allows a leading +/- as well as surrounding whitespace,
        # so we ensure that isn't the case
        if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
            cls._report_invalid_netmask(prefixlen_str)
        try:
            prefixlen = int(prefixlen_str)
        except ValueError:
            cls._report_invalid_netmask(prefixlen_str)
        if not (0 <= prefixlen <= cls._max_prefixlen):
            cls._report_invalid_netmask(prefixlen_str)
        return prefixlen

    @classmethod
    def _prefix_from_ip_string(cls, ip_str):
        """Turn a netmask/hostmask string into a prefix length

        Args:
            ip_str: The netmask/hostmask to be converted

        Returns:
            An integer, the prefix length.

        Raises:
            NetmaskValueError: If the input is not a valid netmask/hostmask
        """
        # Parse the netmask/hostmask like an IP address.
        try:
            ip_int = cls._ip_int_from_string(ip_str)
        except AddressValueError:
            cls._report_invalid_netmask(ip_str)

        # Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
        # Note that the two ambiguous cases (all-ones and all-zeroes) are
        # treated as netmasks.
        try:
            return cls._prefix_from_ip_int(ip_int)
        except ValueError:
            pass

        # Invert the bits, and try matching a /0+1+/ hostmask instead.
        ip_int ^= cls._ALL_ONES
        try:
            return cls._prefix_from_ip_int(ip_int)
        except ValueError:
            cls._report_invalid_netmask(ip_str)

    def __reduce__(self):
        # pickle support: rebuild from the string form
        return self.__class__, (_compat_str(self),)
class _BaseAddress(_IPAddressBase):

    """A generic IP object.

    This IP class contains the version independent methods which are
    used by single IP addresses.
    """

    __slots__ = ()

    def __int__(self):
        return self._ip

    def __eq__(self, other):
        try:
            return (self._ip == other._ip and
                    self._version == other._version)
        except AttributeError:
            # not an address-like object: let the other operand decide
            return NotImplemented

    def __lt__(self, other):
        if not isinstance(other, _IPAddressBase):
            return NotImplemented
        if not isinstance(other, _BaseAddress):
            raise TypeError('%s and %s are not of the same type' % (
                self, other))
        if self._version != other._version:
            raise TypeError('%s and %s are not of the same version' % (
                self, other))
        if self._ip != other._ip:
            return self._ip < other._ip
        return False

    # Shorthand for Integer addition and subtraction. This is not
    # meant to ever support addition/subtraction of addresses.
    def __add__(self, other):
        if not isinstance(other, _compat_int_types):
            return NotImplemented
        return self.__class__(int(self) + other)

    def __sub__(self, other):
        if not isinstance(other, _compat_int_types):
            return NotImplemented
        return self.__class__(int(self) - other)

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, _compat_str(self))

    def __str__(self):
        return _compat_str(self._string_from_ip_int(self._ip))

    def __hash__(self):
        # hash the hex string of the integer value
        return hash(hex(int(self._ip)))

    def _get_address_key(self):
        # sort key used by get_mixed_type_key()
        return (self._version, self)

    def __reduce__(self):
        # pickle support: rebuild from the integer form
        return self.__class__, (self._ip,)
class _BaseNetwork(_IPAddressBase):

    """A generic IP network object.

    This IP class contains the version independent methods which are
    used by networks.
    """

    def __init__(self, address):
        # per-instance cache for the computed broadcast_address and
        # hostmask properties
        self._cache = {}

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, _compat_str(self))

    def __str__(self):
        return '%s/%d' % (self.network_address, self.prefixlen)

    def hosts(self):
        """Generate Iterator over usable hosts in a network.

        This is like __iter__ except it doesn't return the network
        or broadcast addresses.

        """
        network = int(self.network_address)
        broadcast = int(self.broadcast_address)
        for x in _compat_range(network + 1, broadcast):
            yield self._address_class(x)

    def __iter__(self):
        # iterate every address, network and broadcast included
        network = int(self.network_address)
        broadcast = int(self.broadcast_address)
        for x in _compat_range(network, broadcast + 1):
            yield self._address_class(x)

    def __getitem__(self, n):
        # index into the network's address span; negative indices count
        # back from the broadcast address
        network = int(self.network_address)
        broadcast = int(self.broadcast_address)
        if n >= 0:
            if network + n > broadcast:
                raise IndexError
            return self._address_class(network + n)
        else:
            n += 1
            if broadcast + n < network:
                raise IndexError
            return self._address_class(broadcast + n)

    def __lt__(self, other):
        if not isinstance(other, _IPAddressBase):
            return NotImplemented
        if not isinstance(other, _BaseNetwork):
            raise TypeError('%s and %s are not of the same type' % (
                self, other))
        if self._version != other._version:
            raise TypeError('%s and %s are not of the same version' % (
                self, other))
        if self.network_address != other.network_address:
            return self.network_address < other.network_address
        if self.netmask != other.netmask:
            return self.netmask < other.netmask
        return False

    def __eq__(self, other):
        try:
            return (self._version == other._version and
                    self.network_address == other.network_address and
                    int(self.netmask) == int(other.netmask))
        except AttributeError:
            return NotImplemented

    def __hash__(self):
        return hash(int(self.network_address) ^ int(self.netmask))

    def __contains__(self, other):
        # always false if one is v4 and the other is v6.
        if self._version != other._version:
            return False
        # dealing with another network.
        if isinstance(other, _BaseNetwork):
            return False
        # dealing with another address
        else:
            # address
            return (int(self.network_address) <= int(other._ip) <=
                    int(self.broadcast_address))

    def overlaps(self, other):
        """Tell if self is partly contained in other."""
        return self.network_address in other or (
            self.broadcast_address in other or (
                other.network_address in self or (
                    other.broadcast_address in self)))

    @property
    def broadcast_address(self):
        # computed lazily and memoized in self._cache
        x = self._cache.get('broadcast_address')
        if x is None:
            x = self._address_class(int(self.network_address) |
                                    int(self.hostmask))
            self._cache['broadcast_address'] = x
        return x

    @property
    def hostmask(self):
        # computed lazily and memoized in self._cache
        x = self._cache.get('hostmask')
        if x is None:
            x = self._address_class(int(self.netmask) ^ self._ALL_ONES)
            self._cache['hostmask'] = x
        return x

    @property
    def with_prefixlen(self):
        return '%s/%d' % (self.network_address, self._prefixlen)

    @property
    def with_netmask(self):
        return '%s/%s' % (self.network_address, self.netmask)

    @property
    def with_hostmask(self):
        return '%s/%s' % (self.network_address, self.hostmask)

    @property
    def num_addresses(self):
        """Number of hosts in the current subnet."""
        return int(self.broadcast_address) - int(self.network_address) + 1

    @property
    def _address_class(self):
        # Returning bare address objects (rather than interfaces) allows for
        # more consistent behaviour across the network address, broadcast
        # address and individual host addresses.
        msg = '%200s has no associated address class' % (type(self),)
        raise NotImplementedError(msg)

    @property
    def prefixlen(self):
        return self._prefixlen

    def address_exclude(self, other):
        """Remove an address from a larger block.

        For example:

            addr1 = ip_network('192.0.2.0/28')
            addr2 = ip_network('192.0.2.1/32')
            addr1.address_exclude(addr2) =
                [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
                 IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]

        or IPv6:

            addr1 = ip_network('2001:db8::1/32')
            addr2 = ip_network('2001:db8::1/128')
            addr1.address_exclude(addr2) =
                [ip_network('2001:db8::1/128'),
                 ip_network('2001:db8::2/127'),
                 ip_network('2001:db8::4/126'),
                 ip_network('2001:db8::8/125'),
                 ...
                 ip_network('2001:db8:8000::/33')]

        Args:
            other: An IPv4Network or IPv6Network object of the same type.

        Returns:
            An iterator of the IPv(4|6)Network objects which is self
            minus other.

        Raises:
            TypeError: If self and other are of differing address
              versions, or if other is not a network object.
            ValueError: If other is not completely contained by self.

        """
        if not self._version == other._version:
            raise TypeError("%s and %s are not of the same version" % (
                self, other))

        if not isinstance(other, _BaseNetwork):
            raise TypeError("%s is not a network object" % other)

        if not other.subnet_of(self):
            raise ValueError('%s not contained in %s' % (other, self))
        if other == self:
            return

        # Make sure we're comparing the network of other.
        other = other.__class__('%s/%s' % (other.network_address,
                                           other.prefixlen))

        # repeatedly halve self, yielding the half that does not contain
        # other and descending into the half that does
        s1, s2 = self.subnets()
        while s1 != other and s2 != other:
            if other.subnet_of(s1):
                yield s2
                s1, s2 = s1.subnets()
            elif other.subnet_of(s2):
                yield s1
                s1, s2 = s2.subnets()
            else:
                # If we got here, there's a bug somewhere.
                raise AssertionError('Error performing exclusion: '
                                     's1: %s s2: %s other: %s' %
                                     (s1, s2, other))
        if s1 == other:
            yield s2
        elif s2 == other:
            yield s1
        else:
            # If we got here, there's a bug somewhere.
            raise AssertionError('Error performing exclusion: '
                                 's1: %s s2: %s other: %s' %
                                 (s1, s2, other))

    def compare_networks(self, other):
        """Compare two IP objects.

        This is only concerned about the comparison of the integer
        representation of the network addresses.  This means that the
        host bits aren't considered at all in this method.  If you want
        to compare host bits, you can easily enough do a
        'HostA._ip < HostB._ip'

        Args:
            other: An IP object.

        Returns:
            If the IP versions of self and other are the same, returns:

            -1 if self < other:
              eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25')
              IPv6Network('2001:db8::1000/124') <
              IPv6Network('2001:db8::2000/124')

            0 if self == other
              eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24')
              IPv6Network('2001:db8::1000/124') ==
              IPv6Network('2001:db8::1000/124')

            1 if self > other
              eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25')
              IPv6Network('2001:db8::2000/124') >
              IPv6Network('2001:db8::1000/124')

        Raises:
            TypeError if the IP versions are different.

        """
        # does this need to raise a ValueError?
        if self._version != other._version:
            raise TypeError('%s and %s are not of the same type' % (
                self, other))
        # self._version == other._version below here:
        if self.network_address < other.network_address:
            return -1
        if self.network_address > other.network_address:
            return 1
        # self.network_address == other.network_address below here:
        if self.netmask < other.netmask:
            return -1
        if self.netmask > other.netmask:
            return 1
        return 0

    def _get_networks_key(self):
        """Network-only key function.

        Returns an object that identifies this address' network and
        netmask. This function is a suitable "key" argument for sorted()
        and list.sort().

        """
        return (self._version, self.network_address, self.netmask)

    def subnets(self, prefixlen_diff=1, new_prefix=None):
        """The subnets which join to make the current subnet.

        In the case that self contains only one IP
        (self._prefixlen == 32 for IPv4 or self._prefixlen == 128
        for IPv6), yield an iterator with just ourself.

        Args:
            prefixlen_diff: An integer, the amount the prefix length
              should be increased by. This should not be set if
              new_prefix is also set.
            new_prefix: The desired new prefix length. This must be a
              larger number (smaller prefix) than the existing prefix.
              This should not be set if prefixlen_diff is also set.

        Returns:
            An iterator of IPv(4|6) objects.

        Raises:
            ValueError: The prefixlen_diff is too small or too large.
                OR
            prefixlen_diff and new_prefix are both set or new_prefix
              is a smaller number than the current prefix (smaller
              number means a larger network)

        """
        if self._prefixlen == self._max_prefixlen:
            yield self
            return

        if new_prefix is not None:
            if new_prefix < self._prefixlen:
                raise ValueError('new prefix must be longer')
            if prefixlen_diff != 1:
                raise ValueError('cannot set prefixlen_diff and new_prefix')
            prefixlen_diff = new_prefix - self._prefixlen

        if prefixlen_diff < 0:
            raise ValueError('prefix length diff must be > 0')
        new_prefixlen = self._prefixlen + prefixlen_diff

        if new_prefixlen > self._max_prefixlen:
            raise ValueError(
                'prefix length diff %d is invalid for netblock %s' % (
                    new_prefixlen, self))

        # walk the address span in subnet-sized steps
        start = int(self.network_address)
        end = int(self.broadcast_address)
        step = (int(self.hostmask) + 1) >> prefixlen_diff
        for new_addr in _compat_range(start, end, step):
            current = self.__class__((new_addr, new_prefixlen))
            yield current

    def supernet(self, prefixlen_diff=1, new_prefix=None):
        """The supernet containing the current network.

        Args:
            prefixlen_diff: An integer, the amount the prefix length of
              the network should be decreased by.  For example, given a
              /24 network and a prefixlen_diff of 3, a supernet with a
              /21 netmask is returned.

        Returns:
            An IPv4 network object.

        Raises:
            ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have
              a negative prefix length.
                OR
            If prefixlen_diff and new_prefix are both set or new_prefix is a
              larger number than the current prefix (larger number means a
              smaller network)

        """
        if self._prefixlen == 0:
            return self

        if new_prefix is not None:
            if new_prefix > self._prefixlen:
                raise ValueError('new prefix must be shorter')
            if prefixlen_diff != 1:
                raise ValueError('cannot set prefixlen_diff and new_prefix')
            prefixlen_diff = self._prefixlen - new_prefix

        new_prefixlen = self.prefixlen - prefixlen_diff
        if new_prefixlen < 0:
            raise ValueError(
                'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
                (self.prefixlen, prefixlen_diff))
        # mask the network address down to the shorter prefix
        return self.__class__((
            int(self.network_address) & (int(self.netmask) << prefixlen_diff),
            new_prefixlen
        ))

    @property
    def is_multicast(self):
        """Test if the address is reserved for multicast use.

        Returns:
            A boolean, True if the address is a multicast address.
            See RFC 2373 2.7 for details.

        """
        return (self.network_address.is_multicast and
                self.broadcast_address.is_multicast)

    def subnet_of(self, other):
        # always false if one is v4 and the other is v6.
        if self._version != other._version:
            return False
        # dealing with another network.
        if (hasattr(other, 'network_address') and
                hasattr(other, 'broadcast_address')):
            return (other.network_address <= self.network_address and
                    other.broadcast_address >= self.broadcast_address)
        # dealing with another address
        else:
            raise TypeError('Unable to test subnet containment with element '
                            'of type %s' % type(other))

    def supernet_of(self, other):
        # always false if one is v4 and the other is v6.
        if self._version != other._version:
            return False
        # dealing with another network.
        if (hasattr(other, 'network_address') and
                hasattr(other, 'broadcast_address')):
            return (other.network_address >= self.network_address and
                    other.broadcast_address <= self.broadcast_address)
        # dealing with another address
        else:
            raise TypeError('Unable to test subnet containment with element '
                            'of type %s' % type(other))

    @property
    def is_reserved(self):
        """Test if the address is otherwise IETF reserved.

        Returns:
            A boolean, True if the address is within one of the
            reserved IPv6 Network ranges.

        """
        return (self.network_address.is_reserved and
                self.broadcast_address.is_reserved)

    @property
    def is_link_local(self):
        """Test if the address is reserved for link-local.

        Returns:
            A boolean, True if the address is reserved per RFC 4291.

        """
        return (self.network_address.is_link_local and
                self.broadcast_address.is_link_local)

    @property
    def is_private(self):
        """Test if this address is allocated for private networks.

        Returns:
            A boolean, True if the address is reserved per
            iana-ipv4-special-registry or iana-ipv6-special-registry.

        """
        return (self.network_address.is_private and
                self.broadcast_address.is_private)

    @property
    def is_global(self):
        """Test if this address is allocated for public networks.

        Returns:
            A boolean, True if the address is not reserved per
            iana-ipv4-special-registry or iana-ipv6-special-registry.

        """
        return not self.is_private

    @property
    def is_unspecified(self):
        """Test if the address is unspecified.

        Returns:
            A boolean, True if this is the unspecified address as defined in
            RFC 2373 2.5.2.

        """
        return (self.network_address.is_unspecified and
                self.broadcast_address.is_unspecified)

    @property
    def is_loopback(self):
        """Test if the address is a loopback address.

        Returns:
            A boolean, True if the address is a loopback address as defined in
            RFC 2373 2.5.3.

        """
        return (self.network_address.is_loopback and
                self.broadcast_address.is_loopback)
class _BaseV4(object):
    """Base IPv4 object.

    The following methods are used by IPv4 objects in both single IP
    addresses and networks.
    """
    __slots__ = ()
    _version = 4
    # Equivalent to 255.255.255.255 or 32 bits of 1's.
    _ALL_ONES = (2 ** IPV4LENGTH) - 1
    # Whitelist of characters accepted by _parse_octet (int() alone is too lax).
    _DECIMAL_DIGITS = frozenset('0123456789')
    # the valid octets for host and netmasks. only useful for IPv4.
    _valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0])
    _max_prefixlen = IPV4LENGTH
    # There are only a handful of valid v4 netmasks, so we cache them all
    # when constructed (see _make_netmask()).
    _netmask_cache = {}
    def _explode_shorthand_ip_string(self):
        # IPv4 has no shorthand notation; the "exploded" form is just str(self).
        return _compat_str(self)
    @classmethod
    def _make_netmask(cls, arg):
        """Make a (netmask, prefix_len) tuple from the given argument.

        Argument can be:
        - an integer (the prefix length)
        - a string representing the prefix length (e.g. "24")
        - a string representing the prefix netmask (e.g. "255.255.255.0")
        """
        if arg not in cls._netmask_cache:
            if isinstance(arg, _compat_int_types):
                prefixlen = arg
            else:
                try:
                    # Check for a netmask in prefix length form
                    prefixlen = cls._prefix_from_prefix_string(arg)
                except NetmaskValueError:
                    # Check for a netmask or hostmask in dotted-quad form.
                    # This may raise NetmaskValueError.
                    prefixlen = cls._prefix_from_ip_string(arg)
            netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen))
            cls._netmask_cache[arg] = netmask, prefixlen
        return cls._netmask_cache[arg]
    @classmethod
    def _ip_int_from_string(cls, ip_str):
        """Turn the given IP string into an integer for comparison.

        Args:
            ip_str: A string, the IP ip_str.

        Returns:
            The IP ip_str as an integer.

        Raises:
            AddressValueError: if ip_str isn't a valid IPv4 Address.
        """
        if not ip_str:
            raise AddressValueError('Address cannot be empty')
        octets = ip_str.split('.')
        if len(octets) != 4:
            raise AddressValueError("Expected 4 octets in %r" % ip_str)
        try:
            return _compat_int_from_byte_vals(
                map(cls._parse_octet, octets), 'big')
        except ValueError as exc:
            # Re-raise octet-level errors with the full address for context.
            raise AddressValueError("%s in %r" % (exc, ip_str))
    @classmethod
    def _parse_octet(cls, octet_str):
        """Convert a decimal octet into an integer.

        Args:
            octet_str: A string, the number to parse.

        Returns:
            The octet as an integer.

        Raises:
            ValueError: if the octet isn't strictly a decimal from [0..255].
        """
        if not octet_str:
            raise ValueError("Empty octet not permitted")
        # Whitelist the characters, since int() allows a lot of bizarre stuff.
        if not cls._DECIMAL_DIGITS.issuperset(octet_str):
            msg = "Only decimal digits permitted in %r"
            raise ValueError(msg % octet_str)
        # We do the length check second, since the invalid character error
        # is likely to be more informative for the user
        if len(octet_str) > 3:
            msg = "At most 3 characters permitted in %r"
            raise ValueError(msg % octet_str)
        # Convert to integer (we know digits are legal)
        octet_int = int(octet_str, 10)
        # Any octets that look like they *might* be written in octal,
        # and which don't look exactly the same in both octal and
        # decimal are rejected as ambiguous
        if octet_int > 7 and octet_str[0] == '0':
            msg = "Ambiguous (octal/decimal) value in %r not permitted"
            raise ValueError(msg % octet_str)
        if octet_int > 255:
            raise ValueError("Octet %d (> 255) not permitted" % octet_int)
        return octet_int
    @classmethod
    def _string_from_ip_int(cls, ip_int):
        """Turns a 32-bit integer into dotted decimal notation.

        Args:
            ip_int: An integer, the IP address.

        Returns:
            The IP address as a string in dotted decimal notation.
        """
        # Iterating bytes yields ints on Python 3 but 1-char byte strings on
        # Python 2, hence the per-byte isinstance() branch.
        return '.'.join(_compat_str(struct.unpack(b'!B', b)[0]
                                    if isinstance(b, bytes)
                                    else b)
                        for b in _compat_to_bytes(ip_int, 4, 'big'))
    def _is_hostmask(self, ip_str):
        """Test if the IP string is a hostmask (rather than a netmask).

        Args:
            ip_str: A string, the potential hostmask.

        Returns:
            A boolean, True if the IP string is a hostmask.
        """
        bits = ip_str.split('.')
        try:
            # Keep only octets valid in a mask of either kind; any octet
            # filtered out (or non-numeric) means this is not a mask at all.
            parts = [x for x in map(int, bits) if x in self._valid_mask_octets]
        except ValueError:
            return False
        if len(parts) != len(bits):
            return False
        # Hostmask octets ascend (e.g. 0.0.0.255); netmask octets descend.
        if parts[0] < parts[-1]:
            return True
        return False
    def _reverse_pointer(self):
        """Return the reverse DNS pointer name for the IPv4 address.

        This implements the method described in RFC1035 3.5.
        """
        reverse_octets = _compat_str(self).split('.')[::-1]
        return '.'.join(reverse_octets) + '.in-addr.arpa'
    @property
    def max_prefixlen(self):
        return self._max_prefixlen
    @property
    def version(self):
        return self._version
class IPv4Address(_BaseV4, _BaseAddress):
    """Represent and manipulate single IPv4 Addresses.

    The class attribute _constants (registry of special-purpose networks
    used by the is_* properties) is attached after _IPv4Constants is
    defined later in the module.
    """
    __slots__ = ('_ip', '__weakref__')
    def __init__(self, address):
        """Instantiate a new IPv4 address object.

        Args:
            address: A string or integer representing the IP.
              Additionally, an integer can be passed, so
              IPv4Address('192.0.2.1') == IPv4Address(3221225985).
              or, more generally
              IPv4Address(int(IPv4Address('192.0.2.1'))) ==
                IPv4Address('192.0.2.1')

        Raises:
            AddressValueError: If ipaddress isn't a valid IPv4 address.
        """
        # Efficient constructor from integer.
        if isinstance(address, _compat_int_types):
            self._check_int_address(address)
            self._ip = address
            return
        # Constructing from a packed address (4 big-endian bytes).
        if isinstance(address, bytes):
            self._check_packed_address(address, 4)
            bvs = _compat_bytes_to_byte_vals(address)
            self._ip = _compat_int_from_byte_vals(bvs, 'big')
            return
        # Assume input argument to be string or any object representation
        # which converts into a formatted IP string.
        addr_str = _compat_str(address)
        if '/' in addr_str:
            raise AddressValueError("Unexpected '/' in %r" % address)
        self._ip = self._ip_int_from_string(addr_str)
    @property
    def packed(self):
        """The binary representation of this address."""
        return v4_int_to_packed(self._ip)
    @property
    def is_reserved(self):
        """Test if the address is otherwise IETF reserved.

        Returns:
            A boolean, True if the address is within the
            reserved IPv4 Network range.
        """
        return self in self._constants._reserved_network
    @property
    def is_private(self):
        """Test if this address is allocated for private networks.

        Returns:
            A boolean, True if the address is reserved per
            iana-ipv4-special-registry.
        """
        return any(self in net for net in self._constants._private_networks)
    @property
    def is_multicast(self):
        """Test if the address is reserved for multicast use.

        Returns:
            A boolean, True if the address is multicast.
            See RFC 3171 for details.
        """
        return self in self._constants._multicast_network
    @property
    def is_unspecified(self):
        """Test if the address is unspecified.

        Returns:
            A boolean, True if this is the unspecified address as defined in
            RFC 5735 3.
        """
        return self == self._constants._unspecified_address
    @property
    def is_loopback(self):
        """Test if the address is a loopback address.

        Returns:
            A boolean, True if the address is a loopback per RFC 3330.
        """
        return self in self._constants._loopback_network
    @property
    def is_link_local(self):
        """Test if the address is reserved for link-local.

        Returns:
            A boolean, True if the address is link-local per RFC 3927.
        """
        return self in self._constants._linklocal_network
class IPv4Interface(IPv4Address):
    """An IPv4 address paired with the network it lives on, e.g. '192.0.2.5/24'."""
    def __init__(self, address):
        # Integer / packed-bytes input: bare address, full-length prefix.
        if isinstance(address, (bytes, _compat_int_types)):
            IPv4Address.__init__(self, address)
            self.network = IPv4Network(self._ip)
            self._prefixlen = self._max_prefixlen
            return
        # Tuple input: (address, prefixlen) or (address,)
        if isinstance(address, tuple):
            IPv4Address.__init__(self, address[0])
            if len(address) > 1:
                self._prefixlen = int(address[1])
            else:
                self._prefixlen = self._max_prefixlen
            # strict=False: an interface address is expected to have host bits.
            self.network = IPv4Network(address, strict=False)
            self.netmask = self.network.netmask
            self.hostmask = self.network.hostmask
            return
        # String input: "a.b.c.d[/prefix-or-mask]"
        addr = _split_optional_netmask(address)
        IPv4Address.__init__(self, addr[0])
        self.network = IPv4Network(address, strict=False)
        self._prefixlen = self.network._prefixlen
        self.netmask = self.network.netmask
        self.hostmask = self.network.hostmask
    def __str__(self):
        return '%s/%d' % (self._string_from_ip_int(self._ip),
                          self.network.prefixlen)
    def __eq__(self, other):
        address_equal = IPv4Address.__eq__(self, other)
        if not address_equal or address_equal is NotImplemented:
            return address_equal
        try:
            return self.network == other.network
        except AttributeError:
            # An interface with an associated network is NOT the
            # same as an unassociated address. That's why the hash
            # takes the extra info into account.
            return False
    def __lt__(self, other):
        address_less = IPv4Address.__lt__(self, other)
        if address_less is NotImplemented:
            return NotImplemented
        try:
            return self.network < other.network
        except AttributeError:
            # We *do* allow addresses and interfaces to be sorted. The
            # unassociated address is considered less than all interfaces.
            return False
    def __hash__(self):
        # Mix in the prefix length and network so that an interface hashes
        # differently from the bare address with the same value.
        return self._ip ^ self._prefixlen ^ int(self.network.network_address)
    __reduce__ = _IPAddressBase.__reduce__
    @property
    def ip(self):
        """The bare IPv4Address, without network information."""
        return IPv4Address(self._ip)
    @property
    def with_prefixlen(self):
        """String in 'address/prefixlen' form."""
        return '%s/%s' % (self._string_from_ip_int(self._ip),
                          self._prefixlen)
    @property
    def with_netmask(self):
        """String in 'address/netmask' form."""
        return '%s/%s' % (self._string_from_ip_int(self._ip),
                          self.netmask)
    @property
    def with_hostmask(self):
        """String in 'address/hostmask' form."""
        return '%s/%s' % (self._string_from_ip_int(self._ip),
                          self.hostmask)
class IPv4Network(_BaseV4, _BaseNetwork):
    """This class represents and manipulates 32-bit IPv4 network + addresses..

    Attributes: [examples for IPv4Network('192.0.2.0/27')]
        .network_address: IPv4Address('192.0.2.0')
        .hostmask: IPv4Address('0.0.0.31')
        .broadcast_address: IPv4Address('192.0.2.31')
        .netmask: IPv4Address('255.255.255.224')
        .prefixlen: 27
    """
    # Class to use when creating address objects
    _address_class = IPv4Address
    def __init__(self, address, strict=True):
        """Instantiate a new IPv4 network object.

        Args:
            address: A string or integer representing the IP [& network].
              '192.0.2.0/24'
              '192.0.2.0/255.255.255.0'
              '192.0.0.2/0.0.0.255'
              are all functionally the same in IPv4. Similarly,
              '192.0.2.1'
              '192.0.2.1/255.255.255.255'
              '192.0.2.1/32'
              are also functionally equivalent. That is to say, failing to
              provide a subnetmask will create an object with a mask of /32.

              If the mask (portion after the / in the argument) is given in
              dotted quad form, it is treated as a netmask if it starts with a
              non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
              starts with a zero field (e.g. 0.255.255.255 == /8), with the
              single exception of an all-zero mask which is treated as a
              netmask == /0. If no mask is given, a default of /32 is used.

              Additionally, an integer can be passed, so
              IPv4Network('192.0.2.1') == IPv4Network(3221225985)
              or, more generally
              IPv4Interface(int(IPv4Interface('192.0.2.1'))) ==
                IPv4Interface('192.0.2.1')

        Raises:
            AddressValueError: If ipaddress isn't a valid IPv4 address.
            NetmaskValueError: If the netmask isn't valid for
              an IPv4 address.
            ValueError: If strict is True and a network address is not
              supplied.
        """
        _BaseNetwork.__init__(self, address)
        # Constructing from a packed address or integer
        if isinstance(address, (_compat_int_types, bytes)):
            self.network_address = IPv4Address(address)
            self.netmask, self._prefixlen = self._make_netmask(
                self._max_prefixlen)
            # fixme: address/network test here.
            return
        # Tuple input: (address, prefixlen-or-mask) or (address,)
        if isinstance(address, tuple):
            if len(address) > 1:
                arg = address[1]
            else:
                # We weren't given an address[1]
                arg = self._max_prefixlen
            self.network_address = IPv4Address(address[0])
            self.netmask, self._prefixlen = self._make_netmask(arg)
            packed = int(self.network_address)
            if packed & int(self.netmask) != packed:
                if strict:
                    raise ValueError('%s has host bits set' % self)
                else:
                    # Non-strict: silently mask off the host bits.
                    self.network_address = IPv4Address(packed &
                                                       int(self.netmask))
            return
        # Assume input argument to be string or any object representation
        # which converts into a formatted IP prefix string.
        addr = _split_optional_netmask(address)
        self.network_address = IPv4Address(self._ip_int_from_string(addr[0]))
        if len(addr) == 2:
            arg = addr[1]
        else:
            arg = self._max_prefixlen
        self.netmask, self._prefixlen = self._make_netmask(arg)
        if strict:
            if (IPv4Address(int(self.network_address) & int(self.netmask)) !=
                    self.network_address):
                raise ValueError('%s has host bits set' % self)
        self.network_address = IPv4Address(int(self.network_address) &
                                           int(self.netmask))
        if self._prefixlen == (self._max_prefixlen - 1):
            # A /31 has only two addresses and no network/broadcast split,
            # so hosts() falls back to plain iteration over both.
            self.hosts = self.__iter__
    @property
    def is_global(self):
        """Test if this address is allocated for public networks.

        Returns:
            A boolean, True if the address is not reserved per
            iana-ipv4-special-registry.
        """
        # 100.64.0.0/10 is excluded here: it is neither private nor global.
        return (not (self.network_address in IPv4Network('100.64.0.0/10') and
                     self.broadcast_address in IPv4Network('100.64.0.0/10')) and
                not self.is_private)
class _IPv4Constants(object):
    # Registry of special-purpose IPv4 networks consulted by the
    # IPv4Address is_* properties.
    _linklocal_network = IPv4Network('169.254.0.0/16')
    _loopback_network = IPv4Network('127.0.0.0/8')
    _multicast_network = IPv4Network('224.0.0.0/4')
    # Networks treated as private per iana-ipv4-special-registry.
    _private_networks = [
        IPv4Network('0.0.0.0/8'),
        IPv4Network('10.0.0.0/8'),
        IPv4Network('127.0.0.0/8'),
        IPv4Network('169.254.0.0/16'),
        IPv4Network('172.16.0.0/12'),
        IPv4Network('192.0.0.0/29'),
        IPv4Network('192.0.0.170/31'),
        IPv4Network('192.0.2.0/24'),
        IPv4Network('192.168.0.0/16'),
        IPv4Network('198.18.0.0/15'),
        IPv4Network('198.51.100.0/24'),
        IPv4Network('203.0.113.0/24'),
        IPv4Network('240.0.0.0/4'),
        IPv4Network('255.255.255.255/32'),
    ]
    _reserved_network = IPv4Network('240.0.0.0/4')
    _unspecified_address = IPv4Address('0.0.0.0')
# Attach the constants to the address class here, because the networks
# above can only be constructed once IPv4Network itself is defined.
IPv4Address._constants = _IPv4Constants
class _BaseV6(object):
    """Base IPv6 object.

    The following methods are used by IPv6 objects in both single IP
    addresses and networks.
    """
    __slots__ = ()
    _version = 6
    # All ones for the full IPV6LENGTH-bit address width.
    _ALL_ONES = (2 ** IPV6LENGTH) - 1
    _HEXTET_COUNT = 8
    # Whitelist of characters accepted by _parse_hextet (int() alone is too lax).
    _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
    _max_prefixlen = IPV6LENGTH
    # There are only a bunch of valid v6 netmasks, so we cache them all
    # when constructed (see _make_netmask()).
    _netmask_cache = {}
    @classmethod
    def _make_netmask(cls, arg):
        """Make a (netmask, prefix_len) tuple from the given argument.

        Argument can be:
        - an integer (the prefix length)
        - a string representing the prefix length (e.g. "24")
        - a string representing the prefix netmask (e.g. "255.255.255.0")
        """
        if arg not in cls._netmask_cache:
            if isinstance(arg, _compat_int_types):
                prefixlen = arg
            else:
                prefixlen = cls._prefix_from_prefix_string(arg)
            netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen))
            cls._netmask_cache[arg] = netmask, prefixlen
        return cls._netmask_cache[arg]
    @classmethod
    def _ip_int_from_string(cls, ip_str):
        """Turn an IPv6 ip_str into an integer.

        Args:
            ip_str: A string, the IPv6 ip_str.

        Returns:
            An int, the IPv6 address

        Raises:
            AddressValueError: if ip_str isn't a valid IPv6 Address.
        """
        if not ip_str:
            raise AddressValueError('Address cannot be empty')
        parts = ip_str.split(':')
        # An IPv6 address needs at least 2 colons (3 parts).
        _min_parts = 3
        if len(parts) < _min_parts:
            msg = "At least %d parts expected in %r" % (_min_parts, ip_str)
            raise AddressValueError(msg)
        # If the address has an IPv4-style suffix, convert it to hexadecimal.
        if '.' in parts[-1]:
            try:
                ipv4_int = IPv4Address(parts.pop())._ip
            except AddressValueError as exc:
                raise AddressValueError("%s in %r" % (exc, ip_str))
            # Replace the dotted-quad tail with its two 16-bit hextets.
            parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
            parts.append('%x' % (ipv4_int & 0xFFFF))
        # An IPv6 address can't have more than 8 colons (9 parts).
        # The extra colon comes from using the "::" notation for a single
        # leading or trailing zero part.
        _max_parts = cls._HEXTET_COUNT + 1
        if len(parts) > _max_parts:
            msg = "At most %d colons permitted in %r" % (
                _max_parts - 1, ip_str)
            raise AddressValueError(msg)
        # Disregarding the endpoints, find '::' with nothing in between.
        # This indicates that a run of zeroes has been skipped.
        skip_index = None
        for i in _compat_range(1, len(parts) - 1):
            if not parts[i]:
                if skip_index is not None:
                    # Can't have more than one '::'
                    msg = "At most one '::' permitted in %r" % ip_str
                    raise AddressValueError(msg)
                skip_index = i
        # parts_hi is the number of parts to copy from above/before the '::'
        # parts_lo is the number of parts to copy from below/after the '::'
        if skip_index is not None:
            # If we found a '::', then check if it also covers the endpoints.
            parts_hi = skip_index
            parts_lo = len(parts) - skip_index - 1
            if not parts[0]:
                parts_hi -= 1
                if parts_hi:
                    msg = "Leading ':' only permitted as part of '::' in %r"
                    raise AddressValueError(msg % ip_str)  # ^: requires ^::
            if not parts[-1]:
                parts_lo -= 1
                if parts_lo:
                    msg = "Trailing ':' only permitted as part of '::' in %r"
                    raise AddressValueError(msg % ip_str)  # :$ requires ::$
            parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo)
            if parts_skipped < 1:
                msg = "Expected at most %d other parts with '::' in %r"
                raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str))
        else:
            # Otherwise, allocate the entire address to parts_hi. The
            # endpoints could still be empty, but _parse_hextet() will check
            # for that.
            if len(parts) != cls._HEXTET_COUNT:
                msg = "Exactly %d parts expected without '::' in %r"
                raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str))
            if not parts[0]:
                msg = "Leading ':' only permitted as part of '::' in %r"
                raise AddressValueError(msg % ip_str)  # ^: requires ^::
            if not parts[-1]:
                msg = "Trailing ':' only permitted as part of '::' in %r"
                raise AddressValueError(msg % ip_str)  # :$ requires ::$
            parts_hi = len(parts)
            parts_lo = 0
            parts_skipped = 0
        try:
            # Now, parse the hextets into a 128-bit integer.
            ip_int = 0
            for i in range(parts_hi):
                ip_int <<= 16
                ip_int |= cls._parse_hextet(parts[i])
            ip_int <<= 16 * parts_skipped
            for i in range(-parts_lo, 0):
                ip_int <<= 16
                ip_int |= cls._parse_hextet(parts[i])
            return ip_int
        except ValueError as exc:
            raise AddressValueError("%s in %r" % (exc, ip_str))
    @classmethod
    def _parse_hextet(cls, hextet_str):
        """Convert an IPv6 hextet string into an integer.

        Args:
            hextet_str: A string, the number to parse.

        Returns:
            The hextet as an integer.

        Raises:
            ValueError: if the input isn't strictly a hex number from
              [0..FFFF].
        """
        # Whitelist the characters, since int() allows a lot of bizarre stuff.
        if not cls._HEX_DIGITS.issuperset(hextet_str):
            raise ValueError("Only hex digits permitted in %r" % hextet_str)
        # We do the length check second, since the invalid character error
        # is likely to be more informative for the user
        if len(hextet_str) > 4:
            msg = "At most 4 characters permitted in %r"
            raise ValueError(msg % hextet_str)
        # Length check means we can skip checking the integer value
        return int(hextet_str, 16)
    @classmethod
    def _compress_hextets(cls, hextets):
        """Compresses a list of hextets.

        Compresses a list of strings, replacing the longest continuous
        sequence of "0" in the list with "" and adding empty strings at
        the beginning or at the end of the string such that subsequently
        calling ":".join(hextets) will produce the compressed version of
        the IPv6 address.

        Args:
            hextets: A list of strings, the hextets to compress.

        Returns:
            A list of strings.
        """
        best_doublecolon_start = -1
        best_doublecolon_len = 0
        doublecolon_start = -1
        doublecolon_len = 0
        for index, hextet in enumerate(hextets):
            if hextet == '0':
                doublecolon_len += 1
                if doublecolon_start == -1:
                    # Start of a sequence of zeros.
                    doublecolon_start = index
                if doublecolon_len > best_doublecolon_len:
                    # This is the longest sequence of zeros so far.
                    best_doublecolon_len = doublecolon_len
                    best_doublecolon_start = doublecolon_start
            else:
                doublecolon_len = 0
                doublecolon_start = -1
        # Only compress runs of two or more zero hextets; '::' never
        # replaces a single '0'.
        if best_doublecolon_len > 1:
            best_doublecolon_end = (best_doublecolon_start +
                                    best_doublecolon_len)
            # For zeros at the end of the address.
            if best_doublecolon_end == len(hextets):
                hextets += ['']
            hextets[best_doublecolon_start:best_doublecolon_end] = ['']
            # For zeros at the beginning of the address.
            if best_doublecolon_start == 0:
                hextets = [''] + hextets
        return hextets
    @classmethod
    def _string_from_ip_int(cls, ip_int=None):
        """Turns a 128-bit integer into hexadecimal notation.

        Args:
            ip_int: An integer, the IP address.

        Returns:
            A string, the hexadecimal representation of the address.

        Raises:
            ValueError: The address is bigger than 128 bits of all ones.
        """
        if ip_int is None:
            # NOTE(review): cls._ip only exists on instances, so the
            # default-argument path presumably is only ever reached via
            # instance calls -- confirm before relying on it.
            ip_int = int(cls._ip)
        if ip_int > cls._ALL_ONES:
            raise ValueError('IPv6 address is too large')
        hex_str = '%032x' % ip_int
        hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
        hextets = cls._compress_hextets(hextets)
        return ':'.join(hextets)
    def _explode_shorthand_ip_string(self):
        """Expand a shortened IPv6 address.

        Returns:
            A string, the expanded IPv6 address.
        """
        # Pick the representative address string for the concrete type.
        if isinstance(self, IPv6Network):
            ip_str = _compat_str(self.network_address)
        elif isinstance(self, IPv6Interface):
            ip_str = _compat_str(self.ip)
        else:
            ip_str = _compat_str(self)
        ip_int = self._ip_int_from_string(ip_str)
        hex_str = '%032x' % ip_int
        parts = [hex_str[x:x + 4] for x in range(0, 32, 4)]
        if isinstance(self, (_BaseNetwork, IPv6Interface)):
            # Networks and interfaces keep their prefix length suffix.
            return '%s/%d' % (':'.join(parts), self._prefixlen)
        return ':'.join(parts)
    def _reverse_pointer(self):
        """Return the reverse DNS pointer name for the IPv6 address.

        This implements the method described in RFC3596 2.5.
        """
        # One label per nibble of the exploded address, in reverse order.
        reverse_chars = self.exploded[::-1].replace(':', '')
        return '.'.join(reverse_chars) + '.ip6.arpa'
    @property
    def max_prefixlen(self):
        return self._max_prefixlen
    @property
    def version(self):
        return self._version
class IPv6Address(_BaseV6, _BaseAddress):
    """Represent and manipulate single IPv6 Addresses.

    The class attribute _constants (registry of special-purpose networks
    used by the is_* properties) is attached after _IPv6Constants is
    defined later in the module.
    """
    __slots__ = ('_ip', '__weakref__')
    def __init__(self, address):
        """Instantiate a new IPv6 address object.

        Args:
            address: A string or integer representing the IP.
              Additionally, an integer can be passed, so
              IPv6Address('2001:db8::') ==
                IPv6Address(42540766411282592856903984951653826560)
              or, more generally
              IPv6Address(int(IPv6Address('2001:db8::'))) ==
                IPv6Address('2001:db8::')

        Raises:
            AddressValueError: If address isn't a valid IPv6 address.
        """
        # Efficient constructor from integer.
        if isinstance(address, _compat_int_types):
            self._check_int_address(address)
            self._ip = address
            return
        # Constructing from a packed address (16 big-endian bytes).
        if isinstance(address, bytes):
            self._check_packed_address(address, 16)
            bvs = _compat_bytes_to_byte_vals(address)
            self._ip = _compat_int_from_byte_vals(bvs, 'big')
            return
        # Assume input argument to be string or any object representation
        # which converts into a formatted IP string.
        addr_str = _compat_str(address)
        if '/' in addr_str:
            raise AddressValueError("Unexpected '/' in %r" % address)
        self._ip = self._ip_int_from_string(addr_str)
    @property
    def packed(self):
        """The binary representation of this address."""
        return v6_int_to_packed(self._ip)
    @property
    def is_multicast(self):
        """Test if the address is reserved for multicast use.

        Returns:
            A boolean, True if the address is a multicast address.
            See RFC 2373 2.7 for details.
        """
        return self in self._constants._multicast_network
    @property
    def is_reserved(self):
        """Test if the address is otherwise IETF reserved.

        Returns:
            A boolean, True if the address is within one of the
            reserved IPv6 Network ranges.
        """
        return any(self in x for x in self._constants._reserved_networks)
    @property
    def is_link_local(self):
        """Test if the address is reserved for link-local.

        Returns:
            A boolean, True if the address is reserved per RFC 4291.
        """
        return self in self._constants._linklocal_network
    @property
    def is_site_local(self):
        """Test if the address is reserved for site-local.

        Note that the site-local address space has been deprecated by RFC 3879.
        Use is_private to test if this address is in the space of unique local
        addresses as defined by RFC 4193.

        Returns:
            A boolean, True if the address is reserved per RFC 3513 2.5.6.
        """
        return self in self._constants._sitelocal_network
    @property
    def is_private(self):
        """Test if this address is allocated for private networks.

        Returns:
            A boolean, True if the address is reserved per
            iana-ipv6-special-registry.
        """
        return any(self in net for net in self._constants._private_networks)
    @property
    def is_global(self):
        """Test if this address is allocated for public networks.

        Returns:
            A boolean, true if the address is not reserved per
            iana-ipv6-special-registry.
        """
        return not self.is_private
    @property
    def is_unspecified(self):
        """Test if the address is unspecified.

        Returns:
            A boolean, True if this is the unspecified address as defined in
            RFC 2373 2.5.2.
        """
        # The unspecified address is '::' (all zero bits).
        return self._ip == 0
    @property
    def is_loopback(self):
        """Test if the address is a loopback address.

        Returns:
            A boolean, True if the address is a loopback address as defined in
            RFC 2373 2.5.3.
        """
        # The loopback address is '::1'.
        return self._ip == 1
    @property
    def ipv4_mapped(self):
        """Return the IPv4 mapped address.

        Returns:
            If the IPv6 address is a v4 mapped address, return the
            IPv4 mapped address. Return None otherwise.
        """
        # v4-mapped addresses have the form ::ffff:a.b.c.d.
        if (self._ip >> 32) != 0xFFFF:
            return None
        return IPv4Address(self._ip & 0xFFFFFFFF)
    @property
    def teredo(self):
        """Tuple of embedded teredo IPs.

        Returns:
            Tuple of the (server, client) IPs or None if the address
            doesn't appear to be a teredo address (doesn't start with
            2001::/32)
        """
        if (self._ip >> 96) != 0x20010000:
            return None
        # Server address in bits 64..95; the client address is stored
        # bit-inverted in the low 32 bits.
        return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
                IPv4Address(~self._ip & 0xFFFFFFFF))
    @property
    def sixtofour(self):
        """Return the IPv4 6to4 embedded address.

        Returns:
            The IPv4 6to4-embedded address if present or None if the
            address doesn't appear to contain a 6to4 embedded address.
        """
        # 6to4 addresses start with the 2002::/16 prefix.
        if (self._ip >> 112) != 0x2002:
            return None
        return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
class IPv6Interface(IPv6Address):
    """An IPv6 address paired with the network it lives on, e.g. '2001:db8::1/64'."""
    def __init__(self, address):
        # Integer / packed-bytes input: bare address, full-length prefix.
        if isinstance(address, (bytes, _compat_int_types)):
            IPv6Address.__init__(self, address)
            self.network = IPv6Network(self._ip)
            self._prefixlen = self._max_prefixlen
            return
        # Tuple input: (address, prefixlen) or (address,)
        if isinstance(address, tuple):
            IPv6Address.__init__(self, address[0])
            if len(address) > 1:
                self._prefixlen = int(address[1])
            else:
                self._prefixlen = self._max_prefixlen
            # strict=False: an interface address is expected to have host bits.
            self.network = IPv6Network(address, strict=False)
            self.netmask = self.network.netmask
            self.hostmask = self.network.hostmask
            return
        # String input: "addr[/prefix]"
        addr = _split_optional_netmask(address)
        IPv6Address.__init__(self, addr[0])
        self.network = IPv6Network(address, strict=False)
        self.netmask = self.network.netmask
        self._prefixlen = self.network._prefixlen
        self.hostmask = self.network.hostmask
    def __str__(self):
        return '%s/%d' % (self._string_from_ip_int(self._ip),
                          self.network.prefixlen)
    def __eq__(self, other):
        address_equal = IPv6Address.__eq__(self, other)
        if not address_equal or address_equal is NotImplemented:
            return address_equal
        try:
            return self.network == other.network
        except AttributeError:
            # An interface with an associated network is NOT the
            # same as an unassociated address. That's why the hash
            # takes the extra info into account.
            return False
    def __lt__(self, other):
        address_less = IPv6Address.__lt__(self, other)
        if address_less is NotImplemented:
            return NotImplemented
        try:
            return self.network < other.network
        except AttributeError:
            # We *do* allow addresses and interfaces to be sorted. The
            # unassociated address is considered less than all interfaces.
            return False
    def __hash__(self):
        # Mix in the prefix length and network so that an interface hashes
        # differently from the bare address with the same value.
        return self._ip ^ self._prefixlen ^ int(self.network.network_address)
    __reduce__ = _IPAddressBase.__reduce__
    @property
    def ip(self):
        """The bare IPv6Address, without network information."""
        return IPv6Address(self._ip)
    @property
    def with_prefixlen(self):
        """String in 'address/prefixlen' form."""
        return '%s/%s' % (self._string_from_ip_int(self._ip),
                          self._prefixlen)
    @property
    def with_netmask(self):
        """String in 'address/netmask' form."""
        return '%s/%s' % (self._string_from_ip_int(self._ip),
                          self.netmask)
    @property
    def with_hostmask(self):
        """String in 'address/hostmask' form."""
        return '%s/%s' % (self._string_from_ip_int(self._ip),
                          self.hostmask)
    @property
    def is_unspecified(self):
        # Both the address and its associated network must be unspecified.
        return self._ip == 0 and self.network.is_unspecified
    @property
    def is_loopback(self):
        # Both the address and its associated network must be loopback.
        return self._ip == 1 and self.network.is_loopback
class IPv6Network(_BaseV6, _BaseNetwork):
    """This class represents and manipulates 128-bit IPv6 networks.

    Attributes: [examples for IPv6('2001:db8::1000/124')]
        .network_address: IPv6Address('2001:db8::1000')
        .hostmask: IPv6Address('::f')
        .broadcast_address: IPv6Address('2001:db8::100f')
        .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0')
        .prefixlen: 124
    """
    # Class to use when creating address objects
    _address_class = IPv6Address
    def __init__(self, address, strict=True):
        """Instantiate a new IPv6 Network object.

        Args:
            address: A string or integer representing the IPv6 network or the
              IP and prefix/netmask.
              '2001:db8::/128'
              '2001:db8:0000:0000:0000:0000:0000:0000/128'
              '2001:db8::'
              are all functionally the same in IPv6. That is to say,
              failing to provide a subnetmask will create an object with
              a mask of /128.

              Additionally, an integer can be passed, so
              IPv6Network('2001:db8::') ==
                IPv6Network(42540766411282592856903984951653826560)
              or, more generally
              IPv6Network(int(IPv6Network('2001:db8::'))) ==
                IPv6Network('2001:db8::')

            strict: A boolean. If true, ensure that we have been passed
              A true network address, eg, 2001:db8::1000/124 and not an
              IP address on a network, eg, 2001:db8::1/124.

        Raises:
            AddressValueError: If address isn't a valid IPv6 address.
            NetmaskValueError: If the netmask isn't valid for
              an IPv6 address.
            ValueError: If strict was True and a network address was not
              supplied.
        """
        _BaseNetwork.__init__(self, address)
        # Efficient constructor from integer or packed address
        if isinstance(address, (bytes, _compat_int_types)):
            self.network_address = IPv6Address(address)
            self.netmask, self._prefixlen = self._make_netmask(
                self._max_prefixlen)
            return
        # Tuple input: (address, prefixlen-or-mask) or (address,)
        if isinstance(address, tuple):
            if len(address) > 1:
                arg = address[1]
            else:
                arg = self._max_prefixlen
            self.netmask, self._prefixlen = self._make_netmask(arg)
            self.network_address = IPv6Address(address[0])
            packed = int(self.network_address)
            if packed & int(self.netmask) != packed:
                if strict:
                    raise ValueError('%s has host bits set' % self)
                else:
                    # Non-strict: silently mask off the host bits.
                    self.network_address = IPv6Address(packed &
                                                      int(self.netmask))
            return
        # Assume input argument to be string or any object representation
        # which converts into a formatted IP prefix string.
        addr = _split_optional_netmask(address)
        self.network_address = IPv6Address(self._ip_int_from_string(addr[0]))
        if len(addr) == 2:
            arg = addr[1]
        else:
            arg = self._max_prefixlen
        self.netmask, self._prefixlen = self._make_netmask(arg)
        if strict:
            if (IPv6Address(int(self.network_address) & int(self.netmask)) !=
                    self.network_address):
                raise ValueError('%s has host bits set' % self)
        self.network_address = IPv6Address(int(self.network_address) &
                                           int(self.netmask))
        if self._prefixlen == (self._max_prefixlen - 1):
            # A /127 has only two addresses; both are usable hosts, so
            # hosts() falls back to plain iteration.
            self.hosts = self.__iter__
    def hosts(self):
        """Generate Iterator over usable hosts in a network.

        This is like __iter__ except it doesn't return the
        Subnet-Router anycast address.
        """
        network = int(self.network_address)
        broadcast = int(self.broadcast_address)
        # Skip the all-zeros host; the top of the range IS included
        # because IPv6 has no broadcast address.
        for x in _compat_range(network + 1, broadcast + 1):
            yield self._address_class(x)
    @property
    def is_site_local(self):
        """Test if the address is reserved for site-local.

        Note that the site-local address space has been deprecated by RFC 3879.
        Use is_private to test if this address is in the space of unique local
        addresses as defined by RFC 4193.

        Returns:
            A boolean, True if the address is reserved per RFC 3513 2.5.6.
        """
        return (self.network_address.is_site_local and
                self.broadcast_address.is_site_local)
class _IPv6Constants(object):
    # Well-known IPv6 ranges backing the IPv6Address/IPv6Network
    # classification properties (is_link_local, is_private, ...).

    # RFC 4291 link-local unicast prefix.
    _linklocal_network = IPv6Network('fe80::/10')

    # RFC 4291 multicast prefix.
    _multicast_network = IPv6Network('ff00::/8')

    # Ranges considered private: loopback, unspecified, IPv4-mapped,
    # discard-only, IETF protocol assignments, benchmarking,
    # documentation, ORCHID, unique-local and link-local.
    _private_networks = [
        IPv6Network('::1/128'),
        IPv6Network('::/128'),
        IPv6Network('::ffff:0:0/96'),
        IPv6Network('100::/64'),
        IPv6Network('2001::/23'),
        IPv6Network('2001:2::/48'),
        IPv6Network('2001:db8::/32'),
        IPv6Network('2001:10::/28'),
        IPv6Network('fc00::/7'),
        IPv6Network('fe80::/10'),
    ]

    # IETF-reserved ranges (per RFC 4291 allocation table).
    _reserved_networks = [
        IPv6Network('::/8'), IPv6Network('100::/8'),
        IPv6Network('200::/7'), IPv6Network('400::/6'),
        IPv6Network('800::/5'), IPv6Network('1000::/4'),
        IPv6Network('4000::/3'), IPv6Network('6000::/3'),
        IPv6Network('8000::/3'), IPv6Network('A000::/3'),
        IPv6Network('C000::/3'), IPv6Network('E000::/4'),
        IPv6Network('F000::/5'), IPv6Network('F800::/6'),
        IPv6Network('FE00::/9'),
    ]

    # Deprecated site-local prefix (RFC 3879).
    _sitelocal_network = IPv6Network('fec0::/10')
# Attach the constant networks to the address class so its
# classification properties can reference them.
IPv6Address._constants = _IPv6Constants
opensvc-1.8~20170412/lib/resDiskRadosLinux.py 0000644 0001750 0001750 00000021053 13073467726 021021 0 ustar jkelbert jkelbert import resDisk
import os
import rcStatus
import rcExceptions as ex
import json
from rcGlobalEnv import *
from rcUtilities import justcall
class Disk(resDisk.Disk):
    """Ceph RADOS block device (rbd) image set resource.

    Maps the configured pool/image rbd images on start and unmaps
    them on stop, shelling out to the "rbd" command with a cephx
    client id and an optional keyring.
    """
    def __init__(self,
                 rid=None,
                 images=None,
                 client_id=None,
                 keyring=None,
                 **kwargs):
        resDisk.Disk.__init__(self,
                              rid=rid,
                              type="disk.vg",
                              **kwargs)
        # default changed from the shared mutable set([]) default
        # argument to None, normalized here
        self.images = set() if images is None else images
        self.keyring = keyring
        # the rbd -n option expects a "client.<name>" formatted id;
        # guard against an unset client_id instead of crashing on
        # None.startswith()
        if client_id and not client_id.startswith("client."):
            client_id = "client." + client_id
        self.client_id = client_id
        self.label = self.fmt_label()
        self.modprobe_done = False

    def validate_image_fmt(self):
        """Raise excError if any image is not in pool/image format."""
        invalid = []
        for image in self.images:
            if "/" not in image:
                invalid.append(image)
        if len(invalid):
            raise ex.excError("wrong format (expected pool/image): "+", ".join(invalid))

    def fmt_label(self):
        """Return the resource label listing the managed images."""
        return "rados images: " + ", ".join(self.images)

    def modprobe(self):
        """Load the rbd kernel module if not already loaded."""
        if self.modprobe_done:
            return
        cmd = ["lsmod"]
        ret, out, err = self.call(cmd)
        if ret != 0:
            raise ex.excError("lsmod failed")
        if "rbd" in out.split():
            # no need to load (already loaded or compiled-in)
            return
        cmd = ["modprobe", "rbd"]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ex.excError("failed to load rbd device driver")
        self.modprobe_done = True

    def showmapped(self, refresh=False):
        """Return the mapped images as a dict indexed by pool/name.

        The result is cached on the instance unless refresh is True.
        """
        if not refresh and hasattr(self, "mapped_data"):
            return self.mapped_data
        self.modprobe()
        cmd = ["rbd", "showmapped", "--format", "json"]
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError("rbd showmapped failed: "+err)
        try:
            _data = json.loads(out)
        except Exception as e:
            raise ex.excError(str(e))
        data = {}
        for img_id, img_data in _data.items():
            data[img_data["pool"]+"/"+img_data["name"]] = img_data
        self.mapped_data = data
        return data

    def rbd_rcmd(self):
        """Return the rbd command prefix carrying the auth options."""
        l = ["rbd", "-n", self.client_id]
        if self.keyring:
            l += ["--keyring", self.keyring]
        return l

    def exists(self, image):
        """Return True if the image exists in the cluster."""
        cmd = self.rbd_rcmd() + ["info", image]
        out, err, ret = justcall(cmd)
        return ret == 0

    def has_it(self, image):
        """Return True if the image is currently mapped."""
        return image in self.showmapped()

    def up_count(self):
        """Return the list of configured images currently mapped."""
        mapped = self.showmapped()
        l = []
        for image in self.images:
            if image in mapped:
                l.append(image)
        return l

    def _status(self, verbose=False):
        """Return UP/DOWN (or stdby variants), WARN if only part of
        the images are mapped or the image list is malformed.
        """
        try:
            self.validate_image_fmt()
        except Exception as e:
            self.status_log(str(e))
            return rcStatus.WARN
        l = self.up_count()
        n = len(l)
        unmapped = sorted(list(set(self.images) - set(l)))
        if n == len(self.images):
            if rcEnv.nodename in self.always_on:
                return rcStatus.STDBY_UP
            return rcStatus.UP
        elif n == 0:
            if rcEnv.nodename in self.always_on:
                return rcStatus.STDBY_DOWN
            return rcStatus.DOWN
        else:
            self.status_log("unmapped: "+", ".join(unmapped))
            return rcStatus.WARN

    def devname(self, image):
        """Return the /dev/rbd/<pool>/<image> device path."""
        return os.path.join(os.sep, "dev", "rbd", image)

    def do_start_one(self, image):
        """Map one image, no-op if already mapped."""
        mapped = self.showmapped()
        if image in mapped:
            self.log.info(image+" is already mapped")
            return
        cmd = self.rbd_rcmd() + ["map", image]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ex.excError("failed to map %s" % self.devname(image))

    def do_start(self):
        self.validate_image_fmt()
        for image in self.images:
            self.do_start_one(image)
            self.can_rollback = True
        self.showmapped(refresh=True)

    def do_stop_one(self, image):
        """Unmap one image, no-op if already unmapped."""
        mapped = self.showmapped()
        if image not in mapped:
            self.log.info(image+" is already unmapped")
            return
        cmd = ["rbd", "unmap", self.devname(image)]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ex.excError("failed to unmap %s" % self.devname(image))

    def do_stop(self):
        self.validate_image_fmt()
        for image in self.images:
            self.do_stop_one(image)
        self.showmapped(refresh=True)

    def disklist(self):
        """Return the set of disk names, formatted rbd.<pool>.<image>."""
        l = set([])
        for image in self.images:
            l.add(".".join(("rbd", image.replace("/", "."))))
        return l

    def devlist(self):
        """Return the set of resolved device paths."""
        l = set([])
        for image in self.images:
            l.add(os.path.realpath(self.devname(image)))
        return l

    def provision(self):
        # delegate to the rados provisioning driver
        m = __import__("provDiskRadosLinux")
        prov = getattr(m, "ProvisioningDisk")(self)
        prov.provisioner()
class DiskLock(Disk):
    """Ceph rbd advisory lock resource.

    Takes a lock tagged with the local nodename on each image on
    start, and releases the local node's locks on stop.
    """
    def __init__(self,
                 rid=None,
                 type="disk.lock",
                 images=set([]),
                 client_id=None,
                 keyring=None,
                 lock=None,
                 lock_shared_tag=None,
                 optional=False,
                 disabled=False,
                 tags=set([]),
                 always_on=set([]),
                 monitor=False,
                 restart=0,
                 subset=None):
        self.lock = lock
        self.lock_shared_tag = lock_shared_tag
        Disk.__init__(self,
                      rid=rid,
                      type=type,
                      images=images,
                      client_id=client_id,
                      keyring=keyring,
                      optional=optional,
                      disabled=disabled,
                      tags=tags,
                      always_on=always_on,
                      monitor=monitor,
                      restart=restart,
                      subset=subset)
        self.label = self.fmt_label()
        # images found unlocked by the last up_count() pass
        self.unlocked = []

    def fmt_label(self):
        return str(self.lock) + " lock on " + Disk.fmt_label(self)

    def locklist(self, image):
        """Return the image lock list as a dict indexed by locker name."""
        cmd = self.rbd_rcmd() + ["lock", "list", image, "--format", "json"]
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError("rbd lock list failed")
        data = {}
        try:
            data = json.loads(out)
        except Exception as e:
            raise ex.excError(str(e))
        return data

    def has_lock(self, image):
        """Return True if the local node holds a lock on the image."""
        data = self.locklist(image)
        if rcEnv.nodename in data:
            return True
        self.unlocked.append(image)
        return False

    def up_count(self):
        """Return the number of images locked by the local node."""
        n = 0
        for image in self.images:
            if self.has_lock(image):
                n += 1
        return n

    def _status(self, verbose=False):
        n = self.up_count()
        if n == len(self.images):
            if rcEnv.nodename in self.always_on:
                return rcStatus.STDBY_UP
            return rcStatus.UP
        elif n == 0:
            if rcEnv.nodename in self.always_on:
                return rcStatus.STDBY_DOWN
            return rcStatus.DOWN
        else:
            self.status_log("unlocked: "+", ".join(self.unlocked))
            return rcStatus.WARN

    def do_stop_one(self, image):
        """Release the local node's locks on <image>.

        Fixed: the loop condition was 'len(data) > 0 or i>20', which
        made the retry bound inoperative and could loop forever (or
        KeyError) once only foreign locks remained. Retry at most 20
        times and stop as soon as the local node holds no lock.
        """
        data = self.locklist(image)
        if rcEnv.nodename not in data:
            self.log.info(image+" is already unlocked")
            return
        i = 0
        while len(data) > 0 and i < 20:
            i += 1
            if rcEnv.nodename not in data:
                # remaining locks belong to other nodes
                break
            cmd = self.rbd_rcmd() + ["lock", "remove", image, rcEnv.nodename, data[rcEnv.nodename]["locker"]]
            ret, out, err = self.vcall(cmd)
            if ret != 0:
                raise ex.excError("failed to unlock %s" % self.devname(image))
            data = self.locklist(image)

    def do_start_one(self, image):
        """Take a lock on <image>, no-op if already locked locally."""
        data = self.locklist(image)
        if rcEnv.nodename in data:
            self.log.info(image+" is already locked")
            return
        cmd = self.rbd_rcmd() + ["lock", "add", image, rcEnv.nodename]
        if self.lock_shared_tag:
            cmd += ["--shared", self.lock_shared_tag]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ex.excError("failed to lock %s" % self.devname(image))

    def provision(self):
        # locks are not a provisionable resource
        return
opensvc-1.8~20170412/lib/checkLagHP-UX.py 0000644 0001750 0001750 00000003547 13073467726 017677 0 ustar jkelbert jkelbert import checks
import os
from rcUtilities import justcall, which
from rcGlobalEnv import rcEnv
import glob
import rcEthtool
class check(checks.check):
    # Reports, for each HP-UX APA link aggregate (lag) carrying an
    # inet address, the number of member interfaces in UP state.
    chk_type = "lag"

    def do_check(self):
        """Build the list of checker result dicts, one per aggregate,
        with chk_value set to the count of UP member links.
        """
        if not which("lanscan"):
            return []
        # lanscan -q: one line per aggregate, first word is the lag
        # interface, following words are the member interfaces
        cmd = ["lanscan", "-q"]
        out, err, ret = justcall(cmd)
        if ret != 0:
            return self.undef
        r = []
        self.lag = {}
        for line in out.split("\n"):
            if len(line) == 0:
                continue
            l = line.split()
            n = len(l)
            if n < 2:
                # not apa
                continue
            if self.has_inet(l[0]):
                self.lag[l[0]] = l[1:]
        # lanscan -v: verbose listing, used to get each lanN
        # interface state (field 3 = name, field 2 = state)
        cmd = ["lanscan", "-v"]
        out, err, ret = justcall(cmd)
        if ret != 0:
            return self.undef
        self.intf_status = {}
        for line in out.split("\n"):
            # skip non-ethernet lines and hex-address continuation lines
            if 'ETHER' not in line or line.startswith('0x'):
                continue
            l = line.split()
            n = len(l)
            if n < 5 or not l[3].startswith('lan'):
                continue
            intf = l[3].replace('lan', '')
            status = l[2]
            self.intf_status[intf] = status
        # count UP slaves per aggregate
        for intf, slaves in self.lag.items():
            i = 0
            for slave in slaves:
                if slave in self.intf_status and self.intf_status[slave] == 'UP':
                    i += 1
            inst = "lan" + intf + ".paths"
            val = str(i)
            r.append({
                'chk_instance': inst,
                'chk_value': val,
                'chk_svcname': '',
            })
        return r

    def has_inet(self, intf):
        """Return True if interface lan<intf> has an inet address."""
        cmd = ["ifconfig", "lan"+intf]
        out, err, ret = justcall(cmd)
        if ret != 0:
            return False
        if 'inet' in out:
            return True
        return False
opensvc-1.8~20170412/lib/rcSysReportLinux.py 0000644 0001750 0001750 00000000230 13073467726 020715 0 ustar jkelbert jkelbert import rcSysReport
class SysReport(rcSysReport.SysReport):
    # Linux sysreport: no platform-specific behavior, the generic
    # rcSysReport implementation is used as-is.
    def __init__(self, node=None):
        rcSysReport.SysReport.__init__(self, node=node)
opensvc-1.8~20170412/lib/rcNode.py 0000644 0001750 0001750 00000001737 13073467726 016625 0 ustar jkelbert jkelbert import os
import sys
from rcGlobalEnv import rcEnv
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
def node_get_node_env():
    """Return the node environment (PRD/TST/...) read from node.conf.

    Falls back to 'TST' when the file or the option is absent. The
    'host_mode' option of the [node] section is honored as a
    deprecated alias of 'env'.
    """
    import codecs
    parser = ConfigParser.RawConfigParser()
    if not os.path.exists(rcEnv.nodeconf):
        return 'TST'
    with codecs.open(rcEnv.nodeconf, "r", "utf8") as cf:
        # read_file replaced readfp in python3 configparser
        if sys.version_info[0] >= 3:
            parser.read_file(cf)
        else:
            parser.readfp(cf)
    if parser.has_section('node'):
        for opt in ('env', 'host_mode'):
            if parser.has_option('node', opt):
                return parser.get('node', opt)
    elif parser.has_option('DEFAULT', 'env'):
        return parser.get('DEFAULT', 'env')
    return 'TST'
def discover_node():
    """Fill rcEnv class with information from node discovery.

    The guard used to test hasattr(rcEnv, "env") while the code sets
    rcEnv.node_env, so the memoization never took effect and the
    configuration file was re-parsed on every call. Test the
    attribute actually written instead.
    """
    if not hasattr(rcEnv, "node_env"):
        rcEnv.node_env = node_get_node_env()
opensvc-1.8~20170412/lib/hostidHP-UX.py 0000644 0001750 0001750 00000000575 13073467726 017466 0 ustar jkelbert jkelbert from rcUtilities import which
from subprocess import *
def hostid():
    """Return a host id derived from the machine serial number.

    Falls back to '1' when getconf is unavailable, fails, or prints
    nothing usable.
    """
    if which('getconf') is None:
        return '1'
    cmd = ['getconf', 'MACHINE_SERIAL']
    p = Popen(cmd, stderr=None, stdout=PIPE, close_fds=True)
    buff = p.communicate()
    # check the exit code before parsing: the original indexed the
    # output first and could raise IndexError on empty output
    if p.returncode != 0:
        return '1'
    words = buff[0].split()
    if len(words) == 0:
        return '1'
    sn = words[0]
    return str(hex(abs(sn.__hash__()))).replace('0x', '')
opensvc-1.8~20170412/lib/rcNsr.py 0000644 0001750 0001750 00000002233 13073467726 016472 0 ustar jkelbert jkelbert from rcUtilities import justcall, which
import rcExceptions as ex
import os
import socket
class Nsr(object):
    # EMC NetWorker collector: dumps the last day's savesets via
    # mminfo, with client names resolved to IP addresses.
    def __init__(self):
        if not which('mminfo'):
            raise ex.excError('mminfo not found')
        # keys advertised to the collector framework
        self.keys = ['mminfo']

    def get_mminfo(self):
        """Return the mminfo report for the last day, one saveset per
        line, ';'-separated, with the client field rewritten to its
        resolved IP address when resolvable.
        """
        # force a parseable date format in mminfo output
        os.environ["LC_TIME"] = "en_DK"
        cmd = ['mminfo', '-x', 'c;', '-q', 'savetime>=last day', '-r', 'client,name,group,totalsize,savetime(30),ssretent(30),volume,level,ssid(53)']
        print(' '.join(cmd))
        # drop the header line
        lines = justcall(cmd)[0].split('\n')[1:]
        for li, line in enumerate(lines):
            if len(line) == 0:
                continue
            try:
                i = line.index(';')
            except ValueError:
                continue
            client = line[:i]
            try:
                a = socket.getaddrinfo(client, None)
            except socket.gaierror:
                a = []
            if len(a) > 0:
                # first address of the first addrinfo tuple
                ip = a[0][-1][0]
            else:
                ip = client
            lines[li] = ip + line[i:]
        # NOTE(review): `unicode` exists only in Python 2; this method
        # raises NameError under Python 3 -- confirm before porting
        return unicode('\n'.join(lines), errors='ignore')
if __name__ == "__main__":
    # ad-hoc test: dump the mminfo report for the last day
    o = Nsr()
    print(o.get_mminfo())
opensvc-1.8~20170412/lib/resSyncRsync.py 0000644 0001750 0001750 00000040030 13073467726 020045 0 ustar jkelbert jkelbert import os
import logging
from rcGlobalEnv import rcEnv
from rcUtilities import which, justcall
import rcExceptions as ex
import rcStatus
import datetime
import resSync
def lookup_snap_mod():
    """Import and return the snapshot driver module matching the
    local operating system. Raise excError on unsupported OS.
    """
    drivers = {
        'Linux': 'snapLvmLinux',
        'HP-UX': 'snapVxfsHP-UX',
        'AIX': 'snapJfs2AIX',
        'SunOS': 'snapZfsSunOS',
        'FreeBSD': 'snapZfsSunOS',
        'OSF1': 'snapAdvfsOSF1',
    }
    try:
        modname = drivers[rcEnv.sysname]
    except KeyError:
        raise ex.excError
    return __import__(modname)
def get_timestamp_filename(self, node):
    """Return the path of the file recording the last sync date of
    this resource to <node>.
    """
    return os.path.join(rcEnv.pathvar, 'sync', node,
                        self.svc.svcname + '!' + self.rid)
def add_sudo_rsync_path(options):
    """Return the rsync options with the remote rsync wrapped in sudo.

    If no --rsync-path option is present, one is appended. Otherwise
    the existing value (either '--rsync-path=VAL' or '--rsync-path',
    'VAL') is normalized to the two-element form and prefixed with
    "sudo" unless already present.

    Fixes: split on the first '=' only, so values containing '='
    (ex: --rsync-path='env FOO=1 rsync') are no longer mangled; the
    caller's list is no longer mutated in place.
    """
    if "--rsync-path" not in " ".join(options):
        return options + ['--rsync-path', 'sudo rsync']
    new = []
    skip = False
    for i, w in enumerate(options):
        if skip:
            # this word was consumed as the previous --rsync-path value
            skip = False
            continue
        if w.startswith('--rsync-path'):
            if "=" in w:
                val = w.split("=", 1)[1]
            elif len(options) > i+1:
                val = options[i+1]
                skip = True
            else:
                raise ex.excError("malformed --rsync-path value")
            if "sudo " not in val:
                val = val.strip("'")
                val = val.strip('"')
                val = "sudo " + val
            new += ['--rsync-path', val]
        else:
            new.append(w)
    return new
def get_timestamp(self, node):
    """Return the last sync date of this resource to <node> as a
    datetime, or None if never synced or unreadable.
    """
    sync_timestamp_f = get_timestamp_filename(self, node)
    if not os.path.exists(sync_timestamp_f):
        return None
    try:
        # the with statement closes the file: the original's explicit
        # f.close() and duplicated return were redundant
        with open(sync_timestamp_f, 'r') as f:
            d = f.read()
        return datetime.datetime.strptime(d, "%Y-%m-%d %H:%M:%S.%f\n")
    except Exception:
        # narrowed from a bare except: that also swallowed
        # KeyboardInterrupt/SystemExit
        self.log.info("failed get last sync date for %s to %s" % (self.src, node))
        return None
class Rsync(resSync.Sync):
    """Defines a rsync job from local node to its remote nodes. Target nodes
    can be restricted to production sibblings or to disaster recovery nodes,
    or both.
    """
    def node_can_sync(self, node):
        # a node is syncable when the last sync is older than the
        # minimum delay enforced by skip_sync()
        ts = get_timestamp(self, node)
        return not self.skip_sync(ts)

    def node_need_sync(self, node):
        # a node needs a sync when the last sync is older than the
        # alert threshold
        ts = get_timestamp(self, node)
        return self.alert_sync(ts)

    def can_sync(self, target=None):
        """Return True if at least one node is currently syncable for
        <target> ('nodes', 'drpnodes', or both when None).
        """
        targets = set([])
        if target is None:
            targets = self.nodes_to_sync('nodes')
            targets |= self.nodes_to_sync('drpnodes')
        else:
            targets = self.nodes_to_sync(target)
        if len(targets) == 0:
            return False
        return True

    def nodes_to_sync(self, target=None, state="syncable", status=False):
        """ Checks are ordered by cost
        """
        # cheapest checks first, remote probes last
        if self.skip or self.is_disabled():
            return set([])
        """ DRP nodes are not allowed to sync nodes nor drpnodes
        """
        if rcEnv.nodename in self.svc.drpnodes:
            return set([])
        self.pre_sync_check_flex_primary()
        """Discard the local node from the set
        """
        if target in self.target.keys():
            targets = self.target[target].copy()
        else:
            return set([])
        targets -= set([rcEnv.nodename])
        if len(targets) == 0:
            return set([])
        # filter on per-node sync state
        for node in targets.copy():
            if state == "syncable" and not self.node_can_sync(node):
                targets -= set([node])
                continue
            elif state == "late" and not self.node_need_sync(node):
                targets -= set([node])
                continue
        if len(targets) == 0:
            return set([])
        """Accept to sync from here only if the service is up
           Also accept n/a status, because it's what the avail status
           ends up to be when only sync#* are specified using --rid
           sync#i1 is an exception, because we want all prd nodes to
           sync their system files to all drpnodes regardless of the service
           state
        """
        s = self.svc.group_status(excluded_groups=set(["sync", "hb", "app"]))
        if not self.svc.options.force and \
           s['avail'].status not in [rcStatus.UP, rcStatus.NA] and \
           self.rid != "sync#i1":
            if s['avail'].status == rcStatus.WARN:
                if not self.svc.options.cron:
                    self.log.info("won't sync this resource service in warn status")
            elif not self.svc.options.cron:
                self.log.info("won't sync this resource for a service not up")
            return set([])
        # costly remote probes, skipped when only computing status
        for node in targets.copy():
            if not status and not self.remote_node_env(node, target):
                targets -= set([node])
                continue
            if not status and not self.remote_fs_mounted(node):
                targets -= set([node])
                continue
        if len(targets) == 0:
            return set([])
        return targets

    def bwlimit_option(self):
        """Return the --bwlimit option list, resource setting taking
        precedence over the service-level setting.
        """
        if self.bwlimit is not None:
            bwlimit = [ '--bwlimit='+str(self.bwlimit) ]
        elif self.svc.bwlimit is not None:
            bwlimit = [ '--bwlimit='+str(self.svc.bwlimit) ]
        else:
            bwlimit = []
        return bwlimit

    def mangle_options(self, ruser):
        """Return the effective rsync options for a remote user:
        sudo wrapping for non-root, bandwidth limit, and the -e
        remote shell option when not already set.
        """
        options = self.get_options()
        if ruser != "root":
            options = add_sudo_rsync_path(options)
        options += self.bwlimit_option()
        if '-e' in options:
            return options
        if rcEnv.rsh.startswith("/usr/bin/ssh") and rcEnv.sysname == "SunOS":
            # SunOS "ssh -n" doesn't work with rsync
            rsh = rcEnv.rsh.replace("-n", "")
        else:
            rsh = rcEnv.rsh
        options += ['-e', rsh]
        return options

    def sync_timestamp(self, node):
        """Record the sync date locally and push the timestamp files
        to the remote node.
        """
        sync_timestamp_f = get_timestamp_filename(self, node)
        sync_timestamp_d = os.path.dirname(sync_timestamp_f)
        sync_timestamp_d_src = os.path.join(rcEnv.pathvar, 'sync', rcEnv.nodename)
        sync_timestamp_f_src = os.path.join(sync_timestamp_d_src, self.svc.svcname+'!'+self.rid)
        sched_timestamp_f = os.path.join(rcEnv.pathvar, '_'.join(('last_sync', self.svc.svcname, self.rid)))
        if not os.path.isdir(sync_timestamp_d):
            os.makedirs(sync_timestamp_d, 0o755)
        if not os.path.isdir(sync_timestamp_d_src):
            os.makedirs(sync_timestamp_d_src, 0o755)
        with open(sync_timestamp_f, 'w') as f:
            f.write(str(self.svc.action_start_date)+'\n')
        import shutil
        shutil.copy2(sync_timestamp_f, sync_timestamp_d_src)
        shutil.copy2(sync_timestamp_f, sched_timestamp_f)
        ruser = self.svc.node.get_ruser(node)
        options = self.mangle_options(ruser)
        cmd = ['rsync'] + options
        # -R preserves the full path layout on the remote side
        cmd += ['-R', sync_timestamp_f, sync_timestamp_f_src, ruser+'@'+node+':/']
        self.call(cmd)

    def sync(self, target):
        """Rsync self.src to self.dst on every syncable node of
        <target>, recording a timestamp after each success.
        """
        self.add_resource_files_to_sync()
        if target not in self.target.keys():
            if not self.svc.options.cron:
                self.log.info('%s => %s sync not applicable to %s',
                              " ".join(self.src), self.dst, target)
            return 0
        targets = self.nodes_to_sync(target)
        if len(targets) == 0:
            if not self.svc.options.cron:
                self.log.info("no nodes to sync")
            raise ex.syncNoNodesToSync
        if "delay_snap" in self.tags:
            if not hasattr(self.rset, 'snaps'):
                Snap = lookup_snap_mod()
                self.rset.snaps = Snap.Snap(self.rid)
                self.rset.snaps.set_logger(self.log)
            self.rset.snaps.try_snap(self.rset, target, rid=self.rid)
        if hasattr(self, "alt_src"):
            """ The pre_action() has provided us with a better source
                to sync from. Use that
            """
            src = self.alt_src
        else:
            src = self.src
        if len(src) == 0:
            if not self.svc.options.cron:
                self.log.info("no files to sync")
            raise ex.syncNoFilesToSync
        for node in targets:
            ruser = self.svc.node.get_ruser(node)
            dst = ruser + '@' + node + ':' + self.dst
            options = self.mangle_options(ruser)
            cmd = ['rsync'] + options + src
            cmd.append(dst)
            # internal syncs (sync#i*) are quiet, user syncs verbose
            if self.rid.startswith("sync#i"):
                (ret, out, err) = self.call(cmd)
            else:
                (ret, out, err) = self.vcall(cmd)
            if ret != 0:
                self.log.error("node %s synchronization failed (%s => %s)" % (node, src, dst))
                continue
            self.sync_timestamp(node)
            self.svc.need_postsync |= set([node])
        return

    def pre_action(self, action):
        """Actions to do before resourceSet iterates through the resources to
           trigger action() on each one
        """
        resources = [ r for r in self.rset.resources if not r.skip and not r.is_disabled() ]
        if len(resources) == 0:
            return
        self.pre_sync_check_prd_svc_on_non_prd_node()
        """ Is there at least one node to sync ?
        """
        targets = set([])
        rtargets = {0: set([])}
        need_snap = False
        for i, r in enumerate(resources):
            if r.skip or r.is_disabled():
                continue
            rtargets[i] = set([])
            if action == "sync_nodes":
                rtargets[i] |= r.nodes_to_sync('nodes')
            else:
                rtargets[i] |= r.nodes_to_sync('drpnodes')
            for node in rtargets[i].copy():
                if not r.node_can_sync(node):
                    rtargets[i] -= set([node])
                elif r.snap:
                    need_snap = True
        for i in rtargets:
            targets |= rtargets[i]
        if len(targets) == 0:
            if not self.svc.options.cron:
                self.log.info("no node to sync")
            raise ex.excAbortAction
        if not need_snap:
            self.log.debug("snap not needed")
            return
        # take the snapshots the resources will sync from
        Snap = lookup_snap_mod()
        try:
            self.rset.snaps = Snap.Snap(self.rid)
            self.rset.snaps.set_logger(self.log)
            self.rset.snaps.try_snap(self.rset, action)
        except ex.syncNotSnapable:
            raise ex.excError

    def post_action(self, action):
        """Actions to do after resourceSet has iterated through the resources to
           trigger action() on each one
        """
        # NOTE(review): `resources` is computed but the guard tests
        # len(self.rset.resources) -- probably meant len(resources);
        # confirm against upstream before changing
        resources = [ r for r in self.rset.resources if not r.skip and not r.is_disabled() ]
        if len(self.rset.resources) == 0:
            return
        if hasattr(self.rset, 'snaps'):
            self.rset.snaps.snap_cleanup(self.rset)

    def sync_nodes(self):
        # "nothing to do" conditions are not errors for this action
        try:
            self.sync("nodes")
        except ex.syncNoFilesToSync:
            if not self.svc.options.cron:
                self.log.info("no file to sync")
            pass
        except ex.syncNoNodesToSync:
            if not self.svc.options.cron:
                self.log.info("no node to sync")
            pass

    def sync_drp(self):
        # "nothing to do" conditions are not errors for this action
        try:
            self.sync("drpnodes")
        except ex.syncNoFilesToSync:
            if not self.svc.options.cron:
                self.log.info("no file to sync")
            pass
        except ex.syncNoNodesToSync:
            if not self.svc.options.cron:
                self.log.info("no node to sync")
            pass

    def _status(self, verbose=False):
        """ mono-node service should return n/a as a sync state
        """
        target = set([])
        for i in self.target:
            target |= self.target[i]
        if len(target - set([rcEnv.nodename])) == 0:
            self.status_log("no destination nodes", "info")
            return rcStatus.NA
        try:
            self.get_options()
        except ex.excError as e:
            self.status_log(str(e))
            return rcStatus.WARN
        """ sync state on nodes where the service is not UP
        """
        s = self.svc.group_status(excluded_groups=set(["sync", "hb", "app"]))
        if s['avail'].status != rcStatus.UP or \
           (self.svc.clustertype in ['flex', 'autoflex'] and \
            rcEnv.nodename != self.svc.flex_primary and \
            s['avail'].status == rcStatus.UP):
            if rcEnv.nodename not in target:
                self.status_log("passive node not in destination nodes", "info")
                return rcStatus.NA
            if self.node_need_sync(rcEnv.nodename):
                self.status_log("passive node needs update")
                return rcStatus.WARN
            else:
                return rcStatus.UP
        """ sync state on DRP nodes where the service is UP
        """
        if 'drpnodes' in self.target and rcEnv.nodename in self.target['drpnodes']:
            self.status_log("service up on drp node, sync disabled", "info")
            return rcStatus.NA
        """ sync state on nodes where the service is UP
        """
        nodes = []
        nodes += self.nodes_to_sync('nodes', state="late", status=True)
        nodes += self.nodes_to_sync('drpnodes', state="late", status=True)
        if len(nodes) == 0:
            return rcStatus.UP
        self.status_log("%s need update"%', '.join(nodes))
        return rcStatus.DOWN

    def get_options(self):
        """Return the full rsync option list, with X/A capabilities
        probed from the local rsync build.
        """
        if which("rsync") is None:
            raise ex.excError("rsync not found")
        baseopts = '-HAXpogDtrlvx'
        cmd = ['rsync', '--version']
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError("can not determine rsync capabilities")
        if 'no xattrs' in out:
            baseopts = baseopts.replace('X', '')
        if 'no ACLs' in out:
            baseopts = baseopts.replace('A', '')
        options = [baseopts, '--stats', '--delete', '--force', '--timeout='+str(self.timeout)] + self.options
        return options

    def __init__(self,
                 rid=None,
                 src=[],
                 dst=None,
                 options=[],
                 target={},
                 dstfs=None,
                 snap=False,
                 bwlimit=None,
                 internal=False,
                 **kwargs):
        # NOTE(review): src/options/target use mutable default
        # arguments; callers always pass them in practice -- confirm
        # before changing the signature
        resSync.Sync.__init__(self,
                              rid=rid,
                              type="sync.rsync",
                              **kwargs)
        if internal:
            if rcEnv.drp_path in dst:
                self.label = "rsync system files to drpnodes"
            else:
                self.label = "rsync svc config to %s"%(', '.join(target.keys()))
        else:
            # cap the label length for display purposes
            _src = ', '.join(src)
            if len(_src) > 300:
                _src = _src[0:300]
            _dst = ', '.join(target.keys())
            self.label = "rsync %s to %s"%(_src, _dst)
        self.src = src
        self.dst = dst
        self.dstfs = dstfs
        self.snap = snap
        self.target = target
        self.bwlimit = bwlimit
        self.internal = internal
        self.timeout = 3600
        self.options = options

    def add_resource_files_to_sync(self):
        # sync#i0 is the internal config sync: it also carries every
        # resource's files_to_sync() payload
        if self.rid != "sync#i0":
            return
        for resource in self.svc.get_resources():
            self.src += resource.files_to_sync()

    def info(self):
        """Return the resource information table for the collector."""
        self.add_resource_files_to_sync()
        data = [
          ["src", " ".join(self.src)],
          ["dst", self.dst],
          ["dstfs", self.dstfs if self.dstfs else ""],
          ["bwlimit", self.bwlimit if self.bwlimit else ""],
          ["snap", str(self.snap).lower()],
          ["timeout", str(self.timeout)],
          ["target", " ".join(list(self.target.keys()))],
          ["options", " ".join(self.options)],
        ]
        return self.fmt_info(data)

    def __str__(self):
        return "%s src=%s dst=%s options=%s target=%s" % (resSync.Sync.__str__(self),\
                self.src, self.dst, self.get_options(), str(self.target))
opensvc-1.8~20170412/lib/rcCloudVcloud.py 0000644 0001750 0001750 00000003554 13073467726 020162 0 ustar jkelbert jkelbert import rcCloud
import rcExceptions as ex
import socket
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
import libcloud.security
except ImportError:
raise ex.excInitError("apache-libcloud module must be installed")
class Cloud(rcCloud.Cloud):
    """VMware vCloud driver, backed by apache-libcloud."""
    mode = 'vcloud'

    def __init__(self, s, auth):
        rcCloud.Cloud.__init__(self, s, auth)
        if 'username' not in auth:
            raise ex.excInitError("option 'username' is mandatory in vcloud section")
        if 'password' not in auth:
            raise ex.excInitError("option 'password' is mandatory in vcloud section")
        if 'manager' not in auth:
            raise ex.excInitError("option 'manager' is mandatory in vcloud section")
        if 'api_version' not in auth:
            auth['api_version'] = '1.5'
        vcloud = get_driver(Provider.VCLOUD)
        self.driver = vcloud(auth['username'], auth['password'],
                             host=auth['manager'], api_version=auth['api_version'])

    def app_id(self, svcname):
        """Return the app part of a svcname formatted
        <vm>.<vapp>.<org>.<manager>.

        Fixed: str.rstrip(manager) strips any trailing characters
        belonging to the manager string's character set, not the
        suffix itself; strip the manager suffix explicitly.
        """
        suffix = self.auth['manager']
        if svcname.endswith(suffix):
            svcname = svcname[:-len(suffix)]
        return svcname.split('.')[-2]

    def cloud_id(self):
        return self.auth['manager']

    def app_cloud_id(self):
        """Return <org>.<manager>, the org being the @-suffix of the
        username when present.
        """
        _id = []
        l = self.auth['username'].split('@')
        if len(l) == 2:
            _id.append(l[1])
        _id.append(self.auth['manager'])
        return '.'.join(_id)

    def list_svcnames(self):
        """Return (vmname, svcname) tuples for all vms of all vapps."""
        l = []
        _id = self.app_cloud_id()
        try:
            vapps = self.driver.list_nodes()
        except socket.error as e:
            # fixed: the message referenced an undefined name and
            # raised NameError instead of the intended error
            raise ex.excExecError("error connecting to %s cloud manager" % self.auth['manager'])
        for vapp in vapps:
            __id = '.'.join((vapp.name, _id))
            for vm in vapp.extra['vms']:
                svcname = '.'.join((vm['name'], __id))
                l.append((vm['name'], svcname))
        return l
opensvc-1.8~20170412/lib/rcIfconfigAIX.py 0000644 0001750 0001750 00000005321 13073467726 020017 0 ustar jkelbert jkelbert from subprocess import *
from rcUtilities import hexmask_to_dotted
import rcIfconfig
class ifconfig(rcIfconfig.ifconfig):
    # AIX "ifconfig -a" parser building rcIfconfig.interface objects.
    def get_mac(self, intf):
        """Return the hardware address of <intf> from netstat -in
        output, where it appears dot-separated on the "link" line.
        Returns "" when not found.
        """
        buff = self.get_netstat_in()
        for line in buff.split("\n"):
            l = line.split()
            if len(l) < 4:
                continue
            if l[0] != intf.name:
                continue
            if not l[2].startswith("link"):
                continue
            if '.' not in l[3]:
                return ""
            return l[3].replace('.', ':')
        return ""

    def get_netstat_in(self):
        # run netstat -in once and cache the output: get_mac() is
        # called for every parsed interface
        if hasattr(self, "netstat_in_cache"):
            return self.netstat_in_cache
        cmd = ['netstat', '-in']
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        if p.returncode != 0:
            return ""
        self.netstat_in_cache = out
        return out

    def parse(self, out):
        """Word-based state machine over ifconfig -a output: a new
        interface section starts at the word containing "flags=",
        and the previous word is the interface name.
        """
        prev = ''
        prevprev = ''
        for w in out.split():
            if 'flags=' in w:
                i = rcIfconfig.interface(prev.replace(':',''))
                i.hwaddr = self.get_mac(i)
                self.intf.append(i)
                # defaults
                i.link_encap = ''
                i.scope = ''
                i.bcast = ''
                i.mtu = ''
                i.ipaddr = []
                i.mask = []
                i.ip6addr = []
                i.ip6mask = []
                i.flag_up = False
                i.flag_broadcast = False
                i.flag_running = False
                i.flag_multicast = False
                i.flag_loopback = False
                # flags are listed between <> as a comma-separated set
                flags = w.split('<')[1].split('>')[0].split(',')
                if 'UP' in flags:
                    i.flag_up = True
                if 'BROADCAST' in flags:
                    i.flag_broadcast = True
                if 'RUNNING' in flags:
                    i.flag_running = True
                if 'MULTICAST' in flags:
                    i.flag_multicast = True
                if 'LOOPBACK' in flags:
                    i.flag_loopback = True
            elif 'inet' == prev:
                i.ipaddr += [w]
            elif 'netmask' == prev:
                i.mask += [hexmask_to_dotted(w)]
            elif 'inet6' == prev:
                # inet6 values are addr/prefixlen
                i.ip6addr += [w.split('/')[0]]
                i.ip6mask += [w.split('/')[1]]
            elif 'ether' == prev:
                i.hwaddr = w
            # prevprev is maintained but currently unused
            prevprev = prev
            prev = w

    def __init__(self, mcast=False):
        # NOTE(review): mcast is accepted for interface parity with
        # other platform implementations but is unused here
        rcIfconfig.ifconfig.__init__(self)
        self.intf = []
        out = Popen(['ifconfig', '-a'], stdout=PIPE).communicate()[0]
        self.parse(out)
if __name__ == "__main__":
    # ad-hoc test: print the parsed interface list
    ifaces = ifconfig(mcast=True)
    print(ifaces)
opensvc-1.8~20170412/lib/rcEva.py 0000644 0001750 0001750 00000010513 13073467726 016443 0 ustar jkelbert jkelbert from __future__ import print_function
from rcUtilities import justcall, which
from xml.etree.ElementTree import XML, fromstring
import rcExceptions as ex
import os
import ConfigParser
from rcGlobalEnv import rcEnv
def sssu(cmd, manager, username, password, array=None, sssubin=None):
    """Run a command through the HP EVA sssu CLI.

    Logs into <manager>, optionally selects <array>, runs <cmd> and
    returns (out, err). Raises excError when the sssu binary cannot
    be located or the output contains an error marker.
    """
    if sssubin is None:
        candidate = os.path.join(rcEnv.pathbin, "sssu")
        if which("sssu"):
            sssubin = "sssu"
        elif os.path.exists(candidate):
            sssubin = candidate
        else:
            raise ex.excError("sssu command not found. set 'bin' in auth.conf section.")
    os.chdir(rcEnv.pathtmp)
    argv = [sssubin]
    argv.append("select manager %s username=%s password=%s" % (manager, username, password))
    if array is not None:
        argv.append("select system %s" % array)
    argv.append(cmd)
    out, err, ret = justcall(argv)
    print(" ".join(argv))
    if "Error" in out:
        print(argv)
        print(out)
        raise ex.excError("sssu command execution error")
    return out, err
class Evas(object):
    """Enumerate the HP EVA arrays declared in auth.conf.

    When <objects> is non-empty, only arrays whose name is listed
    are instantiated.
    """
    def __init__(self, objects=None):
        # default changed from the shared mutable [] default argument
        if objects is None:
            objects = []
        self.objects = objects
        self.filtering = len(objects) > 0
        # instance attribute: the original used a class attribute,
        # which accumulated arrays across Evas() instantiations
        self.arrays = []
        cf = rcEnv.authconf
        if not os.path.exists(cf):
            return
        conf = ConfigParser.RawConfigParser()
        conf.read(cf)
        m = {}
        for s in conf.sections():
            try:
                t = conf.get(s, 'type')
            except:
                continue
            if t != "eva":
                continue
            try:
                manager = conf.get(s, 'manager')
                username = conf.get(s, 'username')
                password = conf.get(s, 'password')
            except Exception as e:
                print("error parsing section", s, ":", e)
                # fixed: the original fell through with stale or
                # undefined manager/username/password values
                continue
            try:
                sssubin = conf.get(s, 'bin')
            except:
                sssubin = None
            m[manager] = [username, password, sssubin]
        del(conf)
        done = []
        for manager, creds in m.items():
            # fixed: the original unpacked into 'sssbin' but passed
            # 'sssubin', a stale leftover from the parsing loop above
            username, password, sssubin = creds
            out, err = sssu('ls system', manager, username, password, sssubin=sssubin)
            _in = False
            for line in out.split('\n'):
                if 'Systems avail' in line:
                    _in = True
                    continue
                if not _in:
                    continue
                name = line.strip()
                if self.filtering and name not in self.objects:
                    continue
                self.arrays.append(Eva(name, manager, username, password, sssubin=sssubin))
                done.append(name)

    def __iter__(self):
        for array in self.arrays:
            yield(array)
class Eva(object):
    # A single HP EVA array, queried through the sssu CLI.
    def __init__(self, name, manager, username, password, sssubin=None):
        self.name = name
        self.manager = manager
        self.username = username
        self.password = password
        self.sssubin = sssubin
        #self.keys = ['disk_group']
        # dataset getters exposed to the collector (get_<key>)
        self.keys = ['controller', 'disk_group', 'vdisk']

    def sssu(self, cmd):
        """Run <cmd> on this array through the module-level sssu()."""
        return sssu(cmd, self.manager, self.username, self.password, array=self.name, sssubin=self.sssubin)

    def stripxml(self, buff):
        """Cleanup pass over sssu xml output before shipping it."""
        # NOTE(review): buff.index("") always returns 0, making this
        # try/except a no-op; the search string was probably an XML
        # marker lost in transit -- confirm against upstream
        try:
            buff = buff[buff.index(""):]
        except:
            buff = ""
        lines = buff.split('\n')
        for i, line in enumerate(lines):
            # NOTE(review): deleting while enumerating skips the line
            # following each removed one -- confirm this is tolerable
            if line.startswith("\\"):
                del lines[i]
        lines = [''] + lines + [' ']
        return '\n'.join(lines)

    def get_controller(self):
        """Return the controllers xml dump."""
        cmd = 'ls controller full xml'
        print("%s: %s"%(self.name, cmd))
        buff = self.sssu(cmd)[0]
        return self.stripxml(buff)

    def get_disk_group(self):
        """Return the disk groups xml dump."""
        cmd = 'ls disk_group full xml'
        print("%s: %s"%(self.name, cmd))
        buff = self.sssu(cmd)[0]
        return self.stripxml(buff)

    def get_vdisk(self):
        """Return the vdisks xml dump."""
        cmd = 'ls vdisk full xml'
        print("%s: %s"%(self.name, cmd))
        buff = self.sssu(cmd)[0]
        return self.stripxml(buff)

    def get_lun(self):
        """Return the luns xml dump."""
        cmd = 'ls lun full xml'
        print("%s: %s"%(self.name, cmd))
        buff = self.sssu(cmd)[0]
        return self.stripxml(buff)
if __name__ == "__main__":
    # ad-hoc test: dump the controller xml of every declared array
    o = Evas()
    for eva in o:
        print(eva.get_controller())
opensvc-1.8~20170412/lib/rcPkgOSF1.py 0000644 0001750 0001750 00000002436 13073467726 017107 0 ustar jkelbert jkelbert from rcUtilities import justcall
from rcGlobalEnv import rcEnv
"""
Subset Status Description
------ ------ -----------
IOSFRBASE540 installed French Base System (French Support - Operating System)
IOSFRCDEHLP540 not installed French CDE Online Help (French Support - Windowing Environment)
IOSFRCDEMIN540 installed French CDE Minimum Runtime Environment(French Support - Windowing Environment)
IOSFRX11540 installed French Basic X Environment (French Support - Windowing Environment)
"""
def _list():
    """Return (packages, patches) parsed from 'setld -i' output.

    Each package entry is [nodename, name, version, arch] and each
    patch entry is [nodename, name, version]; version/arch are not
    reported by setld and left empty.

    Fixed: the error paths returned a bare [] while callers index
    the result as a 2-tuple (listpkg/listpatch do _list()[0|1]),
    which raised IndexError; return ([], []) instead.
    """
    cmd = ['setld', '-i']
    out, err, ret = justcall(cmd)
    pkg = []
    patch = []
    pkgarch = ""
    pkgvers = ""
    if ret != 0:
        return [], []
    lines = out.split('\n')
    if len(lines) < 3:
        return [], []
    # skip the two header lines
    for line in lines[2:]:
        if "installed" not in line or "not installed" in line:
            continue
        name = line.split()[0]
        if "Patch:" in line:
            x = [rcEnv.nodename, name, pkgvers]
            patch.append(x)
        else:
            x = [rcEnv.nodename, name, pkgvers, pkgarch]
            pkg.append(x)
    return pkg, patch
def listpkg():
    """Return the installed subsets (packages) list."""
    return _list()[0]
def listpatch():
    """Return the installed patches list."""
    return _list()[1]
opensvc-1.8~20170412/lib/rcAssetDarwin.py 0000644 0001750 0001750 00000006152 13073467726 020160 0 ustar jkelbert jkelbert import os
import datetime
from rcUtilities import justcall, which
from rcGlobalEnv import rcEnv
import rcAsset
class Asset(rcAsset.Asset):
    """Darwin (macOS) asset discovery, based on `system_profiler` output."""

    def __init__(self, node):
        rcAsset.Asset.__init__(self, node)
        # "key: value" pairs parsed from the hardware and software data types
        self.info = {}
        (out, err, ret) = justcall(['system_profiler', 'SPHardwareDataType', 'SPSoftwareDataType'])
        if ret == 0:
            for line in out.split('\n'):
                l = line.split(':')
                if len(l) != 2:
                    continue
                self.info[l[0].strip()] = l[1].strip()
        self.memslots = 0
        self.membanks = 0
        self._collect_memory_info()

    def _collect_memory_info(self):
        """Count memory slots ("BANK ..." stanzas) and populated banks ("Status: OK")."""
        (out, err, ret) = justcall(['system_profiler', 'SPMemoryDataType'])
        if ret != 0:
            return
        inBlock = False
        for line in out.split('\n'):
            line = line.strip()
            if not inBlock and line.startswith("BANK"):
                inBlock = True
                self.memslots += 1
            if inBlock and line.startswith("Status"):
                l = line.split(':')
                if 'OK' in l[1].strip():
                    self.membanks += 1
                inBlock = False

    def _get_mem_bytes(self):
        """Return total memory in MB as a string ('0' if unknown).

        Raises:
            ValueError: if the reported memory unit is not MB/GB/TB.
        """
        if 'Memory' not in self.info:
            return '0'
        m = self.info['Memory'].split()
        size = int(m[0])
        unit = m[1]
        if unit == 'TB':
            size = size * 1024 * 1024
        elif unit == 'GB':
            size = size * 1024
        elif unit == 'MB':
            pass
        else:
            # Bug fix: this was a bare `raise` with no active exception,
            # which itself raises an unhelpful RuntimeError.
            raise ValueError("unsupported memory unit: %s" % unit)
        return str(size)

    def _get_mem_banks(self):
        return str(self.membanks)

    def _get_mem_slots(self):
        return str(self.memslots)

    def _get_os_vendor(self):
        return 'Apple'

    def _get_os_release(self):
        """Prefer the system_profiler version string, fall back to `uname -r`."""
        if 'System Version' in self.info:
            return self.info['System Version']
        (out, err, ret) = justcall(['uname', '-r'])
        if ret != 0:
            return 'Unknown'
        return out.split()[0]

    def _get_os_kernel(self):
        if 'Kernel Version' not in self.info:
            return '0'
        return self.info['Kernel Version']

    def _get_os_arch(self):
        cmd = ['uname', '-m']
        (out, err, ret) = justcall(cmd)
        if ret != 0:
            return 'Unknown'
        return out.split('\n')[0]

    def _get_cpu_freq(self):
        if 'Processor Speed' not in self.info:
            return '0'
        return self.info['Processor Speed']

    def _get_cpu_cores(self):
        if 'Total Number of Cores' not in self.info:
            return '0'
        return self.info['Total Number of Cores']

    def _get_cpu_dies(self):
        if 'Number of Processors' not in self.info:
            return '0'
        return self.info['Number of Processors']

    def _get_cpu_model(self):
        if 'Processor Name' not in self.info:
            return '0'
        return self.info['Processor Name']

    def _get_serial(self):
        # NOTE(review): reports the Hardware UUID, not the marketing serial
        # number -- confirm the collector expects this.
        if 'Hardware UUID' not in self.info:
            return '0'
        return self.info['Hardware UUID']

    def _get_model(self):
        if 'Model Name' not in self.info:
            return '0'
        return self.info['Model Name']
opensvc-1.8~20170412/lib/nodeLinux.py 0000644 0001750 0001750 00000000336 13073467726 017352 0 ustar jkelbert jkelbert import node
class Node(node.Node):
    """Linux-specific node actions: halt and reboot helpers."""

    def shutdown(self):
        # NOTE(review): no time argument (e.g. "now") is passed -- confirm
        # the intended shutdown delay on the target distributions.
        ret, out, err = self.vcall(["shutdown", "-h"])

    def _reboot(self):
        ret, out, err = self.vcall(["reboot"])
opensvc-1.8~20170412/lib/checkFsUsageLinux.py 0000644 0001750 0001750 00000004016 13073467726 020757 0 ustar jkelbert jkelbert import checks
from rcUtilities import justcall
class check(checks.check):
    """Filesystem usage checker fed by `df -lP` output (Linux)."""
    chk_type = "fs_u"

    def find_svc(self, mountpt):
        """Return the svcname of the service owning <mountpt>, or ''."""
        for svc in self.svcs:
            for resource in svc.get_resources('fs'):
                if resource.mount_point == mountpt:
                    return svc.svcname
        return ''

    def do_check(self):
        """Return usage/free/size instances per mounted fs, filtering pseudo mounts."""
        out, err, ret = justcall(['df', '-lP'])
        if ret != 0:
            return self.undef
        lines = out.split('\n')
        if len(lines) < 2:
            return self.undef
        results = []
        for line in lines[1:]:
            fields = line.split()
            if len(fields) != 6:
                continue
            dev = fields[0]
            mntpt = fields[5]
            # discard bind mounts: we get metric from the source anyway
            if dev.startswith('/') and not dev.startswith('/dev') and not dev.startswith('//'):
                continue
            # skip removable media, runtime and kernel pseudo filesystems
            if mntpt.startswith('/Volumes') or mntpt.startswith('/media/') or \
               mntpt.startswith('/run') or mntpt.startswith('/sys/'):
                continue
            if mntpt.endswith('/shm'):
                continue
            # skip docker graph driver internals
            if "/graph/" in mntpt or "/aufs/mnt/" in mntpt:
                continue
            if "osvc_sync_" in dev:
                # do not report osvc sync snapshots fs usage
                continue
            svcname = self.find_svc(mntpt)
            results.append({
                'chk_instance': mntpt,
                'chk_value': fields[4],
                'chk_svcname': svcname,
            })
            results.append({
                'chk_instance': mntpt + ".free",
                'chk_value': fields[3],
                'chk_svcname': svcname,
            })
            results.append({
                'chk_instance': mntpt + ".size",
                'chk_value': fields[1],
                'chk_svcname': svcname,
            })
        return results
opensvc-1.8~20170412/lib/rcIfconfigFreeBSD.py 0000644 0001750 0001750 00000003571 13073467726 020615 0 ustar jkelbert jkelbert from subprocess import *
import rcIfconfig
class ifconfig(rcIfconfig.ifconfig):
    # Parser for FreeBSD `ifconfig -a` output.

    def parse(self, out):
        # Token-driven scan: the output is split on whitespace and read one
        # token at a time.  A token containing 'flags=' opens a new
        # interface stanza (the previous token was then the interface name,
        # e.g. "em0:"); address/mask/ether tokens are attributed to the
        # current interface based on the keyword token that preceded them.
        prev = ''
        prevprev = ''  # NOTE(review): updated but never read
        for w in out.split():
            if 'flags=' in w:
                # previous token was the interface name, strip its ':'
                i = rcIfconfig.interface(prev.replace(':',''))
                self.intf.append(i)
                # defaults
                i.link_encap = ''
                i.scope = ''
                i.bcast = ''
                i.mtu = ''
                i.ipaddr = []
                i.mask = []
                i.ip6addr = []
                i.ip6mask = []
                i.hwaddr = ''
                i.flag_up = False
                i.flag_broadcast = False
                i.flag_running = False
                i.flag_multicast = False
                i.flag_loopback = False
                # flags token looks like: flags=8843<UP,BROADCAST,...>
                flags = w.split('<')[1].split('>')[0].split(',')
                if 'UP' in flags:
                    i.flag_up = True
                if 'BROADCAST' in flags:
                    i.flag_broadcast = True
                if 'RUNNING' in flags:
                    i.flag_running = True
                if 'MULTICAST' in flags:
                    i.flag_multicast = True
                if 'LOOPBACK' in flags:
                    i.flag_loopback = True
            elif 'inet' == prev:
                i.ipaddr += [w]
            elif 'inet6' == prev:
                # drop the %scope suffix freebsd appends to link-local addrs
                i.ip6addr += [w.split('%')[0]]
            elif 'netmask' == prev:
                i.mask += [w]
            elif 'prefixlen' == prev:
                i.ip6mask += [w]
            elif 'ether' == prev:
                i.hwaddr = w
            prevprev = prev
            prev = w

    def __init__(self, mcast=False):
        rcIfconfig.ifconfig.__init__(self, mcast=mcast)
        # NOTE(review): under python3, Popen returns bytes while parse()
        # compares str tokens -- confirm whether decoding is needed here.
        out = Popen(['ifconfig', '-a'], stdout=PIPE).communicate()[0]
        self.parse(out)
if __name__ == "__main__":
    o = ifconfig()
    # was the py2-only `print o` statement (a SyntaxError under python3);
    # use the call form like the rest of the codebase
    print(o)
opensvc-1.8~20170412/lib/rcMounts.py 0000644 0001750 0001750 00000004176 13073467726 017225 0 ustar jkelbert jkelbert import os
from subprocess import *
from rcUtilities import bdecode
class Mount:
    """One mount table entry: device, mount point, fs type and options.

    Trailing slashes are stripped from dev and mnt so comparisons are
    canonical; the root mount point '/' is kept as-is.
    """
    def __init__(self, dev, mnt, type, mnt_opt):
        self.dev = dev.rstrip('/')
        self.mnt = mnt.rstrip('/')
        # Bug fix: this used `mnt is '/'`, an identity test that only works
        # when CPython happens to intern the string. rstrip('/') turns '/'
        # into '', so restore the root path with a value comparison.
        if mnt == '/':
            self.mnt = mnt
        self.type = type
        self.mnt_opt = mnt_opt

    def __str__(self):
        return "Mount: dev[%s] mnt[%s] type[%s] options[%s]" % \
               (self.dev,self.mnt,self.type,self.mnt_opt)
class Mounts:
    """A collection of Mount entries with lookup helpers.

    Population of self.mounts and the match_mount() predicate are
    OS dependent and provided by subclasses.
    """
    def __init__(self):
        """ OS dependent """
        self.mounts = []

    def __iter__(self):
        return iter(self.mounts)

    def match_mount(self):
        """ OS dependent """
        pass

    def mount(self, dev, mnt):
        """Return the first entry matching (dev, mnt), or None."""
        for entry in self.mounts:
            if self.match_mount(entry, dev, mnt):
                return entry
        return None

    def has_mount(self, dev, mnt):
        """Return True if some entry matches (dev, mnt)."""
        for entry in self.mounts:
            if self.match_mount(entry, dev, mnt):
                return True
        return False

    def has_param(self, param, value):
        """Return the first entry whose attribute <param> equals <value>, or None."""
        for entry in self.mounts:
            if getattr(entry, param) == value:
                return entry
        return None

    def sort(self, key='mnt', reverse=False):
        """In-place sort by 'mnt', 'dev' or 'type'; other keys are ignored."""
        if not self.mounts:
            return
        if key not in ('mnt', 'dev', 'type'):
            return
        self.mounts.sort(key=lambda entry: getattr(entry, key), reverse=reverse)

    def get_fpath_dev(self, fpath):
        """Walk up from fpath and return the device of the first mount point found."""
        done = False
        d = fpath
        while not done:
            d = os.path.dirname(d)
            entry = self.has_param("mnt", d)
            if entry:
                return entry.dev
            if d == os.sep:
                done = True

    def get_src_dir_dev(self, dev):
        """Given a directory path, return its hosting device
        """
        proc = Popen(self.df_one_cmd + [dev], stdout=PIPE, stderr=STDOUT, close_fds=True)
        out, err = proc.communicate()
        if proc.returncode != 0:
            return
        return bdecode(out).lstrip().split()[0]

    def __str__(self):
        text = "%s" % (self.__class__.__name__)
        for m in self.mounts:
            text += "\n %s" % m.__str__()
        return text
opensvc-1.8~20170412/lib/checkFsUsageFreeBSD.py 0000644 0001750 0001750 00000002674 13073467726 021102 0 ustar jkelbert jkelbert import checks
from rcUtilities import justcall
class check(checks.check):
    """Filesystem usage checker fed by `df -lP` output (FreeBSD)."""
    chk_type = "fs_u"

    def find_svc(self, mountpt):
        """Return the svcname of the service owning <mountpt>, or ''."""
        for svc in self.svcs:
            for resource in svc.get_resources('fs'):
                if resource.mount_point == mountpt:
                    return svc.svcname
        return ''

    def do_check(self):
        """Return one usage instance per mounted fs, filtering pseudo mounts."""
        out, err, ret = justcall(['df', '-lP'])
        if ret != 0:
            return self.undef
        lines = out.split('\n')
        if len(lines) < 2:
            return self.undef
        results = []
        for line in lines[1:]:
            fields = line.split()
            if len(fields) != 6:
                continue
            dev = fields[0]
            mntpt = fields[5]
            # discard bind mounts: we get metric from the source anyway
            if dev.startswith('/') and not dev.startswith('/dev') and not dev.startswith('//'):
                continue
            # skip removable media, runtime and kernel pseudo filesystems
            if mntpt.startswith('/Volumes') or mntpt.startswith('/run') or \
               mntpt.startswith('/sys/') or mntpt == "/dev/shm":
                continue
            if "osvc_sync_" in dev:
                # do not report osvc sync snapshots fs usage
                continue
            results.append({
                'chk_instance': mntpt,
                'chk_value': fields[4],
                'chk_svcname': self.find_svc(mntpt),
            })
        return results
opensvc-1.8~20170412/lib/wmi.py 0000644 0001750 0001750 00000133526 13073467726 016211 0 ustar jkelbert jkelbert """
Windows Management Instrumentation (WMI) is Microsoft's answer to
the DMTF's Common Information Model. It allows you to query just
about any conceivable piece of information from any computer which
is running the necessary agent and over which have you the
necessary authority.
Since the COM implementation doesn't give much away to Python
programmers, I've wrapped it in some lightweight classes with
some getattr / setattr magic to ease the way. In particular:
* The :class:`_wmi_namespace` object itself will determine its classes
and allow you to return all instances of any of them by
using its name as an attribute::
disks = wmi.WMI ().Win32_LogicalDisk ()
* In addition, you can specify what would become the WHERE clause
as keyword parameters::
fixed_disks = wmi.WMI ().Win32_LogicalDisk (DriveType=3)
* The objects returned by a WMI lookup are wrapped in a Python
class which determines their methods and classes and allows
you to access them as though they were Python classes. The
methods only allow named parameters::
for p in wmi.WMI ().Win32_Process (Name="notepad.exe"):
p.Terminate (Result=1)
* Doing a print() on one of the WMI objects will result in its
`GetObjectText\_` method being called, which usually produces
a meaningful printout of current values.
The repr of the object will include its full WMI path,
which lets you get directly to it if you need to.
* You can get the associators and references of an object as
a list of python objects by calling the associators () and
references () methods on a WMI Python object::
for p in wmi.WMI ().Win32_Process (Name="notepad.exe"):
for r in p.references ():
print(r)
.. note::
Don't do this on a Win32_ComputerSystem object; it will
take all day and kill your machine!
* WMI classes (as opposed to instances) are first-class
objects, so you can get hold of a class, and call
its methods or set up a watch against it::
process = wmi.WMI ().Win32_Process
process.Create (CommandLine="notepad.exe")
* To make it easier to use in embedded systems and py2exe-style
executable wrappers, the module will not force early Dispatch.
To do this, it uses a handy hack by Thomas Heller for easy access
to constants.
Typical usage will be::
import wmi
vodev1 = wmi.WMI ("vodev1")
for disk in vodev1.Win32_LogicalDisk ():
if disk.DriveType == 3:
space = 100 * long (disk.FreeSpace) / long (disk.Size)
print("%s has %d%% free" % (disk.Name, space))
Many thanks, obviously to Mark Hammond for creating the win32all
extensions, but also to Alex Martelli and Roger Upole, whose
c.l.py postings pointed me in the right direction.
Thanks especially in release 1.2 to Paul Tiemann for his code
contributions and robust testing.
"""
__VERSION__ = __version__ = "1.4.9"
_DEBUG = False
import sys
import datetime
import re
import struct
import warnings
from win32com.client import GetObject, Dispatch
import pywintypes
def signed_to_unsigned (signed):
    """Convert a (possibly signed) 32-bit value to unsigned. Useful
    when converting a COM error code to the more conventional
    8-digit hex::
      print("%08X" % signed_to_unsigned (-2147023174))
    """
    # Use standard-size (4-byte) formats: native "l"/"L" are 8 bytes on
    # LP64 platforms, which would yield 64-bit values instead of the
    # 32-bit HRESULT this function is meant to produce.
    unsigned, = struct.unpack ("=L", struct.pack ("=l", signed))
    return unsigned
class SelfDeprecatingDict (object):
    """Wrap a dict so it can be addressed either as a dict (via `.keys`,
    `.items`, etc.) or as a list of its keys, during an interregnum in
    which dict-style access still works but issues a DeprecationWarning.
    """
    # attributes that exist on dict but not on list: these fall through
    # to the wrapped dict (with a warning); everything else goes to the list
    dict_only = set (dir (dict)).difference (dir (list))

    def __init__ (self, dictlike):
        self.dict = dict (dictlike)
        self.list = list (self.dict)

    def __getattr__ (self, attribute):
        if attribute not in self.dict_only:
            return getattr (self.list, attribute)
        warnings.warn ("In future this will be a list and not a dictionary", DeprecationWarning)
        return getattr (self.dict, attribute)

    def __iter__ (self):
        return iter (self.list)

    def __str__ (self):
        return str (self.list)

    def __repr__ (self):
        return repr (self.list)

    def __getitem__ (self, item):
        # integer/slice subscripts address the list; anything else
        # (a key) falls back to the dict
        try:
            return self.list[item]
        except TypeError:
            return self.dict[item]
class ProvideConstants (object):
    """Attach lazy access to a typelib's constants onto a
    ``win32com.client.Dispatch`` object: after construction they are
    reachable as attributes of the object's `_constants` property.
    (From Thomas Heller on c.l.py).
    """
    def __init__(self, comobj):
        comobj.__dict__["_constants"] = self
        typelib = comobj._oleobj_.GetTypeInfo().GetContainingTypeLib()[0]
        self.__typecomp = typelib.GetTypeComp()

    def __getattr__(self, name):
        # never resolve dunder lookups through the typelib
        if name.startswith("__") and name.endswith("__"):
            raise AttributeError (name)
        result = self.__typecomp.Bind(name)
        if not result[0]:
            raise AttributeError (name)
        return result[1].value
# Bootstrap a WMI connection at import time so the wbem* flag and error
# constants can be read from the typelib through ProvideConstants.
# NOTE(review): this requires a live WMI/COM environment, so importing
# this module fails outside Windows -- confirm that is acceptable.
obj = GetObject ("winmgmts:")
ProvideConstants (obj)
wbemErrInvalidQuery = obj._constants.wbemErrInvalidQuery
wbemErrTimedout = obj._constants.wbemErrTimedout
wbemFlagReturnImmediately = obj._constants.wbemFlagReturnImmediately
wbemFlagForwardOnly = obj._constants.wbemFlagForwardOnly
#
# Exceptions
#
class x_wmi (Exception):
    """Ancestor of all wmi-related exceptions. Keeps track of
    an info message and the underlying COM error if any, exposed
    as the :attr:`com_error` attribute.
    """
    def __init__ (self, info="", com_error=None):
        self.info = info
        self.com_error = com_error

    def __str__ (self):
        # Bug fix: the format string had been reduced to "" (the
        # "<x_wmi: %s %s>" wrapper was lost, leaving a TypeError at
        # format time); restore the intended representation.
        return "<x_wmi: %s %s>" % (
            self.info or "Unexpected COM Error",
            self.com_error or "(no underlying exception)"
        )
class x_wmi_invalid_query (x_wmi):
    """Raised when WMI reports `wbemErrInvalidQuery`."""
class x_wmi_timed_out (x_wmi):
    """Raised when a watcher times out."""
class x_wmi_no_namespace (x_wmi):
    """Raised on an attempt to query or watch from a class
    which has no namespace attached.
    """
class x_access_denied (x_wmi):
    """Raised when WMI returns an access-denied code (80070005)."""
class x_wmi_authentication (x_wmi):
    """Raised when an invalid combination of authentication
    properties is attempted when connecting.
    """
class x_wmi_uninitialised_thread (x_wmi):
    """Raised when WMI returns 800401E4 on connection, usually
    indicating that no COM threading model has been initialised.
    """
# Map known COM/WMI error codes to the exception subclasses raised by
# handle_com_error; anything unlisted falls back to plain x_wmi.
WMI_EXCEPTIONS = {
  signed_to_unsigned (wbemErrInvalidQuery) : x_wmi_invalid_query,
  signed_to_unsigned (wbemErrTimedout) : x_wmi_timed_out,
  0x80070005 : x_access_denied,  # E_ACCESSDENIED
  0x80041003 : x_access_denied,  # WBEM_E_ACCESS_DENIED
  0x800401E4 : x_wmi_uninitialised_thread,  # COM threading not initialised
}
def handle_com_error (err=None):
    """Convenience wrapper for displaying all manner of COM errors.
    Raises a :exc:`x_wmi` exception with more useful information attached
    :param err: The structure attached to a `pywintypes.com_error`
    """
    if err is None:
        err = sys.exc_info ()[1]
    hresult_code, hresult_name, additional_info, parameter_in_error = err.args
    hresult_code = signed_to_unsigned (hresult_code)
    exception_string = ["%s - %s" % (hex (hresult_code), hresult_name)]
    scode = None
    if additional_info:
        (wcode, source_of_error, error_description,
         whlp_file, whlp_context, scode) = additional_info
        scode = signed_to_unsigned (scode)
        exception_string.append (" Error in: %s" % source_of_error)
        exception_string.append (" %s - %s" % (hex (scode), (error_description or "").strip ()))
    # pick the most specific exception class for either error code
    klass = x_wmi
    for error_code, candidate in WMI_EXCEPTIONS.items ():
        if error_code in (hresult_code, scode):
            klass = candidate
            break
    raise klass (com_error=err)
# Epoch of Windows FILETIME values
BASE = datetime.datetime (1601, 1, 1)

def from_1601 (ns100):
    """Convert a count of 100ns intervals since 1601-01-01 (Windows
    FILETIME / TIME_CREATED) into a datetime."""
    offset = datetime.timedelta (microseconds=int (ns100) / 10)
    return BASE + offset
def from_time (year=None, month=None, day=None, hours=None, minutes=None, seconds=None, microseconds=None, timezone=None):
    """Convenience wrapper to take a series of date/time elements and return a WMI time
    of the form `yyyymmddHHMMSS.mmmmmm+UUU`. All elements may be int, string or
    omitted altogether. If omitted, they will be replaced in the output string
    by a series of stars of the appropriate length.
    :param year: The year element of the date/time
    :param month: The month element of the date/time
    :param day: The day element of the date/time
    :param hours: The hours element of the date/time
    :param minutes: The minutes element of the date/time
    :param seconds: The seconds element of the date/time
    :param microseconds: The microseconds element of the date/time
    :param timezone: The timezone element of the date/time
    :returns: A WMI datetime string of the form: `yyyymmddHHMMSS.mmmmmm+UUU`
    """
    def str_or_stars (i, length):
        # render an element zero-padded, or as stars when omitted
        if i is None:
            return "*" * length
        else:
            return str (i).rjust (length, "0")

    wmi_time = ""
    wmi_time += str_or_stars (year, 4)
    wmi_time += str_or_stars (month, 2)
    wmi_time += str_or_stars (day, 2)
    wmi_time += str_or_stars (hours, 2)
    wmi_time += str_or_stars (minutes, 2)
    wmi_time += str_or_stars (seconds, 2)
    wmi_time += "."
    wmi_time += str_or_stars (microseconds, 6)
    # Bug fix: an omitted timezone used to hit `timezone >= 0` with None
    # and crash; render it as "+***" like the other omitted elements.
    if timezone is None:
        wmi_time += "+"
    else:
        if timezone >= 0:
            wmi_time += "+"
        else:
            wmi_time += "-"
            timezone = abs (timezone)
    wmi_time += str_or_stars (timezone, 3)
    return wmi_time
def to_time (wmi_time):
    """Convenience wrapper to take a WMI datetime string of the form
    yyyymmddHHMMSS.mmmmmm+UUU and return an 8-tuple containing the
    individual elements, or None where the string contains placeholder
    stars.
    :param wmi_time: The WMI datetime string in `yyyymmddHHMMSS.mmmmmm+UUU` format
    :returns: An 8-tuple of (year, month, day, hours, minutes, seconds, microseconds, timezone)
    """
    def int_or_none (s, start, end):
        # a starred (omitted) field fails int() and maps to None
        try:
            return int (s[start:end])
        except ValueError:
            return None

    year = int_or_none (wmi_time, 0, 4)
    month = int_or_none (wmi_time, 4, 6)
    day = int_or_none (wmi_time, 6, 8)
    hours = int_or_none (wmi_time, 8, 10)
    minutes = int_or_none (wmi_time, 10, 12)
    seconds = int_or_none (wmi_time, 12, 14)
    microseconds = int_or_none (wmi_time, 15, 21)
    timezone = wmi_time[22:]
    if timezone == "***":
        timezone = None
    return year, month, day, hours, minutes, seconds, microseconds, timezone
def _set (obj, attribute, value):
"""Helper function to add an attribute directly into the instance
dictionary, bypassing possible `__getattr__` calls
:param obj: Any python object
:param attribute: String containing attribute name
:param value: Any python object
"""
obj.__dict__[attribute] = value
class _wmi_method:
    """A currying sort of wrapper around a WMI method name. It
    abstract's the method's parameters and can be called like
    a normal Python object passing in the parameter values.
    Output parameters are returned from the call as a tuple.
    In addition, the docstring is set up as the method's
    signature, including an indication as to whether any
    given parameter is expecting an array, and what
    special privileges are required to call the method.
    """
    def __init__ (self, ole_object, method_name):
        """
        :param ole_object: The WMI class/instance whose method is to be called
        :param method_name: The name of the method to be called
        """
        try:
            self.ole_object = Dispatch (ole_object)
            self.method = ole_object.Methods_ (method_name)
            self.qualifiers = {}
            for q in self.method.Qualifiers_:
                self.qualifiers[q.Name] = q.Value
            self.provenance = "\n".join (self.qualifiers.get ("MappingStrings", []))
            self.in_parameters = self.method.InParameters
            self.out_parameters = self.method.OutParameters
            if self.in_parameters is None:
                self.in_parameter_names = []
            else:
                self.in_parameter_names = [(i.Name, i.IsArray) for i in self.in_parameters.Properties_]
            if self.out_parameters is None:
                self.out_parameter_names = []
            else:
                self.out_parameter_names = [(i.Name, i.IsArray) for i in self.out_parameters.Properties_]
            # build a signature-like docstring, flagging array params with []
            doc = "%s (%s) => (%s)" % (
                method_name,
                ", ".join ([name + ("", "[]")[is_array] for (name, is_array) in self.in_parameter_names]),
                ", ".join ([name + ("", "[]")[is_array] for (name, is_array) in self.out_parameter_names])
            )
            privileges = self.qualifiers.get ("Privileges", [])
            if privileges:
                doc += " | Needs: " + ", ".join (privileges)
            self.__doc__ = doc
        except pywintypes.com_error:
            handle_com_error ()

    def __call__ (self, *args, **kwargs):
        """Execute the call to a WMI method, returning
        a tuple (even if is of only one value) containing
        the out and return parameters.
        """
        try:
            if self.in_parameters:
                parameter_names = {}
                for name, is_array in self.in_parameter_names:
                    parameter_names[name] = is_array
                parameters = self.in_parameters
                #
                # Check positional parameters first
                #
                for n_arg in range (len (args)):
                    arg = args[n_arg]
                    parameter = parameters.Properties_[n_arg]
                    if parameter.IsArray:
                        try: list (arg)
                        except TypeError: raise TypeError ("parameter %d must be iterable" % n_arg)
                    parameter.Value = arg
                #
                # If any keyword param supersedes a positional one,
                # it'll simply overwrite it.
                #
                for k, v in kwargs.items ():
                    is_array = parameter_names.get (k)
                    if is_array is None:
                        raise AttributeError ("%s is not a valid parameter for %s" % (k, self.__doc__))
                    else:
                        if is_array:
                            try: list (v)
                            except TypeError: raise TypeError ("%s must be iterable" % k)
                        parameters.Properties_ (k).Value = v
                result = self.ole_object.ExecMethod_ (self.method.Name, self.in_parameters)
            else:
                result = self.ole_object.ExecMethod_ (self.method.Name)
            results = []
            for name, is_array in self.out_parameter_names:
                value = result.Properties_ (name).Value
                if is_array:
                    #
                    # Thanks to Jonas Bjering for bug report and patch
                    #
                    results.append (list (value or []))
                else:
                    results.append (value)
            return tuple (results)
        except pywintypes.com_error:
            handle_com_error ()

    def __repr__ (self):
        # Bug fix: the format string had been reduced to "" (the
        # "<function %s>" wrapper was lost, leaving a TypeError at
        # format time); restore the intended representation.
        return "<function %s>" % self.__doc__
class _wmi_property (object):
def __init__ (self, property):
self.property = property
self.name = property.Name
self.value = property.Value
self.qualifiers = dict ((q.Name, q.Value) for q in property.Qualifiers_)
self.type = self.qualifiers.get ("CIMTYPE", None)
def set (self, value):
self.property.Value = value
def __repr__ (self):
return "" % self.name
def __getattr__ (self, attr):
return getattr (self.property, attr)
#
# class _wmi_object
#
class _wmi_object:
    """The heart of the WMI module: wraps the objects returned by COM
    ISWbemObject interface and provide readier access to their properties
    and methods resulting in a more Pythonic interface. Not usually
    instantiated directly, rather as a result of calling a :class:`_wmi_class`
    on the parent :class:`_wmi_namespace`.
    If you get hold of a WMI-related COM object from some other
    source than this module, you can wrap it in one of these objects
    to get the benefits of the module::
      import win32com.client
      import wmi
      wmiobj = win32com.client.GetObject ("winmgmts:Win32_LogicalDisk.DeviceID='C:'")
      c_drive = wmi._wmi_object (wmiobj)
      print(c_drive)
    """
    def __init__ (self, ole_object, instance_of=None, fields=[], property_map={}):
        # _set writes straight into __dict__, bypassing the __setattr__
        # hook below, which would otherwise try to push these values back
        # to the COM object during construction.
        try:
            _set (self, "ole_object", ole_object)
            _set (self, "id", ole_object.Path_.DisplayName.lower ())
            _set (self, "_instance_of", instance_of)
            _set (self, "properties", {})
            _set (self, "methods", {})
            _set (self, "property_map", property_map)
            _set (self, "_associated_classes", None)
            _set (self, "_keys", None)
            # property/method values are cached lazily: only the names are
            # recorded here (mapped to None) until first access
            if fields:
                for field in fields:
                    self.properties[field] = None
            else:
                for p in ole_object.Properties_:
                    self.properties[p.Name] = None
            for m in ole_object.Methods_:
                self.methods[m.Name] = None
            _set (self, "_properties", self.properties.keys ())
            _set (self, "_methods", self.methods.keys ())
            _set (self, "qualifiers", dict ((q.Name, q.Value) for q in self.ole_object.Qualifiers_))
        except pywintypes.com_error:
            handle_com_error ()

    def __lt__ (self, other):
        # objects order by their WMI display-name id
        return self.id < other.id

    def __str__ (self):
        """For a call to print([object]) return the OLE description
        of the properties / values of the object
        """
        try:
            return self.ole_object.GetObjectText_ ()
        except pywintypes.com_error:
            handle_com_error ()

    def __repr__ (self):
        """
        Indicate both the fact that this is a wrapped WMI object
        and the WMI object's own identifying class.
        """
        try:
            return "<%s: %s>" % (self.__class__.__name__, self.Path_.Path.encode ("ascii", "backslashreplace"))
        except pywintypes.com_error:
            handle_com_error ()

    def _cached_properties (self, attribute):
        # lazily build and cache the _wmi_property wrapper on first access
        if self.properties[attribute] is None:
            self.properties[attribute] = _wmi_property (self.ole_object.Properties_ (attribute))
        return self.properties[attribute]

    def _cached_methods (self, attribute):
        # lazily build and cache the _wmi_method wrapper on first access
        if self.methods[attribute] is None:
            self.methods[attribute] = _wmi_method (self.ole_object, attribute)
        return self.methods[attribute]

    def __getattr__ (self, attribute):
        """
        Attempt to pass attribute calls to the proxied COM object.
        If the attribute is recognised as a property, return its value;
        if it is recognised as a method, return a method wrapper which
        can then be called with parameters; otherwise pass the lookup
        on to the underlying object.
        """
        try:
            if attribute in self.properties:
                property = self._cached_properties (attribute)
                # property_map may convert by attribute name or by CIM type
                factory = self.property_map.get (attribute, self.property_map.get (property.type, lambda x: x))
                value = factory (property.value)
                #
                # If this is an association, certain of its properties
                # are actually the paths to the aspects of the association,
                # so translate them automatically into WMI objects.
                #
                if property.type.startswith ("ref:"):
                    return WMI (moniker=value)
                else:
                    return value
            elif attribute in self.methods:
                return self._cached_methods (attribute)
            else:
                return getattr (self.ole_object, attribute)
        except pywintypes.com_error:
            handle_com_error ()

    def __setattr__ (self, attribute, value):
        """If the attribute to be set is valid for the proxied
        COM object, set that objects's parameter value; if not,
        raise an exception.
        """
        try:
            if attribute in self.properties:
                self._cached_properties (attribute).set (value)
                # only persist if the object actually exists in WMI
                if self.ole_object.Path_.Path:
                    self.ole_object.Put_ ()
            else:
                raise AttributeError (attribute)
        except pywintypes.com_error:
            handle_com_error ()

    def __eq__ (self, other):
        # equality (like ordering and hashing) is based on the WMI id
        return self.id == other.id

    def __hash__ (self):
        return hash (self.id)

    def _getAttributeNames (self):
        """Return list of methods/properties for IPython completion"""
        attribs = [str (x) for x in self.methods.keys ()]
        attribs.extend ([str (x) for x in self.properties.keys ()])
        return attribs

    def _get_keys (self):
        """A WMI object is uniquely defined by a set of properties
        which constitute its keys. Lazily retrieves the keys for this
        instance or class.
        :returns: list of key property names
        """
        # NB You can get the keys of an instance more directly, via
        # Path\_.Keys but this doesn't apply to classes. The technique
        # here appears to work for both.
        if self._keys is None:
            _set (self, "_keys", [])
            for property in self.ole_object.Properties_:
                for qualifier in property.Qualifiers_:
                    if qualifier.Name == "key" and qualifier.Value:
                        self._keys.append (property.Name)
        return self._keys
    keys = property (_get_keys)

    def wmi_property (self, property_name):
        """Return the cached object representing one property
        of this object
        """
        return _wmi_property (self.ole_object.Properties_ (property_name))

    def put (self):
        """Push all outstanding property updates back to the
        WMI database.
        """
        self.ole_object.Put_ ()

    def set (self, **kwargs):
        """Set several properties of the underlying object
        at one go. This is particularly useful in combination
        with the new () method below. However, an instance
        which has been spawned in this way won't have enough
        information to write pack, so only try if the
        instance has a path.
        """
        if kwargs:
            try:
                for attribute, value in kwargs.items ():
                    if attribute in self.properties:
                        self._cached_properties (attribute).set (value)
                    else:
                        raise AttributeError (attribute)
                #
                # Only try to write the attributes
                # back if the object exists.
                #
                if self.ole_object.Path_.Path:
                    self.ole_object.Put_ ()
            except pywintypes.com_error:
                handle_com_error ()

    def path (self):
        """Return the WMI URI to this object. Can be used to
        determine the path relative to the parent namespace::
          pp0 = wmi.WMI ().Win32_ParallelPort ()[0]
          print(pp0.path ().RelPath)
        .. Do more with this
        """
        try:
            return self.ole_object.Path_
        except pywintypes.com_error:
            handle_com_error ()

    def derivation (self):
        """Return a tuple representing the object derivation for
        this object, with the most specific object first::
          pp0 = wmi.WMI ().Win32_ParallelPort ()[0]
          print(' <- '.join (pp0.derivation ()))
        """
        try:
            return self.ole_object.Derivation_
        except pywintypes.com_error:
            handle_com_error ()

    def _cached_associated_classes (self):
        # lazily build the map of associated classes; classes ask the
        # schema, instances ask for classes only
        if self._associated_classes is None:
            if isinstance (self, _wmi_class):
                params = {'bSchemaOnly' : True}
            else:
                params = {'bClassesOnly' : True}
            try:
                associated_classes = dict (
                    (assoc.Path_.Class, _wmi_class (self._namespace, assoc)) for
                    assoc in self.ole_object.Associators_ (**params)
                )
                _set (self, "_associated_classes", associated_classes)
            except pywintypes.com_error:
                handle_com_error ()
        return self._associated_classes
    associated_classes = property (_cached_associated_classes)

    def associators (self, wmi_association_class="", wmi_result_class=""):
        """Return a list of objects related to this one, optionally limited
        either by association class (ie the name of the class which relates
        them) or by result class (ie the name of the class which would be
        retrieved)::
          c = wmi.WMI ()
          pp = c.Win32_ParallelPort ()[0]
          for i in pp.associators (wmi_association_class="Win32_PortResource"):
            print(i)
          for i in pp.associators (wmi_result_class="Win32_PnPEntity"):
            print(i)
        """
        try:
            return [
                _wmi_object (i) for i in \
                self.ole_object.Associators_ (
                    strAssocClass=wmi_association_class,
                    strResultClass=wmi_result_class
                )
            ]
        except pywintypes.com_error:
            handle_com_error ()

    def references (self, wmi_class=""):
        """Return a list of associations involving this object, optionally
        limited by the result class (the name of the association class).
        NB Associations are treated specially; although WMI only returns
        the string corresponding to the instance of each associated object,
        this module will automatically convert that to the object itself::
          c = wmi.WMI ()
          sp = c.Win32_SerialPort ()[0]
          for i in sp.references ():
            print(i)
          for i in sp.references (wmi_class="Win32_SerialPortSetting"):
            print(i)
        """
        #
        # FIXME: Allow an actual class to be passed in, using
        # its .Path_.RelPath property to determine the string
        #
        try:
            return [_wmi_object (i) for i in self.ole_object.References_ (strResultClass=wmi_class)]
        except pywintypes.com_error:
            handle_com_error ()
#
# class _wmi_event
#
class _wmi_event (_wmi_object):
    """Slight extension of the _wmi_object class to allow
    objects which are the result of events firing to return
    extra information such as the type of event.
    """
    event_type_re = re.compile ("__Instance(Creation|Modification|Deletion)Event")

    def __init__ (self, event, event_info, fields=[]):
        _wmi_object.__init__ (self, event, fields=fields)
        # defaults when no event metadata is supplied
        _set (self, "event_type", None)
        _set (self, "timestamp", None)
        _set (self, "previous", None)
        if event_info:
            # e.g. "__InstanceCreationEvent" -> "creation"
            kind = self.event_type_re.match (event_info.Path_.Class).group (1).lower ()
            _set (self, "event_type", kind)
            if hasattr (event_info, "TIME_CREATED"):
                _set (self, "timestamp", from_1601 (event_info.TIME_CREATED))
            if hasattr (event_info, "PreviousInstance"):
                _set (self, "previous", event_info.PreviousInstance)
#
# class _wmi_class
#
class _wmi_class (_wmi_object):
    """Currying class to assist in issuing queries against
    a WMI namespace. The idea is that when someone issues
    an otherwise unknown method against the WMI object, if
    it matches a known WMI class a query object will be
    returned which may then be called with one or more params
    which will form the WHERE clause::

      c = wmi.WMI ()
      c_drives = c.Win32_LogicalDisk (Name='C:')
    """
    def __init__ (self, namespace, wmi_class):
        _wmi_object.__init__ (self, wmi_class)
        _set (self, "_class_name", wmi_class.Path_.Class)
        if namespace:
            _set (self, "_namespace", namespace)
        else:
            # No namespace supplied: derive it from the class moniker,
            # e.g. "winmgmts:root\cimv2:Win32_LogicalDisk".
            class_moniker = wmi_class.Path_.DisplayName
            winmgmts, namespace_moniker, class_name = class_moniker.split (":")
            namespace = _wmi_namespace (GetObject (winmgmts + ":" + namespace_moniker), False)
            _set (self, "_namespace", namespace)

    def __getattr__ (self, attribute):
        # Serve class-level WMI properties first, then fall back to the
        # generic _wmi_object attribute lookup.
        try:
            if attribute in self.properties:
                return _wmi_property (self.Properties_ (attribute))
            else:
                return _wmi_object.__getattr__ (self, attribute)
        except pywintypes.com_error:
            handle_com_error ()

    def query (self, fields=[], **where_clause):
        """Make it slightly easier to query against the class,
        by calling the namespace's query with the class preset.
        Won't work if the class has been instantiated directly.
        """
        #
        # FIXME: Not clear if this can ever happen
        #
        if self._namespace is None:
            raise x_wmi_no_namespace ("You cannot query directly from a WMI class")
        try:
            # Empty field list means SELECT *
            field_list = ", ".join (fields) or "*"
            wql = "SELECT " + field_list + " FROM " + self._class_name
            if where_clause:
                wql += " WHERE " + " AND ". join (["%s = %r" % (k, str (v)) for k, v in where_clause.items ()])
            return self._namespace.query (wql, self, fields)
        except pywintypes.com_error:
            handle_com_error ()

    # Calling the class object is the same as querying it.
    __call__ = query

    def watch_for (
        self,
        notification_type="operation",
        delay_secs=1,
        fields=[],
        **where_clause
    ):
        # Delegate to the owning namespace's watch_for with this class preset.
        if self._namespace is None:
            raise x_wmi_no_namespace ("You cannot watch directly from a WMI class")
        valid_notification_types = ("operation", "creation", "deletion", "modification")
        if notification_type.lower () not in valid_notification_types:
            raise x_wmi ("notification_type must be one of %s" % ", ".join (valid_notification_types))
        return self._namespace.watch_for (
            notification_type=notification_type,
            wmi_class=self,
            delay_secs=delay_secs,
            fields=fields,
            **where_clause
        )

    def instances (self):
        """Return a list of instances of the WMI class
        """
        try:
            return [_wmi_object (instance, self) for instance in self.Instances_ ()]
        except pywintypes.com_error:
            handle_com_error ()

    def new (self, **kwargs):
        """This is the equivalent to the raw-WMI SpawnInstance\_
        method. Note that there are relatively few uses for
        this, certainly fewer than you might imagine. Most
        classes which need to create a new *real* instance
        of themselves, eg Win32_Process, offer a .Create
        method. SpawnInstance\_ is generally reserved for
        instances which are passed as parameters to such
        `.Create` methods, a common example being the
        `Win32_SecurityDescriptor`, passed to `Win32_Share.Create`
        and other instances which need security.

        The example here is `Win32_ProcessStartup`, which
        controls the shown/hidden state etc. of a new
        `Win32_Process` instance::

          import win32con
          import wmi
          c = wmi.WMI ()
          startup = c.Win32_ProcessStartup.new (ShowWindow=win32con.SW_SHOWMINIMIZED)
          pid, retval = c.Win32_Process.Create (
            CommandLine="notepad.exe",
            ProcessStartupInformation=startup
          )

        .. warning::
           previous versions of this docstring illustrated using this function
           to create a new process. This is *not* a good example of its use;
           it is better handled with something like the example above.
        """
        try:
            obj = _wmi_object (self.SpawnInstance_ (), self)
            obj.set (**kwargs)
            return obj
        except pywintypes.com_error:
            handle_com_error ()
#
# class _wmi_result
#
class _wmi_result:
"""Simple, data only result for targeted WMI queries which request
data only result classes via fetch_as_classes.
"""
def __init__(self, obj, attributes):
if attributes:
for attr in attributes:
self.__dict__[attr] = obj.Properties_ (attr).Value
else:
for p in obj.Properties_:
attr = p.Name
self.__dict__[attr] = obj.Properties_(attr).Value
#
# class WMI
#
class _wmi_namespace:
    """A WMI root of a computer system. The classes attribute holds a list
    of the classes on offer. This means you can explore a bit with
    things like this::

      c = wmi.WMI ()
      for i in c.classes:
        if "user" in i.lower ():
          print(i)
    """
    def __init__ (self, namespace, find_classes):
        _set (self, "_namespace", namespace)
        #
        # wmi attribute preserved for backwards compatibility
        #
        _set (self, "wmi", namespace)

        # Lazily-populated class list and per-name _wmi_class cache
        self._classes = None
        self._classes_map = {}
        #
        # Pick up the list of classes under this namespace
        # so that they can be queried, and used as though
        # properties of the namespace by means of the __getattr__
        # hook below.
        # If the namespace does not support SubclassesOf, carry on
        # regardless
        #
        if find_classes:
            _ = self.classes

    def __repr__ (self):
        return "<_wmi_namespace: %s>" % self.wmi

    def __str__ (self):
        return repr (self)

    def _get_classes (self):
        # Populate the class-name cache on first access only.
        if self._classes is None:
            self._classes = self.subclasses_of ()
        return SelfDeprecatingDict (dict.fromkeys (self._classes))
    classes = property (_get_classes)

    def get (self, moniker):
        # Fetch an arbitrary WMI object by moniker.
        try:
            return _wmi_object (self.wmi.Get (moniker))
        except pywintypes.com_error:
            handle_com_error ()

    def handle (self):
        """The raw OLE object representing the WMI namespace"""
        return self._namespace

    def subclasses_of (self, root="", regex=r".*"):
        # Some namespaces do not expose SubclassesOf; treat that as "none".
        try:
            SubclassesOf = self._namespace.SubclassesOf
        except AttributeError:
            return set ()
        else:
            return set (
                c.Path_.Class
                for c in SubclassesOf (root)
                if re.match (regex, c.Path_.Class)
            )

    def instances (self, class_name):
        """Return a list of instances of the WMI class. This is
        (probably) equivalent to querying with no qualifiers::

          wmi.WMI ().instances ("Win32_LogicalDisk")
          # should be the same as
          wmi.WMI ().Win32_LogicalDisk ()
        """
        try:
            return [_wmi_object (obj) for obj in self._namespace.InstancesOf (class_name)]
        except pywintypes.com_error:
            handle_com_error ()

    def new (self, wmi_class, **kwargs):
        """This is now implemented by a call to :meth:`_wmi_class.new`"""
        return getattr (self, wmi_class).new (**kwargs)

    # Backwards-compatible alias
    new_instance_of = new

    def _raw_query (self, wql):
        """Execute a WQL query and return its raw results.  Use the flags
        recommended by Microsoft to achieve a read-only, semi-synchronous
        query where the time is taken while looping through.
        NB Backslashes need to be doubled up.
        """
        flags = wbemFlagReturnImmediately | wbemFlagForwardOnly
        wql = wql.replace ("\\", "\\\\")
        try:
            return self._namespace.ExecQuery (strQuery=wql, iFlags=flags)
        except pywintypes.com_error:
            handle_com_error ()

    def query (self, wql, instance_of=None, fields=[]):
        """Perform an arbitrary query against a WMI object, and return
        a list of _wmi_object representations of the results.
        """
        return [ _wmi_object (obj, instance_of, fields) for obj in self._raw_query(wql) ]

    def fetch_as_classes (self, wmi_classname, fields=(), **where_clause):
        """Build and execute a wql query to fetch the specified list of fields from
        the specified wmi_classname + where_clause, then return the results as
        a list of simple class instances with attributes matching field_list.

        If fields is left empty, select * and pre-load all class attributes for
        each class returned.
        """
        wql = "SELECT %s FROM %s" % (fields and ", ".join (fields) or "*", wmi_classname)
        if where_clause:
            # NOTE(review): values are interpolated directly into the WQL
            # string; callers should not pass untrusted where_clause values.
            wql += " WHERE " + " AND ".join (["%s = '%s'" % (k, v) for k, v in where_clause.items()])
        return [_wmi_result (obj, fields) for obj in self._raw_query(wql)]

    def fetch_as_lists (self, wmi_classname, fields, **where_clause):
        """Build and execute a wql query to fetch the specified list of fields from
        the specified wmi_classname + where_clause, then return the results as
        a list of lists whose values correspond to field_list.
        """
        wql = "SELECT %s FROM %s" % (", ".join (fields), wmi_classname)
        if where_clause:
            wql += " WHERE " + " AND ".join (["%s = '%s'" % (k, v) for k, v in where_clause.items()])
        results = []
        for obj in self._raw_query(wql):
            results.append ([obj.Properties_ (field).Value for field in fields])
        return results

    def watch_for (
        self,
        raw_wql=None,
        notification_type="operation",
        wmi_class=None,
        delay_secs=1,
        fields=[],
        **where_clause
    ):
        """Set up an event tracker on a WMI event. This function
        returns an wmi_watcher which can be called to get the
        next event::

          c = wmi.WMI ()

          raw_wql = "SELECT * FROM __InstanceCreationEvent WITHIN 2 WHERE TargetInstance ISA 'Win32_Process'"
          watcher = c.watch_for (raw_wql=raw_wql)
          while 1:
            process_created = watcher ()
            print(process_created.Name)

          # or
          watcher = c.watch_for (
            notification_type="Creation",
            wmi_class="Win32_Process",
            delay_secs=2,
            Name='calc.exe'
          )
          calc_created = watcher ()

        Now supports timeout on the call to watcher::

          import pythoncom
          import wmi
          c = wmi.WMI (privileges=["Security"])
          watcher1 = c.watch_for (
            notification_type="Creation",
            wmi_class="Win32_NTLogEvent",
            Type="error"
          )
          watcher2 = c.watch_for (
            notification_type="Creation",
            wmi_class="Win32_NTLogEvent",
            Type="warning"
          )

          while 1:
            try:
              error_log = watcher1 (500)
            except wmi.x_wmi_timed_out:
              pythoncom.PumpWaitingMessages ()
            else:
              print(error_log)
            try:
              warning_log = watcher2 (500)
            except wmi.x_wmi_timed_out:
              pythoncom.PumpWaitingMessages ()
            else:
              print(warning_log)
        """
        # Accept either a _wmi_class instance or a plain class name string.
        if isinstance (wmi_class, _wmi_class):
            class_name = wmi_class._class_name
        else:
            class_name = wmi_class
            wmi_class = getattr (self, class_name)
        # Extrinsic events are queried directly; intrinsic events are
        # wrapped in an __Instance<Type>Event polling query.
        is_extrinsic = "__ExtrinsicEvent" in wmi_class.derivation ()
        if raw_wql:
            wql = raw_wql
        else:
            fields = set (['TargetInstance'] + (fields or ["*"]))
            field_list = ", ".join (fields)
            if is_extrinsic:
                if where_clause:
                    where = " WHERE " + " AND ".join (["%s = '%s'" % (k, v) for k, v in where_clause.items ()])
                else:
                    where = ""
                wql = "SELECT " + field_list + " FROM " + class_name + where
            else:
                if where_clause:
                    where = " AND " + " AND ".join (["TargetInstance.%s = '%s'" % (k, v) for k, v in where_clause.items ()])
                else:
                    where = ""
                wql = \
                    "SELECT %s FROM __Instance%sEvent WITHIN %d WHERE TargetInstance ISA '%s' %s" % \
                    (field_list, notification_type, delay_secs, class_name, where)

        try:
            return _wmi_watcher (
                self._namespace.ExecNotificationQuery (wql),
                is_extrinsic=is_extrinsic,
                fields=fields
            )
        except pywintypes.com_error:
            handle_com_error ()

    def __getattr__ (self, attribute):
        """Offer WMI classes as simple attributes. Pass through any untrapped
        unattribute to the underlying OLE object. This means that new or
        unmapped functionality is still available to the module user.
        """
        #
        # Don't try to match against known classes as was previously
        # done since the list may not have been requested
        # (find_classes=False).
        #
        try:
            return self._cached_classes (attribute)
        except pywintypes.com_error:
            # Not a WMI class: defer to the raw OLE namespace object.
            return getattr (self._namespace, attribute)

    def _cached_classes (self, class_name):
        """Standard caching helper which keeps track of classes
        already retrieved by name and returns the existing object
        if found. If this is the first retrieval, store it and
        pass it back
        """
        if class_name not in self._classes_map:
            self._classes_map[class_name] = _wmi_class (self, self._namespace.Get (class_name))
        return self._classes_map[class_name]

    def _getAttributeNames (self):
        """Return list of classes for IPython completion engine"""
        # Hide the __* system classes from completion.
        return [x for x in self.classes if not x.startswith ('__')]
#
# class _wmi_watcher
#
class _wmi_watcher:
    """Helper class for WMI.watch_for below (qv)"""

    # Event properties which should be wrapped as _wmi_object rather
    # than returned raw.
    _event_property_map = {
        "TargetInstance" : _wmi_object,
        "PreviousInstance" : _wmi_object
    }
    def __init__ (self, wmi_event, is_extrinsic, fields=[]):
        self.wmi_event = wmi_event
        self.is_extrinsic = is_extrinsic
        self.fields = fields

    def __call__ (self, timeout_ms=-1):
        """When called, return the instance which caused the event. Supports
        timeout in milliseconds (defaulting to infinite). If the watcher
        times out, :exc:`x_wmi_timed_out` is raised. This makes it easy to support
        watching for multiple objects.
        """
        try:
            event = self.wmi_event.NextEvent (timeout_ms)
            if self.is_extrinsic:
                # Extrinsic events carry the payload directly.
                return _wmi_event (event, None, self.fields)
            else:
                # Intrinsic events wrap the changed object in TargetInstance.
                return _wmi_event (
                    event.Properties_ ("TargetInstance").Value,
                    _wmi_object (event, property_map=self._event_property_map),
                    self.fields
                )
        except pywintypes.com_error:
            handle_com_error ()
# Moniker scheme prefix for all WMI connections.
PROTOCOL = "winmgmts:"
def connect (
    computer="",
    impersonation_level="",
    authentication_level="",
    authority="",
    privileges="",
    moniker="",
    wmi=None,
    namespace="",
    suffix="",
    user="",
    password="",
    find_classes=False,
    debug=False
):
    """The WMI constructor can either take a ready-made moniker or as many
    parts of one as are necessary. Eg::

      c = wmi.WMI (moniker="winmgmts:{impersonationLevel=Delegate}//remote")
      # or
      c = wmi.WMI (computer="remote", privileges=["!RemoteShutdown", "Security"])

    I daren't link to a Microsoft URL; they change so often. Try Googling for
    WMI construct moniker and see what it comes back with.

    For complete control, a named argument "wmi" can be supplied, which
    should be a SWbemServices object, which you create yourself. Eg::

      loc = win32com.client.Dispatch("WbemScripting.SWbemLocator")
      svc = loc.ConnectServer(...)
      c = wmi.WMI(wmi=svc)

    This is the only way of connecting to a remote computer with a different
    username, as the moniker syntax does not allow specification of a user
    name.

    If the `wmi` parameter is supplied, all other parameters are ignored.
    """
    global _DEBUG
    _DEBUG = debug

    try:
        try:
            # Precedence: explicit SWbemServices, then explicit moniker,
            # then a moniker constructed from the individual parts.
            if wmi:
                obj = wmi

            elif moniker:
                if not moniker.startswith (PROTOCOL):
                    moniker = PROTOCOL + moniker
                obj = GetObject (moniker)

            else:
                if user:
                    # user/password connections must go through ConnectServer,
                    # which precludes moniker-only features.
                    if privileges or suffix:
                        raise x_wmi_authentication ("You can't specify privileges or a suffix as well as a username")
                    elif computer in (None, '', '.'):
                        raise x_wmi_authentication ("You can only specify user/password for a remote connection")
                    else:
                        obj = connect_server (
                            server=computer,
                            namespace=namespace,
                            user=user,
                            password=password,
                            authority=authority,
                            impersonation_level=impersonation_level,
                            authentication_level=authentication_level
                        )

                else:
                    moniker = construct_moniker (
                        computer=computer,
                        impersonation_level=impersonation_level,
                        authentication_level=authentication_level,
                        authority=authority,
                        privileges=privileges,
                        namespace=namespace,
                        suffix=suffix
                    )
                    obj = GetObject (moniker)

            # Wrap the raw COM object in the appropriate helper class.
            wmi_type = get_wmi_type (obj)

            if wmi_type == "namespace":
                return _wmi_namespace (obj, find_classes)
            elif wmi_type == "class":
                return _wmi_class (None, obj)
            elif wmi_type == "instance":
                return _wmi_object (obj)
            else:
                raise x_wmi ("Unknown moniker type")

        except pywintypes.com_error:
            handle_com_error ()

    except x_wmi_uninitialised_thread:
        raise x_wmi_uninitialised_thread ("WMI returned a syntax error: you're probably running inside a thread without first calling pythoncom.CoInitialize[Ex]")

# Public alias: wmi.WMI (...) is the documented entry point.
WMI = connect
def construct_moniker (
    computer=None,
    impersonation_level=None,
    authentication_level=None,
    authority=None,
    privileges=None,
    namespace=None,
    suffix=None
):
    """Assemble a "winmgmts:..." moniker string from its optional parts:
    security settings, target computer, namespace path and suffix.
    """
    security = []
    if impersonation_level:
        security.append ("impersonationLevel=%s" % impersonation_level)
    if authentication_level:
        security.append ("authenticationLevel=%s" % authentication_level)
    #
    # Use of the authority descriptor is invalid on the local machine
    #
    if authority and computer:
        security.append ("authority=%s" % authority)
    if privileges:
        security.append ("(%s)" % ", ".join (privileges))

    pieces = [PROTOCOL]
    if security:
        pieces.append ("{%s}!" % ",".join (security))
    if computer:
        pieces.append ("//%s/" % computer)
    if namespace:
        # Accept both slash styles and force a leading "root" component.
        parts = re.split (r"[/\\]", namespace)
        if parts[0] != 'root':
            parts.insert (0, "root")
        pieces.append ("/".join (parts))
    if suffix:
        pieces.append (":%s" % suffix)
    return "".join (pieces)
def get_wmi_type (obj):
    """Classify a raw WMI COM object as "namespace", "class" or "instance".

    Namespaces have no Path_ attribute; for the others, Path_.IsClass
    distinguishes a class definition from a live instance.
    """
    try:
        path = obj.Path_
    except AttributeError:
        return "namespace"
    return "class" if path.IsClass else "instance"
def connect_server (
    server,
    namespace = "",
    user = "",
    password = "",
    locale = "",
    authority = "",
    impersonation_level="",
    authentication_level="",
    security_flags = 0x80,
    named_value_set = None
):
    """Return a remote server running WMI

    :param server: name of the server
    :param namespace: namespace to connect to - defaults to whatever's defined as default
    :param user: username to connect as, either local or domain (dom\\name or user@domain for XP)
    :param password: leave blank to use current context
    :param locale: desired locale in form MS_XXXX (eg MS_409 for Am En)
    :param authority: either "Kerberos:" or an NT domain. Not needed if included in user
    :param impersonation_level: valid WMI impersonation level
    :param authentication_level: valid WMI authentication level
    :param security_flags: if 0, connect will wait forever; if 0x80, connect will timeout at 2 mins
    :param named_value_set: typically empty, otherwise a context-specific `SWbemNamedValueSet`

    Example::

      remote_connection = wmi.connect_server (
        server="remote_machine", user="myname", password="mypassword"
      )
      c = wmi.WMI (wmi=remote_connection)
    """
    #
    # Thanks to Matt Mercer for example code to set
    # impersonation & authentication on ConnectServer
    #
    # NOTE(review): `obj` below is not defined anywhere in this function;
    # the level-name lookup appears to depend on a module-level `obj`
    # being in scope — confirm against the full module before relying on
    # impersonation_level/authentication_level here.
    if impersonation_level:
        try:
            impersonation = getattr (obj._constants, "wbemImpersonationLevel%s" % impersonation_level.title ())
        except AttributeError:
            raise x_wmi_authentication ("No such impersonation level: %s" % impersonation_level)
    else:
        impersonation = None

    if authentication_level:
        try:
            authentication = getattr (obj._constants, "wbemAuthenticationLevel%s" % authentication_level.title ())
        except AttributeError:
            # BUG FIX: previously reported "No such impersonation level"
            # with the impersonation_level value; report the authentication
            # level actually at fault.
            raise x_wmi_authentication ("No such authentication level: %s" % authentication_level)
    else:
        authentication = None

    server = Dispatch ("WbemScripting.SWbemLocator").\
        ConnectServer (
            server,
            namespace,
            user,
            password,
            locale,
            authority,
            security_flags,
            named_value_set
        )
    if impersonation:
        server.Security_.ImpersonationLevel = impersonation
    if authentication:
        server.Security_.AuthenticationLevel = authentication
    return server
def Registry (
    computer=None,
    impersonation_level="Impersonate",
    authentication_level="Default",
    authority=None,
    privileges=None,
    moniker=None
):
    # Deprecated helper: returns the StdRegProv object from the DEFAULT
    # namespace for registry access.
    warnings.warn ("This function can be implemented using wmi.WMI (namespace='DEFAULT').StdRegProv", DeprecationWarning)
    if not moniker:
        moniker = construct_moniker (
            computer=computer,
            impersonation_level=impersonation_level,
            authentication_level=authentication_level,
            authority=authority,
            privileges=privileges,
            namespace="default",
            suffix="StdRegProv"
        )

    try:
        return _wmi_object (GetObject (moniker))
    except pywintypes.com_error:
        handle_com_error ()
#
# Typical use test
#
if __name__ == '__main__':
    # Smoke test: connect to the local WMI namespace and dump logical disks.
    system = WMI ()
    for my_computer in system.Win32_ComputerSystem ():
        print("Disks on", my_computer.Name)
    for disk in system.Win32_LogicalDisk ():
        print(disk.Caption, disk.Description, disk.ProviderName or "")
opensvc-1.8~20170412/lib/rcBtrfs.py 0000644 0001750 0001750 00000031252 13073467726 017013 0 ustar jkelbert jkelbert from rcUtilities import justcall, vcall
from rcUtilitiesLinux import label_to_dev
import sys
import os
import logging
from rcGlobalEnv import rcEnv
class InitError(Exception):
    """Raised when a Btrfs filesystem handle cannot be initialized."""
    pass
class ExecError(Exception):
    """Raised when a btrfs-related command execution fails."""
    pass
class ExistError(Exception):
    """Raised when an object to create (e.g. a snapshot) already exists."""
    pass
class Btrfs(object):
    """Wrapper around the btrfs(8) commands for one filesystem, identified
    by mounted path or by label. Commands run locally, or on a remote
    node through rcEnv.rsh when ``node`` is set.
    """
    # Shared fallback logger, lazily initialized by __init__.
    log = None
    #snapvol = ".osvcsnap"
    snapvol = ""

    def __init__(self, path=None, label=None, node=None, log=None):
        """Initialize from either a mounted ``path`` or a filesystem
        ``label``. Raises InitError if the label cannot be determined or
        the path does not exist.
        """
        self.path = path
        self.label = label
        self.node = node
        if log is None:
            if Btrfs.log is None:
                Btrfs.log = logging.getLogger("BTRFS")
                Btrfs.log.addHandler(logging.StreamHandler(sys.stdout))
                Btrfs.log.setLevel(logging.INFO)
            self.log = Btrfs.log
        else:
            self.log = log
        if path is not None:
            if not self.dir_exists(path):
                raise InitError("path %s does not exist"%path)
            self.get_label_from_path(path)
        if self.label is None:
            raise InitError("failed to determine btrfs label")
        # From here on, work from the filesystem root mount maintained
        # under rcEnv.pathvar.
        self.setup_rootvol()
        self.path = self.rootdir
        self.snapdir = os.path.join(self.rootdir, self.snapvol)
        self.snapdir = os.path.normpath(self.snapdir)

    def get_dev(self):
        """Resolve and cache self.dev from the label (local node only)."""
        if hasattr(self, "dev"):
            return
        if self.node is None:
            self.dev = label_to_dev("LABEL="+self.label)
        else:
            return
        if self.dev is None:
            # Fall back to the mount-style LABEL= syntax.
            self.dev = "LABEL="+self.label

    def rmdir(self, path):
        """Remove an empty directory, raising ExecError on failure."""
        cmd = ['rmdir', path]
        out, err, ret = self.justcall(cmd)
        if ret != 0:
            raise ExecError("error removing dir %s:\n%s"%(path,err))

    def dir_exists(self, path):
        """Return True if path is a directory (possibly on the remote node).

        ``test -d`` exits 1 when the path is missing; any higher code means
        the remote shell itself failed.
        """
        cmd = ['test', '-d', path]
        out, err, ret = self.justcall(cmd)
        if ret > 1:
            raise ExecError("error joining remote node %s:\n%s"%(self.node, err))
        if ret == 1:
            return False
        return True

    def get_subvols(self, refresh=False):
        """Load the subvolume table into self.subvols, keyed by id.

        Sample ``btrfs subvol list -p`` output::

          ID 256 parent 5 top level 5 path btrfssvc
          ID 259 parent 256 top level 5 path btrfssvc/child
          ID 260 parent 5 top level 5 path btrfssvc@sent
          ID 261 parent 256 top level 5 path btrfssvc/child@sent
          ID 262 parent 5 top level 5 path btrfssvc@tosend
          ID 263 parent 256 top level 5 path btrfssvc/child@tosend
          ID 264 parent 5 top level 5 path subdir/vol
          ID 265 parent 256 top level 5 path btrfssvc/cross_mnt_snap
        """
        if not refresh and hasattr(self, "subvols"):
            return
        self.subvols = {}
        cmd = ['btrfs', 'subvol', 'list', '-p', self.path]
        out, err, ret = self.justcall(cmd)
        if ret != 0:
            raise InitError("error running btrfs subvol list %s:\n"%self.path+err)
        for line in out.split("\n"):
            if len(line) == 0:
                continue
            l = line.split()
            subvol = {}
            subvol['id'] = l[1]
            subvol['parent_id'] = l[3]
            subvol['top'] = l[6]
            # the path may contain spaces: slice after the " path " marker
            subvol['path'] = line[line.index(" path ")+6:]
            self.subvols[subvol['id']] = subvol

    def subvol_delete(self, subvol=[], recursive=False):
        """Delete one subvolume or a list of subvolumes, deepest first.

        NOTE(review): sorts the caller's list in place when a list is
        passed.
        """
        opts = []
        if recursive:
            # BUG FIX: was opts.appendi('-R'), raising AttributeError
            opts.append('-R')
        if isinstance(subvol, list):
            subvols = subvol
        else:
            subvols = [subvol]

        # delete in descending depth order
        subvols.sort(reverse=True)
        cmd = []
        for subvol in subvols:
            if not self.has_subvol(subvol):
                continue
            cmd += ['btrfs', 'subvolume', 'delete'] + opts + [subvol, '&&']
        if len(cmd) == 0:
            return
        # drop the trailing '&&'
        cmd = cmd[:-1]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ExecError()

    def get_subvols_in_path(self, path):
        """Return path plus the absolute paths of all subvolumes below it."""
        self.get_subvols(refresh=True)
        head = self.path_to_subvol(path)
        subvols = [path]
        for subvol in self.subvols.values():
            if subvol['path'].startswith(head+'/'):
                subvols.append(self.rootdir+'/'+subvol['path'])
        return subvols

    def snapshot(self, origin, snap, readonly=False, recursive=False):
        """Snapshot ``origin`` to ``snap``; raise ExistError if snap exists."""
        if self.has_subvol(snap):
            raise ExistError("snapshot %s already exists"%snap)
        opts = []
        if recursive:
            opts.append('-R')
        if readonly:
            opts.append('-r')
        cmd = ['btrfs', 'subvolume', 'snapshot'] + opts + [origin, snap]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ExecError(err)

    def has_snapvol(self):
        return self.has_subvol(self.snapvol)

    def path_to_subvol(self, path):
        """Convert an absolute path inside the fs to a rootdir-relative
        subvolume name; relative paths are returned unchanged."""
        if path.startswith('/'):
            return path.replace(self.rootdir+'/', "")
        return path

    def has_subvol(self, subvol):
        """Return True if the named subvolume exists (list is refreshed)."""
        # refresh subvol list
        self.get_subvols(refresh=True)
        subvol = self.path_to_subvol(subvol)
        for sub in self.subvols.values():
            if sub['path'] == subvol:
                return True
        return False

    def mount_snapvol(self):
        """Mount the snapshot subvolume on self.snapdir."""
        self.get_dev()
        cmd = ['mount', '-t', 'btrfs', '-o', 'subvol='+self.snapvol, self.dev, self.snapdir]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ExecError("error mounting %s subvol:\ncmd: %s\n%s"%(self.label,' '.join(cmd),err))

    def mount_rootvol(self):
        """Mount the filesystem root (subvolid=0) on self.rootdir if needed."""
        if self.node:
            return
        self.get_dev()
        if self.is_mounted_subvol(self.rootdir):
            return
        cmd = ['mount', '-t', 'btrfs', '-o', 'subvolid=0', self.dev, self.rootdir]
        out, err, ret = self.justcall(cmd)
        if ret != 0:
            raise ExecError("error mounting %s btrfs:\ncmd: %s\n%s"%(self.label,' '.join(cmd),err))

    def create_snapvol(self):
        """Create the snapshot subvolume via a temporary root mount."""
        self.get_dev()
        error = False
        import tempfile
        tmpdir = tempfile.mktemp()
        cmd = ['mkdir', '-p', tmpdir]
        out, err, ret = self.justcall(cmd)
        if ret != 0:
            raise ExecError("error creating dir %s:\n"%tmpdir+err)
        cmd = ['mount', '-t', 'btrfs', '-o', 'subvolid=0', self.dev, tmpdir]
        out, err, ret = self.justcall(cmd)
        if ret != 0:
            self.rmdir(tmpdir)
            raise ExecError("error mounting %s btrfs:\ncmd: %s\n%s"%(self.label,' '.join(cmd),err))
        try:
            self.create_subvol(os.path.join(tmpdir, self.snapvol))
        except:
            # remember the failure but still unmount and clean up
            error = True
        cmd = ['umount', tmpdir]
        out, err, ret = self.justcall(cmd)
        if ret != 0:
            raise ExecError("error umounting %s btrfs:\n"%self.label+err)
        self.rmdir(tmpdir)
        if error:
            raise ExecError("failed to create %s"%self.snapvol)

    def vcall(self, cmd):
        """Run cmd through vcall, wrapping it in rcEnv.rsh for remote nodes."""
        if self.node is not None:
            cmd = [' '.join(cmd)]
            cmd = rcEnv.rsh.split() + [self.node] + cmd
        return vcall(cmd, log=self.log)

    def justcall(self, cmd):
        """Run cmd through justcall, wrapping it in rcEnv.rsh for remote nodes."""
        if self.node is not None:
            cmd = [' '.join(cmd)]
            cmd = rcEnv.rsh.split() + [self.node] + cmd
        return justcall(cmd)

    def create_subvol(self, path):
        cmd = ['btrfs', 'subvol', 'create', path]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ExecError("error creating %s subvol"%path)

    def setup_snap_subvol(self):
        """Create, mount and verify the snapshot subvolume."""
        # unused for now
        if not self.dir_exists(self.snapdir):
            cmd = ['mkdir', '-p', self.snapdir]
            ret, out, err = self.vcall(cmd)
            if ret != 0:
                raise ExecError("error creating dir %s:\n"%self.snapdir+err)
        if not self.has_snapvol():
            self.create_snapvol()
            self.mount_snapvol()
        try:
            o = Btrfs(self.snapdir)
        except InitError:
            self.mount_snapvol()
            o = Btrfs(self.snapdir)
        if o.label != self.label:
            raise ExecError("wrong fs mounted in %s: %s"%(self.snapdir, o.label))
        # verify this is the right subvol (missing: path->subvol name fn)

    def setup_rootvol(self):
        """Ensure the filesystem root is mounted under rcEnv.pathvar."""
        self.rootdir = os.path.join(rcEnv.pathvar, 'btrfs', self.label)
        if not self.dir_exists(self.rootdir):
            cmd = ['mkdir', '-p', self.rootdir]
            ret, out, err = self.vcall(cmd)
            if ret != 0:
                raise ExecError("error creating dir %s:\n"%self.rootdir+err)
        self.mount_rootvol()

    def setup_snap(self):
        """Create the snapshot subvolume if needed and verify its label."""
        if not self.has_snapvol():
            self.create_subvol(self.snapdir)
        try:
            o = Btrfs(self.snapdir)
        except InitError:
            self.mount_snapvol()
            o = Btrfs(self.snapdir)
        if o.label != self.label:
            raise ExecError("wrong fs mounted in %s: %s"%(self.snapdir, o.label))
        # verify this is the right subvol (missing: path->subvol name fn)

    def get_mounts(self):
        """Return a {mountpoint: label} dict of mounted btrfs filesystems.

        Sample ``mount -t btrfs -l`` line::

          /dev/vdb on /data type btrfs (rw) [data]
        """
        cmd = ['mount', '-t', 'btrfs', '-l']
        out, err, ret = self.justcall(cmd)
        if ret != 0:
            raise InitError("error running %s:\n"%' '.join(cmd)+err)
        mounts = {}
        for line in out.split("\n"):
            if len(line) == 0 or " on " not in line or " type btrfs " not in line:
                continue
            mntpt = line[line.index(" on ")+4:line.index(" type btrfs ")]
            if '[' in line:
                # label shown in trailing [brackets]
                l = line.split('[')
                label = l[-1].strip(']')
            else:
                label = self.get_label(mntpt)
            mounts[mntpt] = label
        return mounts

    def get_label(self, mntpt):
        cmd = ['btrfs', 'fi', 'label', mntpt]
        out, err, ret = self.justcall(cmd)
        if ret != 0:
            # BUG FIX: raised the undefined name `excError` (NameError)
            raise ExecError("error running %s:\n"%' '.join(cmd)+err)
        return out.strip('\n')

    def is_mounted_subvol(self, path):
        path = path.rstrip('/')
        for mntpt, label in self.get_mounts().items():
            if mntpt == path and label == self.label:
                return True
        return False

    def get_label_from_path(self, path):
        """Set self.label from the nearest enclosing btrfs mountpoint."""
        path = path.rstrip('/')
        mounts = self.get_mounts()
        # walk up the path components until a mountpoint matches
        l = path.split('/')
        while len(l) > 0:
            m = '/'.join(l)
            if m in mounts:
                self.label = mounts[m]
                return
            l = l[:-1]
        raise InitError("could not get label from path %s"%path)

    def parse_fi_show(self):
        """Parse ``btrfs fi show`` into self.devs ({dev: (label, uuid)}).

        Sample output::

          Label: 'data'  uuid: 0d05d0b9-ffab-4ab8-b923-15a38ec806d5
                  Total devices 2 FS bytes used 48.92MB
                  devid    2 size 5.00GB used 1.51GB path /dev/vdc
                  devid    1 size 5.00GB used 1.53GB path /dev/vdb
        """
        # BUG FIX: `path` was an undefined name here; use the filesystem
        # root path held by this object.
        cmd = ['btrfs', 'fi', 'show', self.path]
        out, err, ret = self.justcall(cmd)
        if ret != 0:
            raise InitError("error running btrfs fi show:\n"+err)
        self.devs = {}
        for line in out.split("\n"):
            if "Label:" in line:
                l = line.split("'")
                if len(l) != 2:
                    raise InitError("unexpected line format: "+line)
                label = l[1]
                l = line.split()
                uuid = l[-1]
            elif line.strip().startswith("devid"):
                l = line.split()
                self.devs[l[-1]] = (label, uuid)

    def get_transid(self, path):
        """Return the generation (transid) of a subvolume.

        Sample ``btrfs subvolume show`` output::

          /opt/opensvc/var/btrfs/win2/win2@sent
                  Name:                   win2@sent
                  uuid:                   167af15f-7d5a-2745-966c-dde4aaa056b7
                  Parent uuid:            30121b33-a10f-a642-8caf-0184f5d8e5b0
                  Creation time:          2015-09-02 04:01:20
                  Object ID:              549
                  Generation (Gen):       591564
                  Gen at creation:        591564
                  Parent:                 5
                  Top Level:              5
                  Flags:                  readonly
                  Snapshot(s):
        """
        cmd = ['btrfs', 'subvolume', 'show', path]
        # NOTE(review): uses the module-level justcall, so this always runs
        # locally even when self.node is set — confirm that is intended.
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ExecError("can't fetch %s transid:\n%s"%(path, err))
        for line in out.split("\n"):
            if "Generation" in line:
                return line.split()[-1]
        raise ExecError("can't find %s transid\n"%path)

    def __str__(self):
        self.get_subvols()
        s = "label: %s\n" % self.label
        s += "subvolumes:\n"
        for sub in self.subvols.values():
            s += "id: %s parent_id: %s top: %s path: %s\n"%(sub['id'], sub['parent_id'], sub['top'], sub['path'])
        return s
if __name__ == "__main__":
    # Ad-hoc manual test: open the filesystem by label (argv[1]) and print
    # the transid of a hard-coded snapshot path.
    o = Btrfs(label=sys.argv[1])
    print(o.get_transid("/opt/opensvc/var/btrfs/deb1/deb1@sent"))
    #o.setup_snap()
opensvc-1.8~20170412/lib/rcUtilitiesDarwin.py 0000644 0001750 0001750 00000000570 13073467726 021052 0 ustar jkelbert jkelbert from rcUtilities import call, qcall
def check_ping(addr, timeout=5, count=1):
    """Return True when addr answers ICMP echo, False otherwise.

    Uses ping6 for addresses containing ':' (IPv6), ping otherwise.
    """
    prog = 'ping6' if ':' in addr else 'ping'
    cmd = [
        prog,
        '-c', repr(count),
        '-W', repr(timeout),
        '-t', repr(timeout),
        addr,
    ]
    ret, out, err = call(cmd)
    return ret == 0
opensvc-1.8~20170412/lib/nodeHP-UX.py 0000644 0001750 0001750 00000000374 13073467726 017116 0 ustar jkelbert jkelbert import node
class Node(node.Node):
    """HP-UX node: halt and reboot through shutdown(1M)."""

    def shutdown(self):
        # -h halt, -y answer prompts, grace period 0 seconds
        ret, out, err = self.vcall(["shutdown", "-h", "-y", "0"])

    def _reboot(self):
        # -r reboot, -y answer prompts, grace period 0 seconds
        ret, out, err = self.vcall(["shutdown", "-r", "-y", "0"])
opensvc-1.8~20170412/lib/resSyncEvasnap.py 0000644 0001750 0001750 00000024714 13073467726 020357 0 ustar jkelbert jkelbert import os
import logging
from rcGlobalEnv import rcEnv
from rcUtilities import which
import rcExceptions as ex
import rcStatus
import time
import datetime
import xml.etree.ElementTree as ET
import subprocess
import resSync
class syncEvasnap(resSync.Sync):
def wait_for_devs_ready(self):
pass
def can_sync(self, target=None):
ts = None
""" get oldest snap
"""
for pair in self.pairs:
info = self.lun_info(pair['dst'])
if info is None:
self.log.debug("snap %s missing"%pair['dst'])
return True
_ts = info['creationdatetime']
if ts is None or _ts < ts:
ts = _ts
return not self.skip_sync(ts)
def recreate(self):
def snapname(info):
return info['objectname'].split('\\')[-2]+'_'+self.snap_name
try:
self.prereq()
except ex.excError as e:
self.log.error(str(e))
raise ex.excError
status = self._status(skip_prereq=True)
if not self.can_sync():
return
if not self.svc.options.force and status == rcStatus.UP:
self.log.info("snapshots are already fresh. use --force to bypass")
return
cmd = []
for pair in self.pairs:
if pair['dst'] in self._lun_info:
info = self._lun_info[pair['dst']]
for mask in pair['mask']:
lunid = int(mask.split('\\')[-1])
hostpath = '\\'.join(mask.split('\\')[:-1])
if hostpath in info['mask'] and info['mask'][hostpath] == lunid:
cmd += ['delete lun "%s"'%mask]
cmd += ['delete vdisk "%s" wait_for_completion'%info['objectname']]
self.sssu(cmd, verbose=True)
cmd = []
for pair in self.pairs:
info = self.lun_info(pair['src'])
if 'allocation_policy' in pair:
policy = str(pair['allocation_policy']).lower()
else:
policy = 'demand'
if policy not in ['demand', 'fully']:
policy = 'demand'
if 'vraid' in pair and pair['vraid'] in ['vraid6', 'vraid5', 'vraid0', 'vraid1']:
force_vraid = "redundancy=%s"%pair['vraid']
else:
force_vraid = ""
cmd += ['add snapshot %s vdisk="%s" allocation_policy=%s world_wide_lun_name=%s %s'%(snapname(info), info['objectname'], policy, self.convert_wwid(pair['dst']), force_vraid)]
self.sssu(cmd, verbose=True)
cmd = []
for pair in self.pairs:
info = self.lun_info(pair['dst'])
for mask in pair['mask']:
lunid = mask.split('\\')[-1]
hostpath = '\\'.join(mask.split('\\')[:-1])
cmd += ['add lun %s host="%s" vdisk="%s"'%(lunid, hostpath, snapname(info))]
self.sssu(cmd, verbose=True)
def sssu(self, cmd=[], verbose=False, check=True):
os.chdir(rcEnv.pathtmp)
cmd = [self.sssubin,
"select manager %s username=%s password=%s"%(self.manager, self.username, self.password),
"select system %s"%self.eva_name] + cmd
if verbose:
import re
from copy import copy
_cmd = copy(cmd)
_cmd[1] = re.sub(r'password=.*', 'password=xxxxx', _cmd[1])
self.log.info(subprocess.list2cmdline(_cmd))
ret, out, err = self.call(cmd)
if 'Error:' in out > 0:
self.log.error(out)
else:
self.log.info(out)
else:
ret, out, err = self.call(cmd)
if check and "Error" in out:
raise ex.excError("sssu command execution error")
return ret, out, err
def lun_info(self, wwid):
if wwid in self._lun_info:
return self._lun_info[wwid]
if '-' not in wwid:
_wwid = wwid
wwid = self.convert_wwid(wwid)
info = {
'oxuid': 'unknown',
'lunid': -1,
'creationdatetime': datetime.datetime(year=datetime.MINYEAR, month=1, day=1),
'mask': {}
}
try:
ret, out, err = self.sssu(["find vdisk lunwwid="+wwid+" xml"])
except:
return None
l = out.split('\n')
for i, line in enumerate(l):
if line == '':
l = l[i:]
break
e = ET.fromstring('\n'.join(l))
for p in e.findall("presentations/presentation"):
host = p.find("hostname").text
lunid = p.find("lunnumber").text
info['mask'][host] = int(lunid)
e_oxuid = e.find("objectparenthexuid")
if e_oxuid is not None:
info['oxuid'] = e_oxuid.text.replace('-','')
e_objectname = e.find("objectname")
if e_objectname is not None:
info['objectname'] = e_objectname.text
e_creationdatetime = e.find("creationdatetime")
if e_oxuid is not None:
try:
creationdatetime = datetime.datetime.strptime(e_creationdatetime.text, "%d-%b-%Y %H:%M:%S")
info['creationdatetime'] = creationdatetime
except:
self.log.error("failed to parse snapshot creation datetime")
pass
self._lun_info[_wwid] = info
return info
def _status(self, verbose=False, skip_prereq=False):
    """Resource status: UP when every snapshot pair exists, descends from
    the correct source, is fresh enough, and is presented with the
    configured lunid; WARN otherwise (with a status log explaining why).
    skip_prereq: skip the auth.conf/sssu sanity checks.
    """
    err = False
    errlog = []
    try:
        if not skip_prereq:
            self.prereq()
    except ex.excError as e:
        self.status_log(str(e))
        return rcStatus.WARN
    for pair in self.pairs:
        info_s = self.lun_info(pair['src'])
        info_d = self.lun_info(pair['dst'])
        if info_s is None:
            errlog.append("snapshot source %s does not exists"%pair['src'])
            err |= True
            continue
        if info_d is None:
            errlog.append("snapshot %s does not exists"%pair['dst'])
            err |= True
            continue
        # the snapshot's parent object uid must match the source vdisk uid
        if info_s['oxuid'].lower() != info_d['oxuid'].lower():
            errlog.append("snapshot %s exists but incorrect parent object uid: %s"%(pair['dst'], info_d['oxuid'].lower()))
            err |= True
        # snapshot older than sync_max_delay minutes => stale
        if info_d['creationdatetime'] < datetime.datetime.now() - datetime.timedelta(minutes=self.sync_max_delay):
            errlog.append("snapshot %s too old"%pair['dst'])
            err |= True
        for mask in pair['mask']:
            # mask format looks like "<host path>\<lunid>", backslash
            # separated, last element being the lunid -- per the
            # splitting below
            hostpath = '\\'.join(mask.split('\\')[:-1])
            hostname = mask.split('\\')[-2]
            dstlunid = mask.split('\\')[-1]
            if hostpath not in info_d['mask']:
                errlog.append("snapshot %s exists but not presented to host %s"%(pair['dst'], hostname))
                err |= True
                continue
            try:
                dstlunid = int(dstlunid)
                if info_d['mask'][hostpath] != dstlunid:
                    errlog.append("snapshot %s exists but incorrect lunid for host %s"%(pair['dst'], hostname))
                    err |= True
            except ValueError:
                # non-numeric lunid in the mask: presence check only
                pass
    if err:
        self.status_log('. '.join(errlog))
        return rcStatus.WARN
    return rcStatus.UP
def sync_resync(self):
    # resync == recreate the snapshots
    self.recreate()
def sync_update(self):
    # update == recreate the snapshots
    self.recreate()
def refresh_svcstatus(self):
    """Refresh the cached service aggregate status, excluding the sync
    and hb resource groups."""
    self.svcstatus = self.svc.group_status(excluded_groups=set(["sync", 'hb']))
def get_svcstatus(self):
    """Populate the cached service status if not already done."""
    if len(self.svcstatus) == 0:
        self.refresh_svcstatus()
def convert_wwid(self, wwid):
    """Reformat a flat wwid string into dash-separated groups of 4
    characters, e.g. '600508b4...' -> '6005-08b4-...'. A trailing
    partial group is kept as-is."""
    groups = [wwid[pos:pos + 4] for pos in range(0, len(wwid), 4)]
    return '-'.join(groups)
def prereq(self):
    """Check the prerequisites for this resource:
    - auth.conf exists and holds manager/username/password for the array
    - the sssu binary is found (from the 'bin' option or in PATH)
    - each pair has src, dst and mask parameters
    - the array manager is reachable
    Raises ex.excError on any unmet prerequisite.
    """
    import ConfigParser
    if not os.path.exists(self.conf):
        raise ex.excError("missing %s"%self.conf)
    self.config = ConfigParser.RawConfigParser()
    self.config.read(self.conf)
    if not self.config.has_section(self.eva_name):
        raise ex.excError("no credentials for array %s in %s"%(self.eva_name, self.conf))
    if not self.config.has_option(self.eva_name, "manager"):
        raise ex.excError("no manager set for array %s in %s"%(self.eva_name, self.conf))
    if not self.config.has_option(self.eva_name, "username"):
        raise ex.excError("no username set for array %s in %s"%(self.eva_name, self.conf))
    if not self.config.has_option(self.eva_name, "password"):
        raise ex.excError("no password set for array %s in %s"%(self.eva_name, self.conf))
    self.manager = self.config.get(self.eva_name, "manager")
    self.username = self.config.get(self.eva_name, "username")
    self.password = self.config.get(self.eva_name, "password")
    try:
        self.sssubin = self.config.get(self.eva_name, "bin")
    except:
        self.sssubin = None
    if self.sssubin:
        sssubin = which(self.sssubin)
    else:
        # fix: when no 'bin' is set in auth.conf, search sssu in PATH
        # (was 'sssubin = None', which made this case always raise
        # "missing None" and left the PATH-fallback branch below dead)
        sssubin = which("sssu")
    if not sssubin:
        raise ex.excError("missing %s"%self.sssubin)
    if not self.sssubin:
        # sssu in PATH and not specified in auth.conf
        self.sssubin = sssubin
    for pair in self.pairs:
        if 'src' not in pair or 'dst' not in pair or 'mask' not in pair:
            raise ex.excError("missing parameter in pair %s"%str(pair))
    # verify we can log in to the manager and that it manages this array
    ret, out, err = self.sssu(check=False)
    if "Error opening https connection" in out:
        raise ex.excError("error login to %s"%self.manager)
    elif "Error" in out:
        raise ex.excError("eva %s is not managed by %s"%(self.eva_name, self.manager))
def __init__(self,
             rid=None,
             pairs=None,
             eva_name="",
             snap_name="",
             **kwargs):
    """EVA snapshot sync resource.

    pairs: list of dicts with 'src', 'dst' and 'mask' keys.
    eva_name: the array name, used to look up credentials in auth.conf.
    """
    resSync.Sync.__init__(self,
                          rid=rid, type="sync.evasnap",
                          **kwargs)
    self.label = "EVA snapshot %s"%(rid)
    self.eva_name = eva_name
    self.snap_name = snap_name
    # fix: 'pairs=[]' was a shared mutable default argument
    self.pairs = pairs if pairs is not None else []
    self.conf = os.path.join(rcEnv.pathetc, 'auth.conf')
    # lun_info() cache, keyed on wwid
    self._lun_info = {}
    # no automatic schedule by default
    self.default_schedule = "@0"
def __str__(self):
    """Human readable representation, appending the array name and
    pairs to the base Sync representation."""
    return "%s eva_name=%s pairs=%s" % (resSync.Sync.__str__(self),\
           self.eva_name, str(self.pairs))
opensvc-1.8~20170412/lib/rcIfconfigHP-UX.py 0000644 0001750 0001750 00000011573 13073467726 020245 0 ustar jkelbert jkelbert from subprocess import *
import rcIfconfig
import rcExceptions as ex
class ifconfig(rcIfconfig.ifconfig):
    """HP-UX interface discovery: parses lanscan, netstat and ifconfig
    output into rcIfconfig.interface objects."""

    def parse(self, out):
        """Parse one 'ifconfig <intf>' output and append the resulting
        interface object to self.intf."""
        if len(out) == 0:
            return
        # the first word is the interface name, possibly ':' terminated
        intf = out.split()[0]
        if intf[len(intf)-1] == ':':
            intf = intf[0:len(intf)-1]
        i = rcIfconfig.interface(intf)
        self.intf.append(i)
        # defaults
        i.link_encap = ''
        i.scope = ''
        i.bcast = ''
        i.mask = ''
        i.mtu = ''
        i.ipaddr = ''
        i.ip6addr = []
        i.ip6mask = []
        i.hwaddr = ''
        if i.name in self.hwaddr:
            i.hwaddr = self.hwaddr[i.name]
        i.flag_up = False
        i.flag_broadcast = False
        i.flag_running = False
        i.flag_multicast = False
        i.flag_loopback = False
        # keyword/value scan: the previous word tells what the current
        # word is
        prev = ''
        for w in out.split():
            if 'broadcast' in prev:
                i.bcast = w
            elif 'netmask' in prev:
                # HP-UX prints the netmask as 8 hex digits (or '0')
                if w == '0':
                    i.mask = "0.0.0.0"
                elif len(w) == 8:
                    i.mask = "%d.%d.%d.%d"%(
                        int(w[0:2], 16),
                        int(w[2:4], 16),
                        int(w[4:6], 16),
                        int(w[6:8], 16)
                    )
                else:
                    raise ex.excError("malformed ifconfig %s netmask: %s"%(intf, w))
            elif 'inet' == prev:
                i.ipaddr = w
            elif 'inet6' == prev:
                i.ip6addr += [w]
            elif 'prefix' == prev:
                i.ip6mask += [w]
            # flags may appear concatenated (e.g. UP,BROADCAST,RUNNING)
            # so use substring matching
            if 'UP' in w:
                i.flag_up = True
            if 'BROADCAST' in w:
                i.flag_broadcast = True
            if 'RUNNING' in w:
                i.flag_running = True
            if 'MULTICAST' in w:
                i.flag_multicast = True
            if 'LOOPBACK' in w:
                i.flag_loopback = True
            prev = w

    def get_mcast(self):
        """Return multicast membership data parsed from 'netstat -gn'."""
        cmd = ['netstat', '-gn']
        out = Popen(cmd, stdout=PIPE).communicate()[0]
        return self.parse_mcast(out)

    def parse_mcast(self, out):
        """Parse 'netstat -gn' output into {intf: [group address, ...]}.
        The output contains two 'Name'-headed tables, each terminated by
        an empty line; both are parsed the same way."""
        lines = out.split('\n')
        found = False
        data = {}
        # locate the first table header
        for i, line in enumerate(lines):
            if line.startswith('Name'):
                found = True
                break
        if not found:
            return data
        if len(lines) == i+1:
            return data
        lines = lines[i+1:]
        # first table body, ends at the first empty line
        for i, line in enumerate(lines):
            if len(line) == 0:
                break
            try:
                intf, addr, refcnt = line.split()
            except:
                continue
            if addr == "*":
                continue
            if intf not in data:
                data[intf] = [addr]
            else:
                data[intf] += [addr]
        if len(lines) <= i + 1:
            return data
        lines = lines[i+1:]
        # locate the second table header
        for i, line in enumerate(lines):
            if line.startswith('Name'):
                found = True
                break
        if not found:
            return data
        if len(lines) == i+1:
            return data
        lines = lines[i+1:]
        # second table body
        for i, line in enumerate(lines):
            if len(line) == 0:
                break
            try:
                intf, addr, refcnt = line.split()
            except:
                continue
            if addr == "*":
                continue
            if intf not in data:
                data[intf] = [addr]
            else:
                data[intf] += [addr]
        return data

    def __init__(self, hwaddr=False, mcast=False):
        """Discover interfaces.
        hwaddr: also collect mac addresses via lanscan.
        mcast: also collect multicast memberships via netstat -gn.
        """
        self.intf = []
        intf_list = []
        self.hwaddr = {}
        if mcast:
            self.mcast_data = self.get_mcast()
        else:
            self.mcast_data = {}
        if hwaddr:
            lines = Popen(['lanscan', '-i', '-a'], stdout=PIPE).communicate()[0].split('\n')
            for line in lines:
                l = line.split()
                if len(l) < 2:
                    continue
                # lanscan prints the mac as 0x<12 hex digits>; reformat
                # to aa:bb:cc:dd:ee:ff
                mac = l[0].replace('0x','').lower()
                if len(mac) < 11:
                    continue
                mac_l = list(mac)
                # insert ':' from the end so earlier indices stay valid
                for c in (10, 8, 6, 4, 2):
                    mac_l.insert(c, ':')
                self.hwaddr[l[1]] = ''.join(mac_l)
        # list interface names from netstat
        out = Popen(['netstat', '-win'], stdout=PIPE).communicate()[0]
        for line in out.split('\n'):
            if len(line) == 0:
                continue
            if 'IPv4:' in line or 'IPv6' in line:
                continue
            intf = line.split()[0]
            # '*' suffix marks down interfaces
            intf_list.append(intf.replace('*', ''))
        for intf in intf_list:
            p = Popen(['ifconfig', intf], stdout=PIPE, stderr=PIPE)
            out = p.communicate()
            # skip pseudo entries ifconfig does not know about
            if "no such interface" in out[1]:
                continue
            elif p.returncode != 0:
                raise ex.excError
            self.parse(out[0])
opensvc-1.8~20170412/lib/rcDiskInfo.py 0000644 0001750 0001750 00000001235 13073467726 017437 0 ustar jkelbert jkelbert import rcExceptions as ex
class diskInfo(object):
    """Base class for the OS-specific diskInfo implementations.

    The disk_* accessors are placeholders returning "tbd"; each OS
    subclass overrides them with real probing code.
    """

    # column layout shared with subclass print_diskinfo implementations
    print_diskinfo_fmt = "%-12s %-8s %12s MB %-8s %-8s %-16s"

    def disk_id(self, dev):
        """Placeholder for the disk unique identifier."""
        return "tbd"

    def disk_vendor(self, dev):
        """Placeholder for the disk vendor string."""
        return "tbd"

    def disk_model(self, dev):
        """Placeholder for the disk model string."""
        return "tbd"

    def disk_size(self, dev):
        """Placeholder for the disk size."""
        return "tbd"

    def print_diskinfo_header(self):
        """Print the column header matching print_diskinfo_fmt."""
        columns = (
            "hbtl",
            "devname",
            "size",
            "dev",
            "vendor",
            "model",
        )
        print(self.print_diskinfo_fmt % columns)

    def scanscsi(self, hba=None, target=None, lun=None):
        """SCSI bus rescan is OS-specific: subclasses must override."""
        raise ex.excError("not implemented")
opensvc-1.8~20170412/lib/checkRaidMegaRaidLinux.py 0000777 0001750 0001750 00000000000 13073467726 025510 2checkRaidMegaRaid.py ustar jkelbert jkelbert opensvc-1.8~20170412/lib/resContainerOvm.py 0000644 0001750 0001750 00000012613 13073467726 020524 0 ustar jkelbert jkelbert import resources as Res
import os
import rcExceptions as ex
import resContainer
from rcGlobalEnv import rcEnv
rcU = __import__("rcUtilities" + os.uname()[0])
class Ovm(resContainer.Container):
    """Oracle VM (xen-based) container resource, driven through the 'xm'
    command line tool. The vm is addressed by its uuid."""

    startup_timeout = 180
    shutdown_timeout = 120

    def __init__(self,
                 rid,
                 name,
                 uuid,
                 guestos=None,
                 osvc_root_path=None,
                 **kwargs):
        resContainer.Container.__init__(self,
                                        rid=rid,
                                        name=name,
                                        type="container.ovm",
                                        guestos=guestos,
                                        osvc_root_path=osvc_root_path,
                                        **kwargs)
        self.uuid = uuid
        # xen configuration directories
        self.xen_d = os.path.join(os.sep, 'etc', 'xen')
        self.xen_auto_d = os.path.join(self.xen_d, 'auto')

    def __str__(self):
        return "%s name=%s" % (Res.Resource.__str__(self), self.name)

    def list_conffiles(self):
        """Return the existing xen configuration files for this vm."""
        cf = os.path.join(self.xen_d, self.uuid)
        if os.path.exists(cf):
            return [cf]
        return []

    def files_to_sync(self):
        # the vm configuration file is replicated to peer nodes
        return self.list_conffiles()

    def check_capabilities(self):
        """Return True if this node can run xen vms ('xm info' works)."""
        cmd = ['xm', 'info']
        (ret, out, err) = self.call(cmd, errlog=False)
        if ret != 0:
            self.status_log("can not fetch xm info")
            return False
        return True

    def ping(self):
        return rcU.check_ping(self.addr, timeout=1, count=1)

    def find_vmcf(self):
        """Return the vm configuration file path, searched in the OVS
        repositories then in /etc/xen. Raises excError if none found."""
        import glob
        l = glob.glob('/OVS/Repositories/*/VirtualMachines/'+self.uuid+'/vm.cfg')+glob.glob(os.path.join(self.xen_d, self.uuid))
        if len(l) > 1:
            self.log.warning("%d configuration files found in repositories (%s)"%(len(l), str(l)))
        elif len(l) == 0:
            raise ex.excError("no configuration file found in repositories")
        return l[0]

    def _migrate(self):
        # live-migrate to the destination node from the command line
        cmd = ['xm', 'migrate', '-l', self.uuid, self.svc.options.destination_node]
        (ret, buff, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError

    def container_start(self):
        cf = self.find_vmcf()
        cmd = ['xm', 'create', cf]
        (ret, buff, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError

    def container_stop(self):
        # graceful guest shutdown
        cmd = ['xm', 'shutdown', self.uuid]
        (ret, buff, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError

    def container_forcestop(self):
        # hard destroy, used when graceful shutdown timed out
        cmd = ['xm', 'destroy', self.uuid]
        (ret, buff, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError

    def is_up_on(self, nodename):
        return self.is_up(nodename)

    def is_up(self, nodename=None):
        """Return True if the vm is in running state, optionally checked
        on a remote node through rsh."""
        cmd = ['xm', 'list', '--state=running']
        if nodename is not None:
            cmd = rcEnv.rsh.split() + [nodename] + cmd
        (ret, out, err) = self.call(cmd, errlog=False)
        if ret != 0:
            return False
        for line in out.split('\n'):
            l = line.split()
            if len(l) < 4:
                continue
            if self.uuid == l[0]:
                return True
        return False

    def get_container_info(self):
        """Return {'vcpus': n, 'vmem': n} parsed from 'xm list <uuid>',
        defaulting both to '0' when the vm is not listed."""
        cmd = ['xm', 'list', self.uuid]
        (ret, out, err) = self.call(cmd, errlog=False, cache=True)
        self.info = {'vcpus': '0', 'vmem': '0'}
        if ret != 0:
            return self.info
        for line in out.split('\n'):
            l = line.split()
            if len(l) < 4:
                continue
            if self.uuid != l[0]:
                continue
            self.info['vcpus'] = l[3]
            self.info['vmem'] = l[2]
            return self.info
        # no line matched the uuid: report and return defaults
        self.log.error("malformed 'xm list %s' output: %s"%(self.uuid, line))
        self.info = {'vcpus': '0', 'vmem': '0'}
        return self.info

    def check_manual_boot(self):
        """Return True when the vm is NOT set to autoboot with the node
        (no symlink in /etc/xen/auto)."""
        f = os.path.join(self.xen_auto_d, self.uuid)
        if os.path.exists(f):
            return False
        return True

    def devmap(self):
        """Return [[backing block device, guest device name], ...]
        parsed from the 'disk' entry of the vm configuration file.
        Cached in self.devmapping after the first call."""
        if hasattr(self, "devmapping"):
            return self.devmapping
        self.devmapping = []
        cf = self.find_vmcf()
        with open(cf, 'r') as f:
            buff = f.read()
        for line in buff.split('\n'):
            if not line.startswith('disk'):
                continue
            # disk = ['phy:/dev/...,xvda,w', ...] : keep the [...] part
            disks = line[line.index('['):]
            if len(line) <= 2:
                break
            disks = disks[1:-1]
            disks = disks.split(', ')
            for disk in disks:
                disk = disk.strip("'")
                d = disk.split(',')
                # only physical devices are of interest
                if not d[0].startswith('phy:'):
                    continue
                l = [d[0].strip('phy:'), d[1]]
                self.devmapping.append(l)
            break
        return self.devmapping

    def devlist(self):
        """Return the set of backing device paths. Cached."""
        if hasattr(self, 'devs') and self.devs != set():
            return self.devs
        self.devs = set(map(lambda x: x[0], self.devmap()))
        return self.devs

    def disklist(self):
        """Return the set of disks backing the vm devices, resolved
        through the OS-specific rcUtilities module when available."""
        if hasattr(self, 'disks') and self.disks != set():
            return self.disks
        self.disks = set()
        devps = self.devlist()
        try:
            u = __import__('rcUtilities'+rcEnv.sysname)
            self.disks = u.devs_to_disks(self, devps)
        except:
            # best effort: fall back to the raw device paths
            self.disks = devps
        return self.disks
opensvc-1.8~20170412/lib/nodeDarwin.py 0000644 0001750 0001750 00000000364 13073467726 017500 0 ustar jkelbert jkelbert import node
class Node(node.Node):
    """Darwin specific node actions."""

    def shutdown(self):
        # halt the node immediately
        cmd = ["shutdown", "-h", "now"]
        ret, out, err = self.vcall(cmd)

    def _reboot(self):
        # reboot the node immediately
        cmd = ["shutdown", "-r", "now"]
        ret, out, err = self.vcall(cmd)
opensvc-1.8~20170412/lib/rcAssetFreeBSD.py 0000644 0001750 0001750 00000004060 13073467726 020142 0 ustar jkelbert jkelbert import os
import datetime
from rcUtilities import justcall, which
from rcGlobalEnv import rcEnv
import rcAssetLinux
class Asset(rcAssetLinux.Asset):
    """FreeBSD asset discovery, mostly through sysctl(8)."""

    def _sysctl_fields(self, key):
        """Run 'sysctl <key>' and return the whitespace-split first
        output line (key name included), or None on any error.
        Factors out the parsing loop duplicated across the getters."""
        cmd = ['sysctl', key]
        (out, err, ret) = justcall(cmd)
        if ret != 0:
            return None
        lines = out.split('\n')
        if len(lines) < 1:
            return None
        fields = lines[0].split()
        if len(fields) < 2:
            return None
        return fields

    def _get_mem_bytes(self):
        """Return the hw.realmem value converted to MB, as a string.
        '0' on error. NOTE(review): despite the method name, the value
        is divided twice by 1024, as in the original implementation."""
        fields = self._sysctl_fields('hw.realmem')
        if fields is None:
            return '0'
        mb = int(fields[-1])
        # integer division, python2/3 compatible
        return str(mb // 1024 // 1024)

    def _get_os_vendor(self):
        return 'FreeBSD'

    def _get_os_release(self):
        # on FreeBSD the release string equals the kernel version
        return self._get_os_kernel()

    def _get_os_arch(self):
        fields = self._sysctl_fields('hw.machine_arch')
        if fields is None:
            return 'Unknown'
        return fields[-1]

    def _get_cpu_model(self):
        fields = self._sysctl_fields('hw.model')
        if fields is None:
            return 'Unknown'
        # the model string may contain spaces: join all value fields
        return " ".join(fields[1:])

    def _get_cpu_cores(self):
        fields = self._sysctl_fields('hw.ncpu')
        if fields is None:
            return 'Unknown'
        return fields[-1]

    def _get_cpu_freq(self):
        fields = self._sysctl_fields('hw.clockrate')
        if fields is None:
            return 'Unknown'
        return fields[-1]
opensvc-1.8~20170412/lib/provZone.py 0000644 0001750 0001750 00000034177 13073467726 017241 0 ustar jkelbert jkelbert import os
import resContainerZone
import lock
from provisioning import Provisioning
from rcZfs import Dataset
import rcZone
from rcExceptions import excError
from rcGlobalEnv import rcEnv
from rcUtilitiesSunOS import get_solaris_version
from rcUtilities import justcall
import shutil
SYSIDCFG="/etc/sysidcfg"
class ProvisioningZone(Provisioning):
    """Solaris zone provisioner: configures, installs or clones a zone,
    generates its sysidcfg, then boots it. Handles both Solaris 10 and
    11 code paths (self.osver discriminates)."""

    def __init__(self, r):
        """r: the zone container resource to provision. Provisioning
        parameters are read from the service configuration defaults:
        container_origin, snapof, snap, zonepath."""
        Provisioning.__init__(self, r)
        self.log = self.r.log
        self.section = r.svc.config.defaults()
        if 'container_origin' in self.section:
            self.container_origin = self.section['container_origin']
        else:
            # default template zone to clone from
            self.container_origin = "skelzone"
        if 'snapof' in self.section:
            self.snapof = self.section['snapof']
        else:
            self.snapof = None
        if 'snap' in self.section:
            self.clone = self.section['snap']
        else:
            self.clone = "rpool/zones/" + r.name
        if 'zonepath' in self.section:
            self.zonepath = self.section['zonepath']
        else:
            self.zonepath = None

    def sysid_network(self):
        """Return the sysidcfg network_interface block for the first ip
        resource with gateway and netmask set, e.g.:

        network_interface=l226z1 {primary
        hostname=zone1-32
        ip_address=172.30.5.232
        netmask=255.255.255.0
        protocol_ipv6=no
        default_route=172.30.5.1}

        Side effect: adjusts the ip resources tags for solaris 11 zones
        and rewrites the service configuration file.
        """
        cf = os.path.join(rcEnv.pathetc, self.r.svc.svcname+'.conf')
        s = ""
        for r in self.r.svc.get_resources(["ip"]):
            # Add mandatory tags for sol11 zones
            r.tags.add("noaction")
            r.tags.add("noalias")
            r.tags.add("exclusive")
            r.tags.remove("preboot")
            r.tags.remove("postboot")
            # Add nonrouted tag if no gateway provisioning keyword is passed
            if not r.svc.config.has_option(r.rid, "gateway"):
                r.tags.add("nonrouted")
            if not r.svc.config.has_option(r.rid, "gateway"):
                continue
            default_route = r.svc.config.get(r.rid, "gateway")
            if not r.svc.config.has_option(r.rid, "netmask"):
                continue
            netmask = r.svc.config.get(r.rid, "netmask")
            # only the first eligible ip becomes the primary interface
            if s == "":
                s += "network_interface=%s {primary\n"%r.ipdev
                s += " hostname=%s\n"%r.ipname
                s += " ip_address=%s\n"%r.addr
                s += " netmask=%s\n"%netmask
                s += " protocol_ipv6=no\n"
                s += " default_route=%s}\n"%default_route
            # save new service env file
            self.r.svc.config.set(r.rid, "tags", ' '.join(r.tags))
            with open(cf, 'w') as f:
                self.r.svc.config.write(f)
        return s

    def get_tz(self):
        """Return the timezone name to set in sysidcfg, from $TZ,
        defaulting to MET."""
        if "TZ" not in os.environ:
            return "MET"
        tz = os.environ["TZ"]
        tzp = os.path.join(os.sep, "etc", tz)
        if os.path.exists(tzp) and self.osver >= 11:
            # /etc/<TZ> is a symlink into the zoneinfo database on
            # solaris 11: derive the olson name from its target
            p = os.path.realpath(tzp)
            l = p.split('zoneinfo/')
            if len(l) != 2:
                return "MET"
            return l[-1]
        else:
            return tz

    def get_ns(self):
        "return (domain, nameservers) detected from resolv.conf"
        p = os.path.join(os.sep, 'etc', 'resolv.conf')
        domain = None
        search = []
        nameservers = []
        with open(p) as f:
            for line in f.readlines():
                if line.strip().startswith('search'):
                    l = line.split()
                    if len(l) > 1:
                        search = l[1:]
                if line.strip().startswith('domain'):
                    l = line.split()
                    if len(l) > 1:
                        domain = l[1]
                if line.strip().startswith('nameserver'):
                    l = line.split()
                    if len(l) > 1 and l[1] not in nameservers:
                        nameservers.append(l[1])
        return (domain, nameservers, search)

    def create_sysidcfg(self, zone=None):
        """Generate the sysidcfg for the zone, dispatching on the
        solaris version."""
        self.r.log.info("creating zone sysidcfg file")
        if self.osver >= 11.0:
            self._create_sysidcfg_11(zone)
        else:
            self._create_sysidcfg_10(zone)

    def _create_sysidcfg_11(self, zone=None):
        """Solaris 11: write a sysidcfg in the service var directory and
        convert it to a sc_profile.xml with js2ai."""
        try:
            domain, nameservers, search = self.get_ns()
            if domain is None and len(search) > 0:
                domain = search[0]
            if domain is None or len(nameservers) == 0:
                name_service="name_service=none"
            else:
                name_service = "name_service=DNS {domain_name=%s name_server=%s search=%s}\n" % (
                    domain,
                    ",".join(nameservers),
                    ",".join(search)
                )
            sysidcfg_dir = os.path.join(rcEnv.pathvar, self.r.svc.svcname)
            sysidcfg_filename = os.path.join(sysidcfg_dir, 'sysidcfg')
            contents = ""
            contents += "keyboard=US-English\n"
            contents += "system_locale=C\n"
            contents += "timezone=%s\n"%self.get_tz()
            contents += "terminal=vt100\n"
            contents += "timeserver=localhost\n"
            contents += self.sysid_network()
            contents += "root_password=NP\n"
            contents += "security_policy=NONE\n"
            contents += name_service
            try:
                os.makedirs(sysidcfg_dir)
            except:
                pass
            with open(sysidcfg_filename, "w") as sysidcfg_file:
                sysidcfg_file.write(contents)
            # js2ai works in the current directory
            os.chdir(sysidcfg_dir)
            self.zonecfg_xml = os.path.join(sysidcfg_dir, "sc_profile.xml")
            try:
                os.unlink(self.zonecfg_xml)
            except:
                pass
            cmd = ['/usr/sbin/js2ai', '-s']
            out, err, ret = justcall(cmd)
            if not os.path.exists(self.zonecfg_xml):
                raise excError("js2ai conversion error")
        except Exception,e:
            raise excError("exception from %s: %s during create_sysidcfg file" % (e.__class__.__name__, e.__str__()))

    def _create_sysidcfg_10(self, zone=None):
        """Solaris 10: write the sysidcfg directly in the zone root."""
        try:
            name_service = "name_service=NONE\n"
            sysidcfg_filename = zone.zonepath + "/root" + SYSIDCFG
            sysidcfg_file = open(sysidcfg_filename, "w" )
            contents = ""
            contents += "system_locale=C\n"
            contents += "timezone=MET\n"
            contents += "terminal=vt100\n"
            contents += "timeserver=localhost\n"
            contents += "security_policy=NONE\n"
            contents += "root_password=NP\n"
            contents += "nfs4_domain=dynamic\n"
            contents += "network_interface=NONE {hostname=%(zonename)s}\n" % {"zonename":zone.name}
            contents += name_service
            sysidcfg_file.write(contents)
            sysidcfg_file.close()
        except Exception,e:
            raise(excError("exception from %s: %s during create_sysidcfg file" % (e.__class__.__name__, e.__str__())))

    def test_net_interface(self, intf):
        """Return True if the datalink <intf> exists on the node."""
        cmd = ['dladm', 'show-link', intf]
        out, err, ret = justcall(cmd)
        if ret == 0:
            return True
        return False

    def zone_configure_net(self, zone=None):
        """Add one 'net' section per ip resource to the zone
        configuration. Raises excError if an ipdev is missing."""
        if zone is None:
            zone = self.r
        cmds = []
        for r in self.r.svc.get_resources(["ip"]):
            if not self.test_net_interface(r.ipdev):
                raise excError("Missing interface: %s" % r.ipdev)
            cmds.append("add net ; set physical=%s ; end" % r.ipdev)
        for cmd in cmds:
            zone.zonecfg([cmd])

    def zone_configure(self, zone=None):
        """
        configure zone, if zone is None, configure self.r
        """
        if zone is None:
            zone = self.r
        if self.osver >= 11.0:
            # solaris 11 zones are created from a template zone
            cmd = "create -t " + self.container_origin + "; set zonepath=" + zone.zonepath
        else:
            cmd = "create; set zonepath=" + zone.zonepath
        if zone.state is None:
            zone.zonecfg([cmd])
            if zone.state != "configured":
                raise(excError("zone %s is not configured" % (zone.name)))
        if self.osver >= 11.0:
            try:
                self.zone_configure_net(zone)
            except:
                # roll back the zone configuration on failure
                zone.zonecfg(["delete", "-F"])
                raise

    def create_zone2clone(self):
        """Make sure the template zone (container_origin) exists and is
        installed, dispatching on the solaris version."""
        if os.path.exists(self.r.zonepath):
            # zoneadm requires a 700 zonepath
            try:
                os.chmod(self.r.zonepath, 0o0700)
            except:
                pass
        if self.osver >= 11.0:
            self._create_zone2clone_11()
        else:
            self._create_zone2clone_10()

    def _create_zone2clone_11(self):
        zonename = self.container_origin
        zone2clone = resContainerZone.Zone(rid="container#skelzone", name=zonename)
        zone2clone.log = self.r.log
        if zone2clone.state == "installed":
            return
        self.zone_configure(zone=zone2clone)
        if zone2clone.state != "configured":
            raise(excError("zone %s is not configured" % (zonename)))
        self.create_sysidcfg(zone2clone)
        #zone2clone.zoneadm("clone", ['-c', self.zonecfg_xml, self.container_origin])
        zone2clone.zoneadm("install")
        if zone2clone.state != "installed":
            raise(excError("zone %s is not installed" % (zonename)))
        brand = zone2clone.brand
        # native zones need a reboot to complete sysid; ipkg don't
        if brand == "native":
            zone2clone.boot_and_wait_reboot()
        elif brand == "ipkg":
            zone2clone.boot()
        else:
            raise(excError("zone brand: %s not yet implemented" % (brand)))
        zone2clone.wait_multi_user()
        zone2clone.stop()
        zone2clone.zone_refresh()
        if zone2clone.state != "installed":
            raise(excError("zone %s is not installed" % (zonename)))

    def _create_zone2clone_10(self):
        """verify if self.container_origin zone is installed
        else configure container_origin if required
        then install container_origin if required
        """
        zonename = self.container_origin
        zone2clone = resContainerZone.Zone(rid="container#skelzone", name=zonename)
        zone2clone.log = self.r.log
        if zone2clone.state == "installed":
            return
        self.zone_configure(zone=zone2clone)
        if zone2clone.state != "configured":
            raise(excError("zone %s is not configured" % (zonename)))
        zone2clone.zoneadm("install")
        if zone2clone.state != "installed":
            raise(excError("zone %s is not installed" % (zonename)))
        self.create_sysidcfg(zone2clone)
        brand = zone2clone.brand
        if brand == "native":
            zone2clone.boot_and_wait_reboot()
        elif brand == "ipkg":
            zone2clone.boot()
        else:
            raise(excError("zone brand: %s not yet implemented" % (brand)))
        zone2clone.wait_multi_user()
        zone2clone.stop()
        zone2clone.zone_refresh()
        if zone2clone.state != "installed":
            raise(excError("zone %s is not installed" % (zonename)))

    def create_cloned_zone(self):
        """Clone the template zone into the target zone."""
        zone = self.r
        if zone.state == "running":
            self.log.info("zone %s already running"%zone.name)
            return
        if zone.state == "configured":
            if self.osver >= 11.0:
                self._create_cloned_zone_11(zone)
            else:
                self._create_cloned_zone_10(zone)
        if zone.state != "installed":
            raise(excError("zone %s is not installed" % (zone.name)))

    def _create_cloned_zone_11(self, zone):
        # solaris 11 clone applies the sc_profile.xml built by js2ai
        zone.zoneadm("clone", ['-c', self.zonecfg_xml, self.container_origin])

    def _create_cloned_zone_10(self, zone):
        zone.zoneadm("clone", [self.container_origin])

    def create_zonepath(self):
        """create zonepath dataset from clone of snapshot of self.snapof
        snapshot for self.snapof will be created
        then cloned to self.clone
        """
        zonename = self.r.name
        source_ds = Dataset(self.snapof)
        if source_ds.exists(type="filesystem") is False:
            raise(excError("source dataset doesn't exist " + self.snapof))
        snapshot = source_ds.snapshot(zonename)
        snapshot.clone(self.clone, ['-o', 'mountpoint=' + self.r.zonepath])

    def provisioner(self, need_boot=True):
        """provision zone
        - configure zone
        - if snapof and zone brand is native
          then create zonepath from snapshot of snapof
          then attach zone
        - if snapof and zone brand is ipkg
          then try to detect zone associated with snapof
          then define container_origin
        - if container_origin
          then clone container_origin
        - create sysidcfg
        - if need_boot boot and wait multiuser
        """
        self.osver = get_solaris_version()
        self.zone_configure()
        if self.osver >= 11:
            self.create_sysidcfg(self.r)
        else:
            if self.snapof is not None and self.r.brand == 'native':
                self.create_zonepath()
                self.r.zoneadm("attach", ["-F"])
            elif self.snapof is not None and self.r.brand == 'ipkg':
                # derive the origin zone from the snapof dataset
                zones = rcZone.Zones()
                src_dataset = Dataset(self.snapof)
                zonepath = src_dataset.getprop('mountpoint')
                self.container_origin = zones.zonename_from_zonepath(zonepath).zonename
                self.log.info("source zone is %s (detected from snapof %s)" % (self.container_origin, self.snapof))
        if self.container_origin is not None:
            # serialize concurrent template zone creations across services
            lockname='create_zone2clone-' + self.container_origin
            lockfile = os.path.join(rcEnv.pathlock, lockname)
            self.log.info("wait get lock %s"%(lockname))
            try:
                lockfd = lock.lock(timeout=1200, delay=5, lockfile=lockfile)
            except:
                raise(excError("failure in get lock %s"%(lockname)))
            try:
                self.create_zone2clone()
            except:
                lock.unlock(lockfd)
                raise
            lock.unlock(lockfd)
            self.create_cloned_zone()
        if self.osver < 11:
            self.create_sysidcfg(self.r)
        if need_boot is True:
            self.r.boot()
            self.r.wait_multi_user()
        self.r.log.info("provisioned")
        return True
opensvc-1.8~20170412/lib/checkFmFmadmSunOS.py 0000777 0001750 0001750 00000000000 13073467726 023514 2checkFmFmadm.py ustar jkelbert jkelbert opensvc-1.8~20170412/lib/resStonithCallout.py 0000644 0001750 0001750 00000000612 13073467726 021070 0 ustar jkelbert jkelbert import resStonith
import rcStatus
from rcUtilities import cmdline2list
from rcGlobalEnv import rcEnv
class Stonith(resStonith.Stonith):
    """Stonith driver executing an arbitrary user-configured command
    (callout) to fence the peer node."""

    def __init__(self, rid=None, cmd="/bin/false", **kwargs):
        resStonith.Stonith.__init__(self, rid=rid, type="stonith.callout", **kwargs)
        # the command line to execute on fencing
        self.cmd = cmd

    def _start(self):
        # split the configured command line into an argv and run it
        _cmd = cmdline2list(self.cmd)
        self.vcall(_cmd)
opensvc-1.8~20170412/lib/resSyncSymclone.py 0000644 0001750 0001750 00000020063 13073467726 020544 0 ustar jkelbert jkelbert import os
import logging
from rcGlobalEnv import rcEnv
from rcUtilities import which, justcall
import rcExceptions as ex
import rcStatus
import time
import datetime
import resSync
import xml.etree.ElementTree as ElementTree
class syncSymclone(resSync.Sync):
    """EMC Symmetrix clone/snap synchronization resource, driven through
    the symclone command. Pairs are "src:dst" symdev couples, written to
    a pair file passed to symclone -f."""

    def wait_for_devs_ready(self):
        # hook for platform-specific subclasses; no-op here
        pass

    def pairs_file(self, pairs=None):
        """Return the pair file path for <pairs> (all pairs if None)."""
        if pairs is None:
            suffix = ""
        else:
            suffix = "." + ",".join(pairs)
        return os.path.join(rcEnv.pathvar, self.svc.svcname, "pairs."+self.rid+suffix)

    def write_pair_file(self, pairs=None):
        """Write the symclone -f pair file (one 'src dst' per line),
        only once per distinct pair set."""
        if pairs is None:
            _pairs = self.pairs
            key = "all"
        else:
            _pairs = pairs
            key = ",".join(pairs)
        if key in self.pairs_written:
            return
        pf = self.pairs_file(pairs)
        content = "\n".join(map(lambda x: x.replace(":", " "), _pairs))
        with open(pf, "w") as f:
            f.write(content)
        self.log.debug("wrote content '%s' in file '%s'" % (content, pf))
        self.pairs_written[key] = True

    def symclone_cmd(self, pairs=None):
        """Return the symclone command prefix for <pairs>."""
        self.write_pair_file(pairs)
        return ['/usr/symcli/bin/symclone', '-sid', self.symid, '-f', self.pairs_file(pairs)]

    def is_active(self):
        """Return True when every pair verifies in one of the active
        states. Verified pairs accumulate in self.active_pairs."""
        for pair in self.pairs:
            if pair in self.active_pairs:
                continue
            for state in self.active_states:
                cmd = self.symclone_cmd([pair]) + ['verify', '-'+state]
                out, err, ret = justcall(cmd)
                if ret == 0:
                    self.active_pairs.append(pair)
                    break
        if len(self.active_pairs) == len(self.pairs):
            return True
        return False

    def is_activable(self):
        """Return True when all pairs verify in an activable state."""
        for state in self.activable_states:
            cmd = self.symclone_cmd() + ['verify', '-'+state]
            (ret, out, err) = self.call(cmd)
            if ret == 0:
                return True
        return False

    def wait_for_active(self):
        """Poll until all pairs are active, or raise excError after
        activate_timeout seconds."""
        delay = 10
        self.active_pairs = []
        ass = " or ".join(self.active_states)
        for i in range(self.activate_timeout//delay+1):
            if self.is_active():
                return
            if i == 0:
                # fix: messages used an undefined 'timeout' name
                self.log.info("waiting for active state (max %i secs, %s)" % (self.activate_timeout, ass))
            time.sleep(delay)
        self.log.error("timed out waiting for active state (%i secs, %s)" % (self.activate_timeout, ass))
        ina = set(self.pairs) - set(self.active_pairs)
        ina = map(lambda x: ' '.join(x), ina)
        ina = ", ".join(ina)
        raise ex.excError("%s still not in active state (%s)" % (ina, ass))

    def wait_for_activable(self):
        """Poll until all pairs are activable, or raise excError after
        recreate_timeout seconds."""
        delay = 10
        ass = " or ".join(self.activable_states)
        for i in range(self.recreate_timeout//delay+1):
            if self.is_activable():
                return
            if i == 0:
                # fix: messages reported activate_timeout while the loop
                # is bounded by recreate_timeout
                self.log.info("waiting for activable state (max %i secs, %s)" % (self.recreate_timeout, ass))
            time.sleep(delay)
        raise ex.excError("timed out waiting for activable state (%i secs, %s)" % (self.recreate_timeout, ass))

    def activate(self):
        """Activate the clone pairs, waiting for the activable state
        first and for full activation after."""
        if self.is_active():
            self.log.info("symclone target devices are already active")
            return
        self.wait_for_activable()
        cmd = self.symclone_cmd() + ['-noprompt', 'activate', '-i', '20', '-c', '30']
        if self.consistent:
            cmd.append("-consistent")
        (ret, out, err) = self.vcall(cmd, warn_to_info=True)
        if ret != 0:
            raise ex.excError
        self.wait_for_active()
        self.wait_for_devs_ready()

    def can_sync(self, target=None):
        """Return False when requirements are not met or the last sync
        is too recent."""
        try:
            self.check_requires("sync_update")
        except ex.excError as e:
            self.log.debug(e)
            return False
        self.get_last()
        if self.skip_sync(self.last):
            return False
        return True

    def recreate(self):
        """Recreate the clone pairs unless already recreated or the
        last sync is too recent."""
        self.get_last()
        if self.skip_sync(self.last):
            return
        if self.is_activable():
            self.log.info("symclone are already recreated")
            return
        cmd = self.symclone_cmd() + ['-noprompt', 'recreate', '-i', '20', '-c', '30']
        if self.type == "sync.symclone" and self.precopy:
            cmd.append("-precopy")
        (ret, out, err) = self.vcall(cmd, warn_to_info=True)
        if ret != 0:
            raise ex.excError

    def info(self):
        """Return the resource information key/value list."""
        data = [
            ["precopy", str(self.precopy)],
            ["pairs", str(self.pairs)],
            ["symid", str(self.symid)],
            ["consistent", str(self.consistent)],
        ]
        return self.fmt_info(data)

    def split_pair(self, pair):
        """Split a 'src:dst' pair string into [src, dst]."""
        l = pair.split(":")
        if len(l) != 2:
            raise ex.excError("pair %s malformed" % pair)
        return l

    def showdevs(self):
        """Populate self.showdevs_etree with one xml element per target
        device, from 'symdev list -v'. Cached after the first call."""
        if len(self.showdevs_etree) > 0:
            return
        dst_devs = map(lambda x: x.split(":")[1], self.pairs)
        cmd = ['/usr/symcli/bin/symdev', '-sid', self.symid, 'list', '-v', '-devs', ','.join(dst_devs), '-output', 'xml_e']
        out, err, ret = justcall(cmd)
        # fix: the output was parsed twice (duplicated fromstring call)
        etree = ElementTree.fromstring(out)
        for e in etree.findall("Symmetrix/Device"):
            dev_name = e.find("Dev_Info/dev_name").text
            self.showdevs_etree[dev_name] = e

    def last_action_dev(self, dev):
        """Return the last clone action datetime for <dev>."""
        # format: Thu Feb 25 10:20:56 2010
        self.showdevs()
        s = self.showdevs_etree[dev].find("CLONE_Device/last_action").text
        return datetime.datetime.strptime(s, "%a %b %d %H:%M:%S %Y")

    def get_last(self):
        """Set self.last to the most recent pair last-action datetime."""
        if self.last is not None:
            return
        self.showdevs()
        for pair in self.pairs:
            src, dst = self.split_pair(pair)
            last = self.last_action_dev(dst)
            if self.last is None or last > self.last:
                self.last = last

    def _status(self, verbose=False):
        """UP when the last clone action is fresh enough, WARN when
        stale, DOWN when no action is recorded."""
        self.get_last()
        if self.last is None:
            return rcStatus.DOWN
        elif self.last < datetime.datetime.now() - datetime.timedelta(minutes=self.sync_max_delay):
            self.status_log("Last sync on %s older than %d minutes"%(self.last, self.sync_max_delay))
            return rcStatus.WARN
        else:
            self.status_log("Last sync on %s" % self.last, "info")
            return rcStatus.UP

    def sync_break(self):
        self.activate()

    def sync_resync(self):
        self.recreate()

    def sync_update(self):
        self.recreate()
        self.activate()

    def start(self):
        self.activate()

    def __init__(self,
                 rid=None,
                 type="sync.symclone",
                 symid=None,
                 pairs=None,
                 precopy=True,
                 consistent=True,
                 **kwargs):
        """symid: the array serial. pairs: 'src:dst' symdev couples.
        precopy/consistent: symclone activation options."""
        resSync.Sync.__init__(self,
                              rid=rid,
                              type=type,
                              **kwargs)
        # fix: 'pairs=[]' was a shared mutable default argument
        if pairs is None:
            pairs = []
        if self.type == "sync.symclone":
            self.active_states = ["copied", "copyinprog"]
            self.activable_states = ["recreated", "precopy"]
        elif self.type == "sync.symsnap":
            self.active_states = ["copyonwrite"]
            self.activable_states = ["recreated", "created"]
        else:
            # fix: the message was passed the value as a spurious second
            # argument instead of %-formatting it
            raise ex.excInitError("unsupported symclone driver type %s" % self.type)
        self.activate_timeout = 20
        self.recreate_timeout = 20
        self.precopy = precopy
        self.pairs_written = {}
        self.label = "symclone symid %s pairs %s" % (symid, " ".join(pairs))
        if len(self.label) > 80:
            self.label = self.label[:76] + "..."
        self.symid = symid
        self.pairs = pairs
        self.consistent = consistent
        self.disks = set([])
        self.svcstatus = {}
        self.active_pairs = []
        self.last = None
        self.showdevs_etree = {}
        self.default_schedule = "@0"

    def __str__(self):
        return "%s symid=%s pairs=%s" % (resSync.Sync.__str__(self),\
               self.symid, str(self.pairs))
opensvc-1.8~20170412/lib/rcGce.py 0000644 0001750 0001750 00000001411 13073467726 016423 0 ustar jkelbert jkelbert from rcUtilities import is_string, justcall
import json
class Gce(object):
    """Mixin providing gcloud authentication helpers."""

    # set to True once a valid gcloud auth has been observed
    valid_auth = False

    def gce_auth(self):
        """Return True if 'gcloud auth list' reports a non-empty active
        account."""
        cmd = ["gcloud", "auth", "list", "--format", "json"]
        out, err, ret = justcall(cmd)
        if ret != 0:
            return False
        self.log.debug(out)
        data = json.loads(out)
        if "active_account" not in data:
            return False
        if not is_string(data["active_account"]):
            return False
        if len(data["active_account"]) == 0:
            return False
        return True

    def wait_gce_auth(self):
        """Block until gcloud auth is valid (up to 120s, 1s interval),
        then remember the result to avoid re-checking."""
        if self.valid_auth:
            return
        self.wait_for_fn(self.gce_auth, 120, 1, errmsg="waited 120 seconds for a valid gcloud auth")
        self.valid_auth = True
opensvc-1.8~20170412/lib/provSrp.py 0000644 0001750 0001750 00000005617 13073467726 017067 0 ustar jkelbert jkelbert from provisioning import Provisioning
from rcGlobalEnv import rcEnv
import os
import socket
import rcExceptions as ex
class ProvisioningSrp(Provisioning):
    """HP-UX SRP (Secure Resource Partition) container provisioner."""

    def __init__(self, r):
        """r is the container resource to provision."""
        Provisioning.__init__(self, r)
        self.name = r.name
        self.rootpath = os.path.join(os.sep, 'var', 'hpsrp', self.name)
        try:
            self.prm_cores = r.svc.config.get(r.rid, 'prm_cores')
        except Exception:
            # prm_cores is optional in the service configuration
            self.prm_cores = "1"
        self.ip = r.svc.config.get(r.rid, 'ip')
        self.ip = self.lookup(self.ip)
        # fs resources stopped by cleanup(), to restart after creation
        self.need_start = []

    def lookup(self, ip):
        """Return the ip address for <ip>, resolving hostnames.

        Raises ex.excError if the name can not be resolved.
        """
        try:
            int(ip[0])
            # already numeric: use as-is (previous code returned None
            # here, breaking 'ip_address='+self.ip in add_srp)
            return ip
        except Exception:
            pass
        try:
            a = socket.getaddrinfo(ip, None)
            if len(a) == 0:
                raise Exception
            ip = a[0][-1][0]
            return ip
        except Exception:
            raise ex.excError("could not resolve %s to an ip address"%self.ip)

    def validate(self):
        """Return False to trigger the provisioner, True to skip it."""
        # deferred import: which() comes from rcUtilities and was not
        # imported at module level, making the original call a NameError
        from rcUtilities import which
        if not which('srp'):
            self.r.log.error("this node is not srp capable")
            return True
        if self.check_srp():
            self.r.log.error("container is already created")
            return True
        return False

    def check_srp(self):
        """Return True if the srp container already exists (best-effort:
        any get_status() error is interpreted as 'absent')."""
        try:
            self.r.get_status()
        except Exception:
            return False
        return True

    def cleanup(self):
        """Stop fs resources mounted under the srp root and remove their
        now-empty mount point paths, so srp can create the container."""
        rs = self.r.svc.get_resources('fs')
        # deepest mount points first
        rs.sort(key=lambda x: x.mount_point, reverse=True)
        for r in rs:
            if r.mount_point == self.rootpath:
                continue
            if not r.mount_point.startswith(self.rootpath):
                continue
            r.stop()
            self.need_start.append(r)
            # NOTE(review): unlink on a mount point directory looks
            # suspicious (os.rmdir expected) -- confirm on HP-UX
            os.unlink(r.mount_point)
            p = r.mount_point
            # walk up towards the srp root, removing intermediate dirs
            while True:
                p = os.path.realpath(os.path.join(p, '..'))
                if p == self.rootpath:
                    break
                try:
                    self.r.log.info("unlink %s"%p)
                    os.unlink(p)
                except Exception:
                    break

    def restart_fs(self):
        """Restart the fs resources stopped by cleanup()."""
        for r in self.need_start:
            r.start()

    def add_srp(self):
        """Create the srp container with the srp(1M) command.

        Raises ex.excError if the command fails.
        """
        self.cleanup()
        cmd = ['srp', '-batch',
               '-a', self.name,
               '-t', 'system',
               '-s', 'admin,cmpt,init,prm,network',
               'ip_address='+self.ip, 'assign_ip=no',
               'autostart=no',
               'delete_files_ok=no',
               'root_password=""',
               'prm_group_type=PSET',
               'prm_cores='+str(self.prm_cores)]
        ret, out, err = self.r.vcall(cmd)
        if ret != 0:
            raise ex.excError()
        self.restart_fs()

    def provisioner(self):
        """Create the container, then start the resource. Returns True."""
        self.add_srp()
        self.r.start()
        self.r.log.info("provisioned")
        return True
opensvc-1.8~20170412/lib/svcmgr_parser.py 0000644 0001750 0001750 00000112336 13073467726 020266 0 ustar jkelbert jkelbert """
svcmgr command line actions and options
"""
from rcGlobalEnv import Storage
from rcOptParser import OptParser
from optparse import Option
PROG = "svcmgr"
OPT = Storage({
"account": Option(
"--account", default=False,
action="store_true", dest="account",
help="decides that the unavailabity period should be "
"deduced from the service availability anyway. "
"used with the 'collector ack unavailability' "
"action"),
"attach": Option(
"--attach", default=False,
action="store_true", dest="attach",
help="attach the modulesets specified during a "
"compliance check/fix/fixable command"),
"author": Option(
"--author", default=None,
action="store", dest="author",
help="the acker name to log when used with the "
"'collector ack unavailability' action"),
"begin": Option(
"--begin", default=None,
action="store", dest="begin",
help="a begin date expressed as 'YYYY-MM-DD hh:mm'. "
"used with the 'collector ack unavailability' "
"action"),
"cluster": Option(
"-c", "--cluster", default=False,
action="store_true", dest="cluster",
help="option to set when excuting from a clusterware to"
" disable safety net"),
"color": Option(
"--color", default="auto",
action="store", dest="color",
help="colorize output. possible values are : auto=guess "
"based on tty presence, always|yes=always colorize, "
"never|no=never colorize"),
"comment": Option(
"--comment", default=None,
action="store", dest="comment",
help="a comment to log when used with the 'collector "
"ack unavailability' action"),
"config": Option(
"--config", default=None,
action="store", dest="parm_config",
help="the configuration file to use when creating or "
"installing a service"),
"cron": Option(
"--cron", default=False,
action="store_true", dest="cron",
help="used by cron'ed action to tell the collector to "
"treat the log entries as such"),
"debug": Option(
"--debug", default=False,
action="store_true", dest="debug",
help="debug mode"),
"daemon": Option(
"--daemon", default=False,
action="store_true", dest="daemon",
help="a flag inhibiting the daemonization. set by the "
"daemonization routine."),
"disable_rollback": Option(
"--disable-rollback", default=False,
action="store_true", dest="disable_rollback",
help="Exit without resource activation rollback on start"
" action error"),
"discard": Option(
"--discard", default=False,
action="store_true", dest="discard",
help="Discard the stashed erroneous configuration file "
"in a 'edit config' command"),
"dry_run": Option(
"--dry-run", default=False,
action="store_true", dest="dry_run",
help="Show the action execution plan"),
"duration": Option(
"--duration", default=None,
action="store", dest="duration", type="int",
help="a duration expressed in minutes. used with the "
"'collector ack unavailability' action"),
"end": Option(
"--end", default=None,
action="store", dest="end",
help="a end date expressed as 'YYYY-MM-DD hh:mm'. used "
"with the 'collector ack unavailability' action"),
"env": Option(
"--env", default=[],
action="append", dest="env",
help="with the create action, set a env section "
"parameter. multiple --env = can be "
"specified."),
"eval": Option(
"--eval", default=False,
action="store_true", dest="eval",
help="If set with the 'get' action, the printed value of "
"--param is scoped and dereferenced."),
"force": Option(
"-f", "--force", default=False,
action="store_true", dest="force",
help="force action, ignore sanity check warnings"),
"format": Option(
"--format", default=None,
action="store", dest="format",
help="specify a data formatter for output of the print*"
" and collector* commands. possible values are json, csv"
" or table."),
"help": Option(
"-h", "--help", default=None,
action="store_true", dest="parm_help",
help="show this help message and exit"),
"hide_disabled": Option(
"--hide-disabled", default=None,
action="store_false", dest="show_disabled",
help="tell print|json status action to not include the "
"disabled resources in the output, irrespective of"
" the show_disabled service configuration setting."),
"id": Option(
"--id", default=0,
action="store", dest="id", type="int",
help="specify an object id to act on"),
"ignore_affinity": Option(
"--ignore-affinity", default=False,
action="store_true", dest="ignore_affinity",
help="ignore service anti-affinity with other services "
"check"),
"interactive": Option(
"-i", "--interactive", default=False,
action="store_true", dest="interactive",
help="prompt user for a choice instead of going for "
"defaults or failing"),
"like": Option(
"--like", default="%",
action="store", dest="like",
help="a sql like filtering expression. leading and "
"trailing wildcards are automatically set."),
"master": Option(
"--master", default=False,
action="store_true", dest="master",
help="option to set to limit the action scope to the "
"master service resources"),
"message": Option(
"--message", default="",
action="store", dest="message",
help="the message to send to the collector for logging"),
"module": Option(
"--module", default="",
action="store", dest="module",
help="compliance, set module list"),
"moduleset": Option(
"--moduleset", default="",
action="store", dest="moduleset",
help="compliance, set moduleset list. The 'all' value "
"can be used in conjonction with detach."),
"onlyprimary": Option(
"--onlyprimary", default=None,
action="store_true", dest="parm_primary",
help="operate only on service flagged for autostart on "
"this node"),
"onlysecondary": Option(
"--onlysecondary", default=None,
action="store_true", dest="parm_secondary",
help="operate only on service not flagged for autostart"
" on this node"),
"parallel": Option(
"-p", "--parallel", default=False,
action="store_true", dest="parallel",
help="start actions on specified services in parallel"),
"param": Option(
"--param", default=None,
action="store", dest="param",
help="point a service configuration parameter for the "
"'get' and 'set' actions"),
"provision": Option(
"--provision", default=False,
action="store_true", dest="provision",
help="with the install or create actions, provision the"
" service resources after config file creation. "
"defaults to False."),
"recover": Option(
"--recover", default=False,
action="store_true", dest="recover",
help="Recover the stashed erroneous configuration file "
"in a 'edit config' command"),
"refresh": Option(
"--refresh", default=False,
action="store_true", dest="refresh",
help="drop last resource status cache and re-evaluate "
"before printing with the 'print [json] status' "
"commands"),
"remote": Option(
"--remote", default=False,
action="store_true", dest="remote",
help="flag action as triggered by a remote node. used "
"to avoid recursively triggering actions amongst "
"nodes"),
"resource": Option(
"--resource", default=[],
action="append",
help="a resource definition in json dictionary format "
"fed to create or update"),
"rid": Option(
"--rid", default=None,
action="store", dest="parm_rid",
help="comma-separated list of resource to limit action "
"to"),
"ruleset": Option(
"--ruleset", default="",
action="store", dest="ruleset",
help="compliance, set ruleset list. The 'all' value can"
" be used in conjonction with detach."),
"ruleset_date": Option(
"--ruleset-date", default="",
action="store", dest="ruleset_date",
help="compliance, use rulesets valid on specified date"),
"service": Option(
"-s", "--service", default=None,
action="store", dest="parm_svcs",
help="comma-separated list of service to operate on"),
"show_disabled": Option(
"--show-disabled", default=None,
action="store_true", dest="show_disabled",
help="tell print|json status action to include the "
"disabled resources in the output, irrespective of"
" the show_disabled service configuration setting."),
"slave": Option(
"--slave", default=None, action="store", dest="slave",
help="option to set to limit the action scope to the "
"service resources in the specified, comma-"
"separated, slaves"),
"slaves": Option(
"--slaves", default=False,
action="store_true", dest="slaves",
help="option to set to limit the action scope to all "
"slave service resources"),
"status": Option(
"--status", default=None,
action="store", dest="parm_status",
help="operate only on service in the specified status "
"(up/down/warn)"),
"subsets": Option(
"--subsets", default=None,
action="store", dest="parm_subsets",
help="comma-separated list of resource subsets to limit"
" action to"),
"tag": Option(
"--tag", default=None,
action="store", dest="tag",
help="a tag specifier used by 'collector create tag', "
"'collector add tag', 'collector del tag'"),
"tags": Option(
"--tags", default=None,
action="store", dest="parm_tags",
help="comma-separated list of resource tags to limit "
"action to. The + separator can be used to impose "
"multiple tag conditions. Example: tag1+tag2,tag3 "
"limits the action to resources with both tag1 and"
" tag2, or tag3."),
"template": Option(
"--template", default=None,
action="store", dest="parm_template",
help="the configuration file template name or id, "
"served by the collector, to use when creating or "
"installing a service"),
"to": Option(
"--to", default=None,
action="store", dest="parm_destination_node",
help="remote node to start or migrate the service to"),
"unprovision": Option(
"--unprovision", default=False,
action="store_true", dest="unprovision",
help="with the delete action, unprovision the service "
"resources before config files file deletion. "
"defaults to False."),
"value": Option(
"--value", default=None,
action="store", dest="value",
help="set a service configuration parameter value for "
"the 'set --param' action"),
"verbose": Option(
"--verbose", default=False,
action="store_true", dest="verbose",
help="add more information to some print commands: +next"
" in 'print schedule'"),
"waitlock": Option(
"--waitlock", default=-1,
action="store", dest="parm_waitlock", type="int",
help="comma-separated list of resource tags to limit "
"action to"),
})
# options handled by the svcmgr-specific layer of the parser
SVCMGR_OPTS = [
    OPT.onlyprimary,
    OPT.onlysecondary,
    OPT.service,
    OPT.status,
]
# options accepted by every svcmgr action
GLOBAL_OPTS = SVCMGR_OPTS + [
    OPT.cluster,
    OPT.color,
    OPT.cron,
    OPT.daemon,
    OPT.debug,
    OPT.parallel,
    OPT.waitlock,
    OPT.help,
    OPT.remote,
]
# options common to all service actions (scope filtering, dry-run, ...)
ACTION_OPTS = [
    OPT.dry_run,
    OPT.force,
    OPT.master,
    OPT.rid,
    OPT.slave,
    OPT.slaves,
    OPT.subsets,
    OPT.tags,
]
# extra options for actions that start resources
START_ACTION_OPTS = [
    OPT.disable_rollback,
    OPT.ignore_affinity,
]
ACTIONS = {
'Service actions': {
'boot': {
'msg': 'start a service if executed on the primary node (or one of'
' the primary nodes in case of a flex service), '
'startstandby if not',
'options': ACTION_OPTS + START_ACTION_OPTS,
},
'dns_update': {
'msg': 'update the collector dns records for the service',
'options': ACTION_OPTS,
},
'shutdown': {
'msg': 'stop a service, disabling the background database logging',
'options': ACTION_OPTS,
},
'start': {
'msg': 'start all service resources',
'options': ACTION_OPTS + START_ACTION_OPTS,
},
'startstandby': {
'msg': 'start service resources flagged always on',
'options': ACTION_OPTS + START_ACTION_OPTS,
},
'startip': {
'msg': 'configure service ip addresses',
'options': ACTION_OPTS + START_ACTION_OPTS,
},
'startshare': {
'msg': 'start network shares',
'options': ACTION_OPTS + START_ACTION_OPTS,
},
'stopshare': {
'msg': 'stop network shares',
'options': ACTION_OPTS,
},
'startfs': {
'msg': 'prepare devices, logical volumes, mount service '
'filesystems, bootstrap containers',
'options': ACTION_OPTS + START_ACTION_OPTS,
},
'startapp': {
'msg': 'execute service application startup script',
'options': ACTION_OPTS + START_ACTION_OPTS,
},
'stop': {
'msg': 'stop all service resources not flagged always on. With '
'--force, stop all service resources, even those flagged '
'always on.',
'options': ACTION_OPTS,
},
'stopip': {
'msg': 'unconfigure service ip addresses',
'options': ACTION_OPTS,
},
'stopfs': {
'msg': 'shutdown container, umount service filesystems, deactivate'
' logical volumes',
'options': ACTION_OPTS,
},
'stopapp': {
'msg': 'execute service application stop script',
'options': ACTION_OPTS,
},
'startcontainer': {
'msg': 'start the container resource',
'options': ACTION_OPTS + START_ACTION_OPTS,
},
'stopcontainer': {
'msg': 'stop the container resource',
'options': ACTION_OPTS,
},
'provision': {
'msg': 'provision and start the service',
'options': ACTION_OPTS + START_ACTION_OPTS,
},
'unprovision': {
'msg': 'stop and unprovision the service. beware: data will be '
'lost upon fs and disk unprovisioning.',
'options': ACTION_OPTS,
},
'disable': {
'msg': 'disable resources passed through --rid in services passed'
' through --service. Specifying no resource disables the '
'whole service.',
'options': [
OPT.rid,
OPT.tags,
OPT.subsets,
],
},
'enable': {
'msg': 'enable resources passed through --rid in services passed'
' through --service. Specifying no resource enables the '
'whole service.',
'options': [
OPT.rid,
OPT.tags,
OPT.subsets,
],
},
'status': {
'msg': 'return service overall status code',
'options': [
OPT.refresh,
],
},
'print_status': {
'msg': 'display service resource status',
'options': [
OPT.format,
OPT.hide_disabled,
OPT.refresh,
OPT.show_disabled,
],
},
'print_resource_status': {
'msg': 'display a specific service resource status, pointed by'
' --rid',
'options': [
OPT.format,
OPT.refresh,
OPT.rid,
],
},
'print_config_mtime': {
'msg': 'display service configuration file modification time',
},
'freeze': {
'msg': 'set up a flag to block actions on this service',
},
'thaw': {
'msg': 'remove the flag to unblock actions on this service',
},
'frozen': {
'msg': 'report on the current blocking of actions on this service',
},
'run': {
'msg': 'run all tasks, or tasks specified by --rid --tags and '
'--subset, disregarding their schedule',
'options': ACTION_OPTS,
},
'startdisk': {
'msg': 'combo action, activating standby disks, taking '
'reservations, starting loopback devices and volume '
'groups',
'options': ACTION_OPTS + START_ACTION_OPTS,
},
'stopdisk': {
'msg': 'combo action, stopping volume groups and loopback '
'devices, droping reservations, disabling standby disks',
'options': ACTION_OPTS,
},
'presync': {
'msg': 'update var files associated to resources',
'options': ACTION_OPTS,
},
'postsync': {
'msg': 'make use of files received from master nodes in var',
'options': ACTION_OPTS,
},
'prstart': {
'msg': 'reserve scsi disks held by this service',
'options': ACTION_OPTS + START_ACTION_OPTS,
},
'prstop': {
'msg': 'release scsi disks held by this service',
'options': ACTION_OPTS,
},
'prstatus': {
'msg': 'report status of reservations on scsi disks held by this '
'service',
},
'restart': {
'msg': 'combo action, chaining stop-start',
'options': ACTION_OPTS + START_ACTION_OPTS,
},
'resync': {
'msg': 'combo action, chaining stop-sync_resync-start',
'options': ACTION_OPTS + START_ACTION_OPTS,
},
'sync_nodes': {
'msg': 'send to peer nodes the service config files and '
'additional files described in the config file.',
'options': ACTION_OPTS,
},
'sync_drp': {
'msg': 'send to drp nodes the service config files and '
'additional files described in the config file.',
'options': ACTION_OPTS,
},
'sync_quiesce': {
'msg': 'trigger a storage hardware-assisted disk synchronization',
'options': ACTION_OPTS,
},
'sync_break': {
'msg': 'split a storage hardware-assisted disk synchronization',
'options': ACTION_OPTS,
},
'sync_split': {
'msg': 'split a EMC SRDF storage hardware-assisted disk '
'synchronization',
'options': ACTION_OPTS,
},
'sync_establish': {
'msg': 'establish a EMC SRDF storage hardware-assisted disk '
'synchronization',
'options': ACTION_OPTS,
},
'sync_resync': {
'msg': 'like sync_update, but not triggered by the scheduler '
'(thus adapted for clone/snap operations)',
'options': ACTION_OPTS,
},
'sync_full': {
'msg': 'trigger a full copy of the volume to its target',
'options': ACTION_OPTS,
},
'sync_restore': {
'msg': 'trigger a restore of the sync resources data to their '
'target path (DANGEROUS: make sure you understand before '
'running this action).',
'options': ACTION_OPTS,
},
'sync_update': {
'msg': 'trigger a one-time resync of the volume to its target',
'options': ACTION_OPTS,
},
'sync_resume': {
'msg': 're-establish a broken storage hardware-assisted '
'synchronization',
'options': ACTION_OPTS,
},
'sync_revert': {
'msg': 'revert to the pre-failover data (looses current data)',
'options': ACTION_OPTS,
},
'sync_verify': {
'msg': 'trigger a one-time checksum-based verify of the volume '
'and its target',
'options': ACTION_OPTS,
},
'sync_all': {
'msg': 'combo action, chaining sync_nodes-sync_drp-sync_update.',
'options': ACTION_OPTS,
},
'push': {
'msg': 'push service configuration to the collector',
},
'pull': {
'msg': 'pull a service configuration from the collector',
'options': [
OPT.provision,
],
},
'push_resinfo': {
'msg': 'push service resources and application launchers info '
'key/value pairs the collector',
},
'push_service_status': {
'msg': 'push service and its resources status to database',
},
'print_disklist': {
'msg': 'print service disk list',
'options': [
OPT.format,
OPT.rid,
OPT.tags,
OPT.subsets,
],
},
'print_devlist': {
'msg': 'print service device list',
'options': [
OPT.format,
OPT.rid,
OPT.tags,
OPT.subsets,
],
},
'switch': {
'msg': 'stop the service on the local node and start on the '
'remote node. --to specify the remote node to '
'switch the service to.',
'options': ACTION_OPTS + START_ACTION_OPTS + [
OPT.to,
],
},
'migrate': {
'msg': 'live migrate the service to the remote node. '
'--to specify the remote node to migrate the '
'service to.',
'options': ACTION_OPTS + START_ACTION_OPTS + [
OPT.to,
],
},
'resource_monitor': {
'msg': 'detect monitored resource failures and trigger '
'monitor_action',
'options': ACTION_OPTS,
},
'stonith': {
'msg': 'command provided to the heartbeat daemon to fence peer '
'node in case of split brain',
'options': ACTION_OPTS,
},
'docker': {
'msg': 'wrap the docker client command, setting automatically '
'the socket parameter to join the service-private docker '
'daemon. The %as_service%, %images% and %instances% words '
'in the wrapped command are replaced by, respectively, '
'the registry login username/password/email parameters to '
'log as a service using @ as the '
'username and the node uuid as password (which is what '
'is expected when the opensvc collector is used as the '
'JWT manager for the registry), the set of docker '
'instance names and images for container resources '
'passing the --tags, --rid and --subsets filters. This is '
'useful to remove all instances of a service or all '
'instances of resources with a tag like "frontend". Note '
'the opensvc filters must be positioned before the docker '
'command in the arguments list.',
},
'print_schedule': {
'msg': 'print the service tasks schedule',
'options': [
OPT.format,
OPT.verbose,
],
},
'scheduler': {
'msg': 'run the service task scheduler',
},
'pg_freeze': {
'msg': 'freeze the tasks of a process group',
'options': ACTION_OPTS,
},
'pg_thaw': {
'msg': 'thaw the tasks of a process group',
'options': ACTION_OPTS,
},
'pg_kill': {
'msg': 'kill the tasks of a process group',
'options': ACTION_OPTS,
},
'logs': {
'msg': 'display the service logs in the pager',
},
},
'Service configuration': {
'print_config': {
'msg': 'display service current configuration',
'options': [
OPT.format,
],
},
'edit_config': {
'msg': 'edit service configuration',
'options': [
OPT.discard,
OPT.recover,
],
},
'validate_config': {
'msg': 'check the sections and parameters are valid.',
},
'create': {
'msg': 'create a new service configuration file. --interactive '
'triggers the interactive mode. --template ||| fetchs and '
'installs a service config template. --config | fetchs and installs a service config file. '
'--provision create the system resources defined in the '
'service config.',
'options': ACTION_OPTS + [
OPT.config,
OPT.env,
OPT.interactive,
OPT.provision,
OPT.resource,
OPT.template,
],
},
'update': {
'msg': 'update definitions in an existing service configuration '
'file',
'options': ACTION_OPTS + [
OPT.interactive,
OPT.provision,
OPT.resource,
],
},
'delete': {
'msg': 'delete the service instance on the local node if no '
'--rid is specified, or delete the resources pointed by '
'--rid in services passed through --service',
'options': ACTION_OPTS + [
OPT.unprovision,
],
},
'set': {
'msg': 'set a service configuration parameter',
'options': [
OPT.param,
OPT.value,
],
},
'get': {
'msg': 'get the raw or dereferenced value of a service '
'configuration parameter',
'options': [
OPT.eval,
OPT.param,
],
},
'unset': {
'msg': 'unset a node configuration parameter pointed by --param',
'options': [
OPT.param,
],
},
},
'Compliance': {
'compliance_auto': {
'msg': 'run compliance checks or fixes depending on the autofix'
'module property values.',
'options': [
OPT.attach,
OPT.force,
OPT.module,
OPT.moduleset,
OPT.ruleset_date,
],
},
'compliance_check': {
'msg': 'run compliance checks.',
'options': [
OPT.attach,
OPT.force,
OPT.module,
OPT.moduleset,
OPT.ruleset_date,
],
},
'compliance_fix': {
'msg': 'run compliance fixes.',
'options': [
OPT.attach,
OPT.force,
OPT.module,
OPT.moduleset,
OPT.ruleset_date,
],
},
'compliance_fixable': {
'msg': 'verify compliance fixes prerequisites.',
'options': [
OPT.attach,
OPT.force,
OPT.module,
OPT.moduleset,
OPT.ruleset_date,
],
},
'compliance_env': {
'msg': 'show the compliance modules environment variables.',
'options': [
OPT.module,
OPT.moduleset,
],
},
'compliance_show_status': {
'msg': 'show compliance modules status',
},
'compliance_show_moduleset': {
'msg': 'show compliance rules applying to this service',
},
'compliance_list_moduleset': {
'msg': 'list available compliance modulesets. --moduleset f% '
'limit the scope to modulesets matching the f% pattern.',
},
'compliance_attach_moduleset': {
'msg': 'attach moduleset specified by --moduleset to this service',
'options': [
OPT.moduleset,
],
},
'compliance_detach_moduleset': {
'msg': 'detach moduleset specified by --moduleset from this '
'service',
'options': [
OPT.moduleset,
],
},
'compliance_list_ruleset': {
'msg': 'list available compliance rulesets. --ruleset f% limit '
'the scope to rulesets matching the f% pattern.',
},
'compliance_show_ruleset': {
'msg': 'show compliance rules applying to this node',
},
'compliance_attach_ruleset': {
'msg': 'attach ruleset specified by --ruleset to this service',
'options': [
OPT.ruleset,
],
},
'compliance_detach_ruleset': {
'msg': 'detach ruleset specified by --ruleset from this service',
'options': [
OPT.ruleset,
],
},
'compliance_attach': {
'msg': 'attach ruleset specified by --ruleset and/or moduleset '
'specified by --moduleset to this service',
'options': [
OPT.moduleset,
OPT.ruleset,
],
},
'compliance_detach': {
'msg': 'detach ruleset specified by --ruleset and/or moduleset '
'specified by --moduleset from this service',
'options': [
OPT.moduleset,
OPT.ruleset,
],
},
},
'Collector management': {
'collector_ack_unavailability': {
'msg': 'acknowledge an unavailability period. the period is '
'specified by --begin/--end or --begin/--duration. '
'omitting --begin defaults to now. an acknowlegment can '
'be completed by --author (defaults to root@nodename), '
'--account (default to 1) and --comment',
'options': [
OPT.author,
OPT.account,
OPT.begin,
OPT.end,
OPT.comment,
OPT.duration,
],
},
'collector_list_unavailability_ack': {
'msg': 'list acknowledged periods for the service. the periods '
'can be filtered by --begin/--end. omitting --end '
'defaults to now. the wildcard for --comment and '
'--author is %',
'options': [
OPT.author,
OPT.begin,
OPT.end,
OPT.comment,
],
},
'collector_list_actions': {
'msg': 'list actions on the service, whatever the node, during '
'the period specified by --begin/--end. --end defaults to '
'now. --begin defaults to 7 days ago',
'options': [
OPT.begin,
OPT.end,
OPT.format,
],
},
'collector_ack_action': {
'msg': 'acknowledge an action error on the service. an '
'acknowlegment can be completed by --author (defaults '
'to root@nodename) and --comment',
'options': [
OPT.author,
OPT.comment,
],
},
'collector_show_actions': {
'msg': 'show actions detailed log. a single action is specified '
'by --id. a range is specified by --begin/--end dates. '
'--end defaults to now. --begin defaults to 7 days ago',
'options': [
OPT.begin,
OPT.id,
OPT.end,
OPT.format,
],
},
'collector_checks': {
'msg': 'display service checks',
'options': [
OPT.format,
],
},
'collector_disks': {
'msg': 'display service disks',
'options': [
OPT.format,
],
},
'collector_log': {
'msg': 'log a message in the collector\'s service log',
'options': [
OPT.message,
],
},
'collector_alerts': {
'msg': 'display service alerts',
'options': [
OPT.format,
],
},
'collector_events': {
'msg': 'display service events during the period specified by '
'--begin/--end. --end defaults to now. --begin defaults '
'to 7 days ago',
'options': [
OPT.begin,
OPT.end,
OPT.format,
],
},
'collector_asset': {
'msg': 'display asset information known to the collector',
'options': [
OPT.format,
],
},
'collector_networks': {
'msg': 'display network information known to the collector for '
'each service ip',
'options': [
OPT.format,
],
},
'collector_tag': {
'msg': 'set a service tag (pointed by --tag)',
'options': [
OPT.tag,
],
},
'collector_untag': {
'msg': 'unset a service tag (pointed by --tag)',
'options': [
OPT.tag,
],
},
'collector_show_tags': {
'msg': 'list all service tags',
'options': [
OPT.format,
],
},
'collector_list_tags': {
'msg': 'list all available tags. use --like to filter the output.',
'options': [
OPT.format,
OPT.like,
],
},
'collector_create_tag': {
'msg': 'create a new tag',
'options': [
OPT.tag,
],
},
},
}
# actions still accepted for backward compatibility but no longer
# documented; most are remapped by ACTIONS_TRANSLATIONS
DEPRECATED_ACTIONS = [
    "collector_json_alerts",
    "collector_json_asset",
    "collector_json_checks",
    "collector_json_disks",
    "collector_json_events",
    "collector_json_list_actions",
    "collector_json_list_unavailability_ack",
    "collector_json_networks",
    "collector_json_show_actions",
    "collector_json_status",
    "push_appinfo",
    "json_config",
    "json_devlist",
    "json_disklist",
    "json_env",
    "json_schedule",
    "json_status",
    "syncall",
    "syncbreak",
    "syncestablish",
    "syncnodes",
    "syncdrp",
    "syncfullsync",
    "syncquiesce",
    "syncresync",
    "syncsplit",
    "syncupdate",
    "syncresume",
    "syncrevert",
    "syncverify",
]
# legacy action name -> current action name mapping
ACTIONS_TRANSLATIONS = {
    "push_env_mtime": "push_config_mtime",
    "push_env": "push_config",
    "json_env": "json_config",
    "syncall": "sync_all",
    "syncbreak": "sync_break",
    "syncdrp": "sync_drp",
    "syncestablish": "sync_establish",
    "syncfullsync": "sync_full",
    "syncnodes": "sync_nodes",
    "syncquiesce": "sync_quiesce",
    "syncrestore": "sync_restore",
    "syncresume": "sync_resume",
    "syncresync": "sync_resync",
    "syncrevert": "sync_revert",
    "syncsplit": "sync_split",
    "syncupdate": "sync_update",
    "syncverify": "sync_verify",
}
class SvcmgrOptParser(OptParser):
    """
    The svcmgr-specific options parser class
    """
    def __init__(self, args=None, colorize=True, width=None, formatter=None,
                 indent=6):
        # delegate to the generic OptParser, feeding it the svcmgr
        # action/option tables defined above in this module
        OptParser.__init__(self, args=args, prog=PROG, options=OPT,
                           actions=ACTIONS,
                           deprecated_actions=DEPRECATED_ACTIONS,
                           actions_translations=ACTIONS_TRANSLATIONS,
                           global_options=GLOBAL_OPTS,
                           svcmgr_options=SVCMGR_OPTS,
                           colorize=colorize, width=width,
                           formatter=formatter, indent=indent)
opensvc-1.8~20170412/lib/resSyncDcsCkpt.py 0000644 0001750 0001750 00000015216 13073467726 020312 0 ustar jkelbert jkelbert import os
import rcExceptions as ex
import rcStatus
import datetime
import resSyncDcs
from rcGlobalEnv import rcEnv
from rcUtilities import justcall
class syncDcsCkpt(resSyncDcs.SyncDcs):
def can_sync(self, target=None):
    """Checkpoint syncs are always possible, whatever the target."""
    return True
def checkpoint(self):
    """Set a replication checkpoint marker covering all source virtual
    disks of the configured pairs, then refresh the timestamp file."""
    fetch = ""
    handles = ""
    for idx, pair in enumerate(self.pairs):
        fetch += '$v%d=Get-DcsVirtualDisk -VirtualDisk %s -connection %s;'%(idx, pair['src'], self.conn)
        handles += '$v%d '%idx
    cmd = fetch + "echo %s|Set-DcsReplicationCheckPoint -connection %s"%(handles, self.conn)
    self.dcscmd(cmd, verbose=True)
    self.update_tsfile()
def get_snap(self, snap):
    """Return a dict describing snapshot <snap>, parsed from
    get-dcssnapshot output, or None if the command fails.
    Parsed results are memoized in self._info."""
    if snap in self._info:
        return self._info[snap]
    cmd = 'get-dcssnapshot -snapshot %s -connection %s;'%(snap, self.conn)
    try:
        ret, out, err = self.dcscmd(cmd)
    except:
        return None
    info = {}
    for line in out.split('\n'):
        fields = line.split(': ')
        if len(fields) != 2:
            # not a "key: value" line
            continue
        key = fields[0].strip()
        val = fields[1].strip()
        if key == 'TimeStamp':
            info['TimeStamp'] = datetime.datetime.strptime(val, "%d/%m/%Y %H:%M:%S")
        elif key in ['Type', 'State', 'ActiveOperation', 'Failure']:
            info[key] = val
    if len(info) > 0:
        self._info[snap] = info
    return info
def drpnodes_status(self, verbose=False, skip_prereq=False):
    """Status evaluation on drp nodes: each destination checkpoint
    snapshot must exist, be healthy, failure-free and fresh.
    Returns rcStatus.UP or rcStatus.WARN."""
    err = False
    errlog = []
    log = []
    try:
        self.get_auth()
    except ex.excError as e:
        self.status_log(str(e))
        return rcStatus.WARN
    for snap in [p['dst_ckpt'] for p in self.pairs]:
        info = self.get_snap(snap)
        if info is None:
            errlog.append("checkpoint snapshot %s does not exists"%snap)
            err |= True
            continue
        if info['State'] not in ['Healthy']:
            errlog.append("checkpoint snapshot %s state is %s"%(snap, info['State']))
            err |= True
        if info['Failure'] not in ['NoFailure']:
            errlog.append("checkpoint snapshot %s failure state is %s"%(snap, info['Failure']))
            err |= True
        # stale if older than sync_max_delay minutes
        if info['TimeStamp'] < datetime.datetime.now() - datetime.timedelta(minutes=self.sync_max_delay):
            errlog.append("checkpoint snapshot %s too old"%snap)
            err |= True
        log.append("last update on %s"%info['TimeStamp'])
    if err:
        self.status_log('\n'.join(errlog))
        return rcStatus.WARN
    self.status_log('\n'.join(log))
    return rcStatus.UP
def nodes_status(self, verbose=False, skip_prereq=False):
err = False
ts = self.read_tsfile()
if ts < datetime.datetime.now() - datetime.timedelta(minutes=self.sync_max_delay):
self.status_log("checkpoint too old")
err |= True
self.status_log("last update on %s"%str(ts))
if err:
return rcStatus.WARN
return rcStatus.UP
def _status(self, verbose=False, skip_prereq=False):
if rcEnv.nodename in self.svc.nodes:
return self.nodes_status(verbose, skip_prereq)
else:
return self.drpnodes_status(verbose, skip_prereq)
def pause_checkpoint(self):
cmd = ""
for d in self.pairs:
cmd += 'Disable-DcsTask -Task %s -connection %s ; '%(self.task_name(d['dst_ckpt']), self.conn)
self.dcscmd(cmd, verbose=True)
def create_task(self):
cmd = "Get-DcsTask -connection %s"%self.conn
tasks = []
ret, out, err = self.dcscmd(cmd, verbose=False)
for line in out.split('\n'):
if line.startswith("Caption"):
tasks.append(line.split(':')[-1].strip())
cmd = ""
for d in self.pairs:
if self.task_name(d['dst_ckpt']) in tasks:
continue
cmd += "Add-DcsTask -Disabled -Name %s -connection %s; "%(self.task_name(d['dst_ckpt']), self.conn)
cmd += 'Add-DcsTrigger -Task %s -VirtualDisk "%s" -connection %s; '%(self.task_name(d['dst_ckpt']), d['dst'], self.conn)
cmd += 'Add-DcsAction -Task %s -MethodActionType UpdateSnapshot -connection %s -TargetId (Get-DcsSnapshot -snapshot "%s" -connection %s).Id ; '%(self.task_name(d['dst_ckpt']), self.conn, d['dst_ckpt'], self.conn)
self.dcscmd(cmd, verbose=True)
def resume_checkpoint(self):
self.create_task()
cmd = ""
for d in self.pairs:
cmd += 'Enable-DcsTask -Task %s -connection %s; '%(self.task_name(d['dst_ckpt']), self.conn)
self.dcscmd(cmd, verbose=True)
def task_name(self, id):
return '-'.join((self.svc.svcname, self.rid, id))
def sync_break(self):
self.pause_checkpoint()
def start(self):
if rcEnv.nodename not in self.svc.drpnodes:
return
self.pause_checkpoint()
def sync_resume(self):
self.resume_checkpoint()
def sync_update(self):
self.checkpoint()
def refresh_svcstatus(self):
self.svcstatus = self.svc.group_status(excluded_groups=set(["sync", 'hb', 'app']))
def get_svcstatus(self):
if len(self.svcstatus) == 0:
self.refresh_svcstatus()
def __init__(self,
rid=None,
pairs=[],
manager=set([]),
dcs=set([]),
**kwargs):
resSyncDcs.SyncDcs.__init__(self,
rid=rid, type="sync.dcsckpt",
manager=manager,
dcs=dcs,
**kwargs)
self.label = "DCS checkpoint snapshot of %s"%' ,'.join(map(lambda x: x['src'], pairs))
self.pairs = pairs
self._info = {}
def tsfile(self):
return os.path.join(rcEnv.pathvar, '.'.join((self.svc.svcname, self.rid, 'ts')))
def update_tsfile(self):
import datetime
now = datetime.datetime.now()
with open(self.tsfile(), 'w') as f:
f.write(str(now)+'\n')
def read_tsfile(self):
import datetime
try:
with open(self.tsfile(), 'r') as f:
ts = datetime.datetime.strptime(f.read(),"%Y-%m-%d %H:%M:%S.%f\n")
except:
ts = datetime.datetime(year=2000, month=01, day=01)
return ts
def __str__(self):
return "%s dcs=%s manager=%s pairs=%s" % (
resSync.Sync.__str__(self),
' '.join(self.dcs),
' '.join(self.manager),
str(pairs))
opensvc-1.8~20170412/lib/resDiskVdisk.py 0000644 0001750 0001750 00000003474 13073467726 020020 0 ustar jkelbert jkelbert """ Module providing device path remapping for libvirt VMs
"""
import resources as Res
import rcStatus
import rcExceptions as ex
from rcGlobalEnv import rcEnv
class Disk(Res.Resource):
    """Device path remapping resource for libvirt VMs.

    On start, rewrites the <disk><source dev=.../> paths in the
    container's libvirt configuration file so foreign-node device paths
    are replaced by this node's path.
    """
    def __init__(self,
                 rid=None,
                 name=None,
                 devpath=None,
                 **kwargs):
        Res.Resource.__init__(self,
                              rid,
                              "disk.vdisk",
                              **kwargs)
        self.label = "vdisk "+name
        self.name = name
        self.disks = set()
        # mapping: nodename -> device path on that node
        # (None default avoids a shared mutable default argument)
        self.devpath = devpath if devpath is not None else {}

    def __str__(self):
        return "%s name=%s" % (Res.Resource.__str__(self), self.name)

    def disklist(self):
        return self.disks

    def remap(self):
        """Rewrite foreign device paths in the container xml to the local one."""
        path = self.devpath[rcEnv.nodename]
        # device paths used by the other nodes.
        # bugfix: the original did set(self.devpath[rcEnv.nodename]),
        # building a set of the *characters* of the local path, so the
        # "devp in paths" test below could never match a path.
        paths = set(self.devpath.values()) - set([path])
        from xml.etree.ElementTree import ElementTree, SubElement
        tree = ElementTree()
        try:
            tree.parse(self.svc.resources_by_id['container'].cf)
        except Exception:
            self.log.error("failed to parse %s"%self.svc.resources_by_id['container'].cf)
            raise ex.excError
        for dev in tree.getiterator('disk'):
            s = dev.find('source')
            if s is None:
                continue
            il = s.items()
            if len(il) != 1:
                continue
            attr, devp = il[0]
            if devp in paths:
                self.log.info("remapping device path: %s -> %s"%(devp, path))
                s.set('dev', path)
                #SubElement(dev, "source", {'dev': path})
        tree.write(self.svc.resources_by_id['container'].cf)

    def stop(self):
        # nothing to undo on stop
        pass

    def start(self):
        self.remap()

    def _status(self, verbose=False):
        # purely a start-time action, no meaningful runtime status
        return rcStatus.NA
opensvc-1.8~20170412/lib/svcSg.py 0000644 0001750 0001750 00000022360 13073467726 016473 0 ustar jkelbert jkelbert import os
import svc
import rcExceptions as ex
from rcUtilities import justcall
from rcGlobalEnv import rcEnv
class SvcSg(svc.Svc):
    """HP Serviceguard package wrapped as an OpenSVC service.

    Discovers the package topology from cmviewcl/cmgetconf output and
    the legacy control (run) script, then instantiates the matching
    OpenSVC resources (hb, ip, vg, fs).
    """
    builder_props = [
        "nodes",
    ]

    def __init__(self, svcname, pkg_name=None):
        self.type = "sg"
        svc.Svc.__init__(self, svcname)
        self.pkg_name = pkg_name
        self.load_paths()

    def load_paths(self):
        """Locate the Serviceguard binaries and init the cntl store."""
        p = '/usr/local/cmcluster/bin/'
        if os.path.exists(p):
            self.prefix = p
        else:
            self.prefix = ''
        self.cmviewcl_bin = self.prefix + 'cmviewcl'
        self.cmgetconf_bin = self.prefix + 'cmgetconf'
        # normalized resource descriptions, filled by load_cntl/load_cmgetconf
        self.cntl = {"vg": {}, "ip": {}, "fs": {}}

    def load_cmviewcl(self):
        """Parse 'cmviewcl -v -f line' output into self.cmviewcl.

        Lines look like 'key=value' or 'restype:resname|[node:n|]param=value'.
        """
        self.cmviewcl = {}
        cmd = [self.cmviewcl_bin, "-p", self.pkg_name, "-v", "-f", "line"]
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excInitError(err)
        for line in out.split("\n"):
            if "=" not in line:
                continue
            i = line.index('=')
            param = line[:i]
            value = line[i+1:]
            if '|' in param:
                l = param.split('|')
                if len(l) == 2:
                    res, param = l
                    node = None
                elif len(l) == 3:
                    res, node, param = l
                    node = node.replace('node:', '')
                else:
                    # unexpected format: dump and skip (py2 print statement)
                    print l
                    continue
                restype, resname = res.split(':')
                if restype not in self.cmviewcl:
                    self.cmviewcl[restype] = {}
                if resname not in self.cmviewcl[restype]:
                    self.cmviewcl[restype][resname] = {}
                if node is not None:
                    # per-node values are keyed by (param, node) tuples
                    self.cmviewcl[restype][resname][(param,node)] = value
                else:
                    self.cmviewcl[restype][resname][param] = value
            else:
                self.cmviewcl[param] = value
        #print self.cmviewcl

    def load_cmgetconf(self):
        """Parse 'cmgetconf' output (modular packages only) into self.cntl."""
        if self.cmviewcl.get('style') != "modular":
            return
        cmd = [self.cmgetconf_bin, "-p", self.pkg_name]
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excInitError(err)
        lines = out.split("\n")
        l = []
        # first pass: keep only tab-separated param/value lines
        for line in lines:
            if len(line) == 0:
                continue
            if line.startswith("#"):
                continue
            if '\t' not in line:
                continue
            i = line.index('\t')
            param = line[:i]
            value = line[i+1:]
            value = value.strip().strip('"')
            l.append((param, value))
        # second pass: stateful walk; index1/index2 track the current
        # ip or fs entry being accumulated
        index1 = None
        for i, (param, value) in enumerate(l):
            if param in ("vg", "cvm_vg", "vxvm_vg"):
                self.cntl["vg"][value] = {param.upper(): value}
                continue
            if param == "ip_subnet":
                index1 = "ip"
                index2 = 0
                ip_subnet = value
                continue
            if param == "ip_address":
                self.cntl[index1][index2] = {
                    'IP': value,
                    'SUBNET': ip_subnet,
                }
                index2 += 1
                continue
            if param == "fs_name":
                index1 = "fs"
                index2 = value
                self.cntl[index1][index2] = {}
            if index1 is None:
                continue
            try:
                self.cntl[index1][index2][param] = value
            except:
                continue
            # fs_fsck_opt is the last fs parameter: close the fs entry
            if index1 == "fs" and param == "fs_fsck_opt":
                index1 = None
        #print self.cntl

    def load_cntl(self):
        """Parse the legacy control script (non-modular packages)."""
        if 'run_script' not in self.cmviewcl:
            # modular package
            return
        p = self.cmviewcl['run_script']
        try:
            f = open(p, 'r')
            buff = f.read()
            f.close()
        except:
            self.log.error("failed to load %s"%p)
            raise ex.excError
        for line in buff.split('\n'):
            line = line.strip()
            if line.startswith("#"):
                continue
            if len(line) == 0:
                continue
            # statements may be ';'-joined on a single line
            for _line in line.split(';'):
                _line = _line.strip()
                if _line.startswith("VG[") or \
                   _line.startswith("CVM_DG[") or \
                   _line.startswith("VXVM_DG[") or \
                   _line.startswith("IP[") or \
                   _line.startswith("SUBNET[") or \
                   _line.startswith("LV[") or \
                   _line.startswith("FS_MOUNT_OPT[") or \
                   _line.startswith("FS_TYPE[") or \
                   _line.startswith("FS["):
                    self.cntl_parse(_line)

    def cntl_parse(self, s):
        """Parse one 'PARAM[index]="value"' shell assignment into self.cntl."""
        i = s.index('[')
        param = s[:i]
        s = s[i+1:]
        i = s.index(']')
        index = s[:i]
        try:
            int(index)
        except:
            # only numeric-indexed assignments are package definitions
            return
        # skip ']=' then strip surrounding quotes
        value = s[i+2:].strip('"')
        if param in ["VG", "CVM_DG", "VXVM_DG"]:
            if index not in self.cntl['vg']:
                self.cntl['vg'][index] = {}
            self.cntl['vg'][index][param] = value.replace('/dev/', '')
        if param in ["IP", "SUBNET"]:
            if index not in self.cntl['ip']:
                self.cntl['ip'][index] = {}
            self.cntl['ip'][index][param] = value
        if param in ["FS", "LV", "FS_MOUNT_OPT", "FS_TYPE"]:
            if index not in self.cntl['fs']:
                self.cntl['fs'][index] = {}
            self.cntl['fs'][index][param] = value

    def builder(self):
        """Populate the service with resources discovered from Serviceguard."""
        if self.pkg_name is None:
            self.error("pkg_name is not set")
            raise ex.excInitError()
        self.load_cmviewcl()
        if len(self.cmviewcl) == 0:
            raise ex.excInitError()
        self.load_cntl()
        self.load_cmgetconf()
        self.nodes = set(self.cmviewcl['node'].keys())
        self.load_hb()
        self.load_resources()
        self.load_ip_addresses()
        self.load_vgs()

    def load_hb(self):
        """Add a heartbeat resource if the package is highly available."""
        if self.cmviewcl['highly_available'] != "yes":
            return
        rid = 'hb#sg0'
        m = __import__("resHbSg")
        r = m.Hb(rid, name=self.cmviewcl['name'])
        self += r

    def load_vgs(self):
        self.n_vg = 0
        for i in self.cntl['vg']:
            data = self.cntl['vg'][i]
            self.load_vg(data)

    def load_vg(self, data):
        """Add one volume group resource (lvm, cvm or vxvm flavor)."""
        if 'VG' in data:
            name = data['VG'].replace('/dev/', '')
            type = ""
        elif 'CVM_DG' in data:
            name = data['CVM_DG']
            type = "Cvm"
        elif 'VXVM_DG' in data:
            name = data['VXVM_DG']
            type = "VxVm"
        # NOTE(review): if none of VG/CVM_DG/VXVM_DG is present, name and
        # type are undefined below — presumably data always has one; verify
        n = self.n_vg
        rid = 'vg#sg%d'%n
        modname = "resDiskVg"+type+"Sg"+rcEnv.sysname
        try:
            m = __import__(modname)
        except ImportError:
            self.log.error("module %s is not implemented"%modname)
            return
        r = m.Disk(rid, name=name)
        # monitor the vg if a package service command references it
        if 'service' in self.cmviewcl:
            for data in self.cmviewcl['service'].values():
                if 'command' not in data:
                    continue
                if name in data['command'].split():
                    r.monitor = True
        self += r
        self.n_vg += 1

    def load_ip_addresses(self):
        self.n_ip_address = 0
        for i in self.cntl['ip']:
            data = self.cntl['ip'][i]
            self.load_ip_address(data)

    def load_ip_address(self, data):
        """Add one ip resource; monitored when its subnet is tracked."""
        ipname = data['IP']
        subnet = data['SUBNET']
        n = self.n_ip_address
        rid = 'ip#sg%d'%n
        m = __import__("resIpSg"+rcEnv.sysname)
        r = m.Ip(rid, ipdev="", ipname=ipname, mask="")
        if 'subnet' in self.cmviewcl and \
           subnet in self.cmviewcl['subnet']:
            r.monitor = True
        self += r
        self.n_ip_address += 1

    def load_resources(self):
        self.n_resource = 0
        for i in self.cntl['fs']:
            data = self.cntl['fs'][i]
            self.load_resource(data)

    def load_resource(self, data):
        """Add one filesystem resource from control-script or modular data."""
        if 'LV' in data:
            # legacy control-script keys
            dev = data['LV']
            mnt = data['FS']
            mntopt = data['FS_MOUNT_OPT']
            fstype = data['FS_TYPE']
        else:
            # modular cmgetconf keys
            if data['fs_server'] != "":
                dev = data['fs_server'] + ":" + data['fs_name']
            else:
                dev = data['fs_name']
            mnt = data['fs_directory']
            mntopt = data['fs_mount_opt']
            fstype = data['fs_type']
        # assumes dev looks like /dev/<vg>/<lv> — TODO confirm for nfs devs
        vgname = dev.split('/')[2]
        lvname = dev.split('/')[3]
        n = self.n_resource
        rid = 'fs#sg%d'%n
        m = __import__("resFsSg"+rcEnv.sysname)
        r = m.Mount(rid=rid, mount_point=mnt, device=dev,
                    fs_type=fstype, mount_options=mntopt)
        r.mon_name = '/vg/%s/lv/status/%s'%(vgname, lvname)
        if 'resource' in self.cmviewcl and \
           r.mon_name in self.cmviewcl['resource']:
            r.monitor = True
        if 'service' in self.cmviewcl:
            for data in self.cmviewcl['service'].values():
                if 'command' not in data:
                    continue
                if dev in data['command']:
                    r.monitor = True
        self += r
        self.n_resource += 1
opensvc-1.8~20170412/lib/checkRaidSas2Linux.py 0000777 0001750 0001750 00000000000 13073467726 024006 2checkRaidSas2.py ustar jkelbert jkelbert opensvc-1.8~20170412/lib/rcExceptions.py 0000644 0001750 0001750 00000006202 13073467726 020051 0 ustar jkelbert jkelbert class MonitorAction(Exception):
"""
A class dedicated to propagate up the stack a need to run the
resource_monitor action.
"""
pass
# Exception hierarchy used across opensvc resources and services.
class excEncapUnjoignable(Exception):
    # encapsulated service node cannot be reached
    pass

class excError(Exception):
    """ Failed action
    """
    def __init__(self, value=""):
        self.value = value

    def __str__(self):
        return str(self.value)

class OptNotFound(Exception):
    """ Service config file option not found
    """

class excSignal(Exception):
    """ Termination signal received
    """

class excUndefined(Exception):
    """ Mandatory Undefined action exception
    """
    def __init__(self,action=None,className=None,func=None):
        self.action=action
        self.className=className
        self.func=func

    def __str__(self):
        return "Undefined mandatory Action %s for className %s in function %s" % \
               (self.action,self.className,self.func)

class syncNoNodesToSync(Exception):
    """ No nodes to sync => abort stacked syncs resource actions
    """

class syncNoFilesToSync(Exception):
    """ No files to sync => move on to the next stacked syncs resource actions
    """

class syncConfigSyntaxError(Exception):
    """ Bogus configuration syntax => abort all
    """

class syncNotSnapable(Exception):
    """ A dir/file specified as source of a sync with snap is not included in a
        snapable resource mount => abort all
    """

class syncSnapExists(Exception):
    """ The snapshot already exists
    """

class syncSnapCreateError(Exception):
    """ Error in snapshot creation => clean up
    """

class syncSnapDestroyError(Exception):
    """ Error in snapshot destroy => clean up
    """

class syncSnapMountError(Exception):
    """ Error mounting fs => clean up
    """

class excEndAction(Exception):
    """ End multi-resource action. Not an error.
    """

class excContinueAction(Exception):
    """ Abort current resource action, but proceed anyway
    """

class excAbortAction(Exception):
    """ Abort multi-resource action
    """

class excInitError(Exception):
    """ Resource initialisation error
    """

class excScsiPrNotsupported(Exception):
    """ Scsi persistent reservation is not supported
    """

class excNotAvailable(Exception):
    """ Not available
    """

class excNotSupported(Exception):
    """ Not supported
    """

class excBug(Exception):
    # internal error carrying an arbitrary payload
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)

class MissImpl(Exception):
    # missing implementation for the current platform
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)

class IpDevDown(Exception):
    # the network device hosting an ip resource is down
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)

class IpConflict(Exception):
    # the ip address is already plumbed somewhere else
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)

class IpAlreadyUp(Exception):
    # the ip address is already up on this node
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)

class IpNoActions(Exception):
    # no action to perform on this ip resource
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)
import uuid
import re
import os
import time
import rcStatus
import rcExceptions as ex
from rcUtilities import which
from subprocess import *
import resScsiReserv
class ScsiReserv(resScsiReserv.ScsiReserv):
    """SCSI-2/SCSI-3 persistent reservation driver for Tru64 (OSF1).

    Registrations and reservations are driven through the 'scu' utility,
    addressed either by raw disk device or by bus/target/lun nexus
    discovered with 'hwmgr'.
    """
    def __init__(self,
                 rid=None,
                 peer_resource=None,
                 no_preempt_abort=False,
                 prkey=None,
                 **kwargs):
        resScsiReserv.ScsiReserv.__init__(self,
                                          rid=rid,
                                          peer_resource=peer_resource,
                                          no_preempt_abort=no_preempt_abort,
                                          prkey=prkey,
                                          **kwargs)
        # write-exclusive registrants-only reservation type
        self.prtype = 'wero'
        # disk basename -> hwmgr hardware id
        self.disk_id = {}
        # disk basename -> list of {bus, target, lun} nexus dicts
        self.itn = {}

    def get_disks(self):
        """Cache the peer resource disk list, as raw (rdisk) devices."""
        if len(self.disks) > 0:
            return
        self.disks = map(lambda x: str(x.replace('/disk/', '/rdisk/')), self.peer_resource.disklist())

    def scsireserv_supported(self):
        # support hinges on the availability of the scu utility
        if which('scu') is None:
            return False
        return True

    def ack_unit_attention(self, d):
        # not needed on this platform
        return 0

    def get_disk_ids(self):
        """Map disk basenames to hwmgr hardware ids (cached)."""
        if len(self.disk_id) > 0:
            return
        cmd = [ 'hwmgr', 'show', 'scsi' ]
        (ret, out, err) = self.call(cmd)
        if ret != 0:
            raise ex.excError(err)
        for line in out.split('\n'):
            v = line.split()
            if len(v) < 7:
                continue
            if v[3] != "disk":
                continue
            if not v[7].startswith("dsk"):
                continue
            # 'c' suffix: whole-disk partition name
            self.disk_id[v[7]+'c'] = v[0].strip(":")

    def get_itns(self):
        """Discover the bus/target/lun nexus of every known disk (cached)."""
        if len(self.itn) > 0:
            return
        self.get_disk_ids()
        for disk in self.disk_id:
            self.get_itn(disk)

    def get_itn(self, disk):
        """Fill self.itn[disk] from 'hwmgr show scsi -id <id> -full' output."""
        if disk in self.itn:
            return
        self.itn[disk] = []
        if disk not in self.disk_id:
            return
        id = self.disk_id[disk]
        cmd = [ 'hwmgr', 'show', 'scsi', '-id', id, '-full' ]
        (ret, out, err) = self.call(cmd)
        if ret != 0:
            return
        for line in out.split('\n'):
            v = line.split()
            if len(v) != 4:
                continue
            data = {}
            for i, s in enumerate(("bus", "target", "lun")):
                try:
                    # keep only lines whose first 3 fields are integers
                    j = int(v[i])
                    data[s] = v[i]
                except:
                    continue
            self.itn[disk].append(data)

    def set_nexus(self, itn):
        """scu preamble selecting the bus/target/lun to operate on."""
        return "set nexus bus %(bus)s target %(target)s lun %(lun)s ; " % itn

    def pipe_scu(self, cmd):
        """Feed a command string to scu through stdin; raise excError on failure."""
        self.log.info(cmd + ' | scu')
        p = Popen(['scu'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate(input=cmd)
        #if len(out) > 0:
        #    self.log.info(out)
        if len(err) > 0:
            self.log.error(out)
        if p.returncode:
            raise ex.excError

    def disk_registered(self, disk):
        """Return True if our hostid key is registered on the disk."""
        cmd = [ 'scu', '-f', disk, 'show', 'keys' ]
        (ret, out, err) = self.call(cmd)
        if ret != 0:
            self.log.error("failed to read registrations for disk %s" % disk)
        if out.count(self.hostid) == 0:
            return False
        return True

    def disk_register(self, disk):
        """Register our key on every nexus of the disk. Return 0/1."""
        self.get_itns()
        basedisk = os.path.basename(disk)
        if basedisk not in self.itn:
            self.log.error("no nexus information for disk %s"%disk)
            return 1
        r = 0
        for itn in self.itn[basedisk]:
            r += self.__disk_register(itn)
        if r > 0:
            r = 1
        return r

    def __disk_register(self, itn):
        cmd = self.set_nexus(itn) + 'preserve register skey ' + self.hostid
        try:
            self.pipe_scu(cmd)
        except ex.excError as e:
            self.log.error("failed to register key %s with nexus %s" % (self.hostid, ':'.join(itn.values())))
            return 1
        return 0

    def disk_unregister(self, disk):
        """Unregister our key from every nexus of the disk. Return 0/1."""
        self.get_itns()
        basedisk = os.path.basename(disk)
        if basedisk not in self.itn:
            self.log.error("no nexus information for disk %s"%disk)
            return 1
        r = 0
        for itn in self.itn[basedisk]:
            r += self.__disk_unregister(itn)
        if r > 0:
            r = 1
        return r

    def __disk_unregister(self, itn):
        # registering service key 0 drops the registration
        cmd = self.set_nexus(itn) + 'preserve register skey 0 key ' + self.hostid
        try:
            self.pipe_scu(cmd)
        except ex.excError as e:
            self.log.error("failed to unregister key %s with nexus %s" % (self.hostid, ':'.join(itn.values())))
            return 1
        return 0

    def get_reservation_key(self, disk):
        """Return the current reservation key of the disk, or None."""
        cmd = [ 'scu', '-f', disk, 'show', 'reservation' ]
        (ret, out, err) = self.call(cmd)
        if ret != 0:
            self.log.error("failed to list reservation for disk %s" % disk)
        if 'Reservation Key' not in out:
            return None
        for line in out.split('\n'):
            if 'Reservation Key' in line:
                return line.split()[-1]
        raise Exception()

    def disk_reserved(self, disk):
        """Return True if the disk is reserved by our hostid."""
        cmd = [ 'scu', '-f', disk, 'show', 'reservation' ]
        (ret, out, err) = self.call(cmd)
        if ret != 0:
            self.log.error("failed to read reservation for disk %s" % disk)
        if self.hostid in out:
            return True
        return False

    def disk_release(self, disk):
        cmd = [ 'scu', '-f', disk, 'preserve', 'release', 'key', self.hostid, 'type', self.prtype ]
        (ret, out, err) = self.vcall(cmd)
        if ret != 0:
            self.log.error("failed to release disk %s" % disk)
        return ret

    def disk_reserve(self, disk):
        cmd = [ 'scu', '-f', disk, 'preserve', 'reserve', 'key', self.hostid, 'type', self.prtype ]
        (ret, out, err) = self.vcall(cmd)
        if ret != 0:
            self.log.error("failed to reserve disk %s" % disk)
        return ret

    def _disk_preempt_reservation(self, disk, oldkey):
        """Preempt the reservation held under oldkey with our own key."""
        cmd = [ 'scu', '-f', disk, 'preserve', 'preempt', 'key', self.hostid, 'skey', oldkey, 'type', self.prtype ]
        (ret, out, err) = self.vcall(cmd)
        if ret != 0:
            self.log.error("failed to preempt reservation for disk %s" % disk)
        return ret
opensvc-1.8~20170412/lib/checkEthSunOS.py 0000644 0001750 0001750 00000023730 13073467726 020056 0 ustar jkelbert jkelbert import checks
import os
import re
from rcUtilities import justcall
from rcUtilitiesSunOS import get_solaris_version
from rcGlobalEnv import rcEnv
"""
# ifconfig -a
lo0: flags=2001000849 mtu 8232 index 1
inet 127.0.0.1 netmask ff000000
lo0:1: flags=2001000849 mtu 8232 index 1
zone frcp00vpd0385
inet 127.0.0.1 netmask ff000000
lo0:2: flags=2001000849 mtu 8232 index 1
zone frcp00vpd0388
inet 127.0.0.1 netmask ff000000
lo0:3: flags=2001000849 mtu 8232 index 1
zone frcp00vpd0192
inet 127.0.0.1 netmask ff000000
lo0:4: flags=2001000849 mtu 8232 index 1
zone frcp00vpd0192
inet 128.1.1.192 netmask ffff0000
lo0:5: flags=2001000849 mtu 8232 index 1
zone frcp00vpd0179
inet 127.0.0.1 netmask ff000000
aggr1: flags=1000843 mtu 1500 index 2
inet 172.31.4.195 netmask ffffff00 broadcast 172.31.4.255
ether 0:15:17:bb:85:58
aggr1:1: flags=1000843 mtu 1500 index 2
zone frcp00vpd0385
inet 172.31.4.180 netmask ffffff00 broadcast 172.31.4.255
aggr1:2: flags=1000843 mtu 1500 index 2
zone frcp00vpd0388
inet 172.31.4.183 netmask ffffff00 broadcast 172.31.4.255
aggr1:3: flags=1000843 mtu 1500 index 2
zone frcp00vpd0179
inet 172.31.4.67 netmask ffffff00 broadcast 172.31.4.255
aggr2: flags=1000843 mtu 1500 index 5
inet 172.31.195.195 netmask ffffff00 broadcast 172.31.195.255
ether 0:15:17:bb:85:59
bnx3: flags=1000843 mtu 1500 index 4
inet 55.16.201.195 netmask fffffc00 broadcast 55.16.203.255
ether 0:24:e8:35:9d:dd
bnx3:1: flags=1000843 mtu 1500 index 4
zone frcp00vpd0385
inet 55.16.201.142 netmask fffffc00 broadcast 55.16.203.255
bnx3:2: flags=1000843 mtu 1500 index 4
zone frcp00vpd0388
inet 55.16.201.145 netmask fffffc00 broadcast 55.16.203.255
bnx3:3: flags=1000843 mtu 1500 index 4
zone frcp00vpd0179
inet 55.16.202.98 netmask fffffc00 broadcast 55.16.203.255
Solaris 10
==========
# ndd -get /dev/bnx3 link_speed
1000
# ndd -get /dev/bnx3 link_duplex
1
# ndd -get /dev/bnx3 link_status
1
kstat -p | grep link_ | grep ce:0:ce0:link
ce:0:ce0:link_asmpause 0
ce:0:ce0:link_duplex 2
ce:0:ce0:link_pause 0
ce:0:ce0:link_speed 1000
ce:0:ce0:link_up 1
Solaris 11
==========
# dladm show-link -p -o link,class,over l226g0
l226g0:vnic:aggr0
# dladm show-link -p -o link,class,over aggr0
aggr0:aggr:net0 net2
# dladm show-link -p -o link,class,over net0
net0:phys:
# dladm show-phys -p -o state,speed,duplex net0
up:1000:full
# dladm show-link -p -o link,class,over net2
# dladm show-phys -p -o state,speed,duplex net2
up:1000:full
"""
class check(checks.check):
    """Ethernet link health checker for Solaris.

    Reports speed, duplex and link state per interface, probing with
    ndd/kstat on Solaris <11 and dladm on Solaris >=11.
    """
    chk_type = "eth"
    # cached 'kstat -p' output, fetched lazily
    kstat = None

    def _findphys(self, netif):
        """Recursively resolve netif (vnic/aggr) down to physical links.

        Physical link names are accumulated in self.l[self.topif].
        """
        res = ""
        cmd = ['/usr/sbin/dladm', 'show-link', '-p', '-o', 'link,class,over', netif]
        out, err, ret = justcall(cmd)
        if ret != 0:
            return ""
        lines = out.split('\n')
        for line in lines:
            if len(line) == 0:
                break
            v = line.split(':')
            if v[1] == 'phys':
                self.l[self.topif].add(v[0])
            else:
                # non-physical: recurse into the underlying links
                ifs = v[2].split(' ')
                for i in ifs:
                    res = self._findphys(i)
        return "OK"

    def do_check(self):
        """Build the interface list from ifconfig, then run the sub-checks."""
        self.osver = get_solaris_version()
        self.ifs = []
        cmd = ['/usr/sbin/ifconfig', '-a']
        out, err, ret = justcall(cmd)
        if ret != 0:
            return self.undef
        lines = out.split('\n')
        if len(lines) == 0:
            return self.undef
        for line in lines:
            # indented lines are continuations; skip loopback and ppp
            if line.startswith(' '):
                continue
            if line.startswith('lo'):
                continue
            if line.startswith('sppp'):
                continue
            if self.osver < 11:
                if line.startswith('aggr'):
                    continue
            if 'index' not in line:
                continue
            l = line.split(':')
            if 'index' not in l[1]:
                continue
            if len(l[0]) < 3:
                continue
            if l[0] in self.ifs:
                continue
            else:
                self.ifs.append(l[0])
        if self.osver >= 11:
            # map each interface to its physical links, then snapshot
            # the physical link states once for all sub-checks
            self.l = {}
            for ifn in self.ifs:
                if ifn not in self.l:
                    self.l[ifn] = set([])
                self.topif = ifn
                ret = self._findphys(ifn)
            cmd = ['/usr/sbin/dladm', 'show-phys', '-p', '-o', 'link,state,speed,duplex,device']
            out, err, ret = justcall(cmd)
            if ret != 0:
                return self.undef
            lines = out.split('\n')
            if len(lines) == 0:
                return self.undef
            self.phys = lines
        r = []
        r += self.do_check_speed()
        r += self.do_check_duplex()
        r += self.do_check_link()
        return r

    def do_check_speed(self):
        """One instance per interface (or physical link), value = speed."""
        r = []
        if self.osver >= 11:
            for ifn in self.ifs:
                for phy in self.l[ifn]:
                    for line in self.phys:
                        if line.startswith(phy+':'):
                            l = line.split(':')
                            val = l[2]
                            r.append({
                                'chk_instance': '%s.%s.speed'%(ifn,l[4]),
                                'chk_value': str(val),
                                'chk_svcname': '',
                            })
            return r
        for ifn in self.ifs:
            val = self.get_param(ifn, 'link_speed')
            r.append({
                'chk_instance': '%s.speed'%ifn,
                'chk_value': str(val),
                'chk_svcname': '',
            })
        return r

    def do_check_duplex(self):
        """Duplex check: 0 means full duplex, 1 means degraded."""
        r = []
        if self.osver >= 11:
            for ifn in self.ifs:
                for phy in self.l[ifn]:
                    for line in self.phys:
                        if line.startswith(phy+':'):
                            l = line.split(':')
                            if l[3] != 'full':
                                val = 1
                            else:
                                val = 0
                            r.append({
                                'chk_instance': '%s.%s.duplex'%(ifn,l[4]),
                                'chk_value': str(val),
                                'chk_svcname': '',
                            })
            return r
        for ifn in self.ifs:
            val = self.get_param(ifn, 'link_duplex')
            r.append({
                'chk_instance': '%s.duplex'%ifn,
                'chk_value': str(val),
                'chk_svcname': '',
            })
        return r

    def do_check_link(self):
        """Link check: 0 means up, 1 means down."""
        r = []
        if self.osver >= 11:
            for ifn in self.ifs:
                for phy in self.l[ifn]:
                    for line in self.phys:
                        if line.startswith(phy+':'):
                            l = line.split(':')
                            if l[1] != 'up':
                                val = 1
                            else:
                                val = 0
                            r.append({
                                'chk_instance': '%s.%s.link'%(ifn,l[4]),
                                'chk_value': str(val),
                                'chk_svcname': '',
                            })
            return r
        for ifn in self.ifs:
            val = self.get_param(ifn, 'link_status')
            r.append({
                'chk_instance': '%s.link'%ifn,
                'chk_value': str(val),
                'chk_svcname': '',
            })
        return r

    def get_param(self, intf, param):
        """Probe a link parameter via ndd, falling back to kstat."""
        val = self.get_from_ndd(intf, param)
        if val is None:
            val = self.get_from_kstat(intf, param)
        return val

    def get_from_ndd(self, intf, param):
        cmd = ['/usr/sbin/ndd', '-get', '/dev/'+intf, param]
        out, err, ret = justcall(cmd)
        if ret != 0:
            return
        return out.strip()

    def get_from_kstat(self, intf, param):
        """Look a link parameter up in cached 'kstat -p' output.

        Only the ce and nxge drivers are mapped; others return None.
        """
        inst = re.sub(r"[a-zA-Z]+", "", intf)
        drv = re.sub(r"[0-9]+", "", intf)
        # per-driver kstat key suffixes
        data = {
            "ce": {
                "link_status": ":"+intf+":link_up",
                "link_duplex": ":"+intf+":link_duplex",
                "link_speed": ":"+intf+":link_speed",
            },
            "nxge": {
                "link_status": ":mac:link_up",
                "link_duplex": ":mac:link_duplex",
                "link_speed": ":Port Stats:link_speed",
            },
        }
        if self.kstat is None:
            cmd = ['/usr/bin/kstat', '-p']
            out, err, ret = justcall(cmd)
            if ret == 0:
                self.kstat = out
        if self.kstat is None:
            return
        lines = self.kstat.split('\n')
        if len(lines) == 0:
            return
        prefix = ':'.join((drv, inst))
        if drv not in data:
            return
        _data = data[drv]
        if param not in _data:
            return
        _param = _data[param]
        patt = prefix + _param
        for line in lines:
            if not line.startswith(patt):
                continue
            l = line.split()
            return l[-1]
        return
opensvc-1.8~20170412/lib/resDiskVgVcsLinux.py 0000644 0001750 0001750 00000001107 13073467726 020777 0 ustar jkelbert jkelbert Res = __import__("resDiskVgLinux")
import rcStatus
import rcExceptions as ex
class Disk(Res.Disk):
    """Volume group resource whose lifecycle is delegated to VCS.

    OpenSVC never starts/stops the vg itself; it only reflects the
    state VCS reports for the matching resource.
    """

    def start(self):
        # VCS owns the resource lifecycle; nothing to do here.
        pass

    def stop(self):
        # VCS owns the resource lifecycle; nothing to do here.
        pass

    def _status(self, verbose=False):
        """Map the VCS 'State' attribute to an OpenSVC status."""
        try:
            state = self.svc.get_res_val(self.vcs_name, 'State')
        except ex.excError as err:
            self.status_log(str(err))
            return rcStatus.WARN
        if state == "ONLINE":
            return rcStatus.UP
        if state == "OFFLINE":
            return rcStatus.DOWN
        # any other state is reported verbatim as a warning
        self.status_log(state)
        return rcStatus.WARN
opensvc-1.8~20170412/lib/resFsOSF1.py 0000644 0001750 0001750 00000007624 13073467726 017127 0 ustar jkelbert jkelbert import os
import rcMountsOSF1 as rcMounts
import resFs as Res
from rcUtilities import qcall, protected_mount, getmount
from rcGlobalEnv import rcEnv
import rcExceptions as ex
from stat import *
def try_umount(self):
    """Best-effort umount of self.mount_point.

    First tries a plain umount; on failure (and unless the mount point
    is protected), syncs then repeatedly kills processes using the
    mount point with fuser before retrying. Returns the last umount
    exit code (0 on success).
    """
    cmd = ['umount', self.mount_point]
    (ret, out, err) = self.vcall(cmd, err_to_warn=True)
    if ret == 0:
        return 0
    if "not currently mounted" in err:
        return 0
    # don't try to kill processes using the source of a
    # protected bind mount
    if protected_mount(self.mount_point):
        return 1
    # best effort kill of all processes that might block
    # the umount operation. The priority is given to mass
    # action reliability, ie don't contest operator's will
    cmd = ['sync']
    (ret, out, err) = self.vcall(cmd)
    for i in range(4):
        cmd = ['fuser', '-kcv', self.mount_point]
        (ret, out, err) = self.vcall(cmd, err_to_info=True)
        self.log.info('umount %s'%self.mount_point)
        cmd = ['umount', self.mount_point]
        ret = qcall(cmd)
        if ret == 0:
            break
    return ret
class Mount(Res.Mount):
    """Filesystem resource driver for Tru64 (OSF1)."""

    def __init__(self, **kwargs):
        # cached rcMounts.Mounts() snapshot; None forces a refresh
        self.Mounts = None
        Res.Mount.__init__(self, **kwargs)
        # per-fstype fsck invocation table used by the base class
        self.fsck_h = {
            'ufs': {
                'bin': 'fsck',
                'cmd': ['fsck', '-p', self.device], 'allowed_ret': []
            },
        }

    def is_up(self):
        """Return True if the device is mounted on the mount point."""
        self.Mounts = rcMounts.Mounts()
        ret = self.Mounts.has_mount(self.device, self.mount_point)
        if ret:
            return True
        if self.fs_type not in ["advfs"] + self.netfs:
            # might be a loopback mount
            # NOTE(review): mode is computed but never used and both
            # paths return False — the loopback detection looks
            # unfinished; confirm intended behavior
            try:
                mode = os.stat(self.device)[ST_MODE]
            except:
                self.log.debug("can not stat %s" % self.device)
                return False
        return False

    def devlist(self):
        return self.disklist()

    def disklist(self):
        """Return the set of underlying devices of this mount.

        AdvFS devices are 'domain#fileset': expand the domain to its
        volume names, unless a vg resource already covers it.
        """
        if '#' in self.device:
            dom, fset = self.device.split('#')
            for r in self.svc.get_resources('disk.vg'):
                if r.name == dom:
                    # no need to compute device list: the vg resource will do the job
                    return set([])
            import rcAdvfs
            try:
                o = rcAdvfs.Fdmns()
                d = o.get_fdmn(dom)
            except rcAdvfs.ExInit as e:
                return set([])
            if d is None:
                return set([])
            return set(d.list_volnames())
        else:
            return set([self.device])

    def can_check_writable(self):
        return True

    def start(self):
        """fsck if needed, create the mount point, then mount."""
        if self.Mounts is None:
            self.Mounts = rcMounts.Mounts()
        Res.Mount.start(self)
        if self.is_up() is True:
            self.log.info("%s is already mounted" % self.label)
            return 0
        self.fsck()
        if not os.path.exists(self.mount_point):
            os.makedirs(self.mount_point, 0o755)
        if self.fs_type != "":
            fstype = ['-t', self.fs_type]
        else:
            fstype = []
        if self.mount_options != "":
            mntopt = ['-o', self.mount_options]
        else:
            mntopt = []
        cmd = ['mount']+fstype+mntopt+[self.device, self.mount_point]
        (ret, out, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError
        # invalidate the mounts cache; allow stop-on-failure rollback
        self.Mounts = None
        self.can_rollback = True

    def stop(self):
        """Umount with up to 3 best-effort attempts (see try_umount)."""
        if self.Mounts is None:
            self.Mounts = rcMounts.Mounts()
        if self.is_up() is False:
            self.log.info("%s is already umounted" % self.label)
            return
        for i in range(3):
            ret = try_umount(self)
            if ret == 0: break
        if ret != 0:
            self.log.error('failed to umount %s'%self.mount_point)
            raise ex.excError
        self.Mounts = None
if __name__ == "__main__":
    # print the pydoc help of the classes defined in this module
    for klass in (Mount,):
        help(klass)
opensvc-1.8~20170412/lib/snapLvmLinux.py 0000644 0001750 0001750 00000010555 13073467726 020051 0 ustar jkelbert jkelbert import os
from rcGlobalEnv import rcEnv
from rcUtilities import protected_mount, justcall, which
from rcUtilitiesLinux import lv_info, lv_exists, udevadm_settle
import rcExceptions as ex
import snap
class Snap(snap.Snap):
    """LVM snapshot-based sync driver for Linux.

    snapcreate() makes a read-only lvm snapshot of the mount's lv and
    mounts it under a temp dir; snapdestroykey() unwinds it.
    """

    def mntopt_and_ro(self, m):
        """Return mount options for the snapshot: original options
        forced read-only, plus nouuid for xfs (duplicate uuid)."""
        opt_set = set([])
        if m.fs_type == "xfs":
            opt_set.add("nouuid")
        if m.mount_options is None:
            opt_set.add("ro")
            return ','.join(opt_set)
        opt_set |= set(m.mount_options.split(','))
        opt_set -= set(['rw', 'ro'])
        opt_set |= set(['ro'])
        return ','.join(opt_set)

    def snapcreate(self, m):
        """Create and mount an lvm snapshot of mount resource m.

        Records the snapshot context in self.snaps[m.mount_point].
        Raises the sync* exceptions on the various failure modes.
        """
        snap_name = ''
        snap_mnt = ''
        (vg_name, lv_name, lv_size) = lv_info(self, m.device)
        if lv_name is None:
            self.log.error("can not snap %s: not a logical volume"%m.device)
            raise ex.syncNotSnapable
        snap_name = 'osvc_sync_'+lv_name
        if lv_exists(self, os.path.join(os.sep, 'dev', vg_name, snap_name)):
            self.log.error("snap of %s already exists"%(lv_name))
            raise ex.syncSnapExists
        # default snapshot size: 10% of the origin lv
        if m.snap_size is not None:
            snap_size = m.snap_size
        else:
            snap_size = int(lv_size//10)
        cmd = ['lvcreate', '-A', 'n', '-s', '-L'+str(snap_size)+'M', '-n', snap_name, os.path.join(vg_name, lv_name)]
        self.log.info(' '.join(cmd))
        out, err, ret = justcall(cmd)
        # filter out the harmless '-A n' metadata backup warning
        err_l1 = err.split('\n')
        err_l2 = []
        out_l = out.split('\n')
        for e in err_l1:
            if 'This metadata update is NOT backed up' in e:
                pass
            else:
                err_l2.append(e)
        err = '\n'.join(err_l2)
        out = '\n'.join(out_l)
        if len(out) > 0:
            self.log.info(out)
        if len(err) > 0:
            self.log.error(err)
        if ret != 0:
            raise ex.syncSnapCreateError
        snap_mnt = os.path.join(rcEnv.pathtmp,
                                'osvc_sync_'+vg_name+'_'+lv_name)
        if not os.path.exists(snap_mnt):
            os.makedirs(snap_mnt, 0o755)
        snap_dev = os.path.join(os.sep, 'dev', vg_name, snap_name)
        # xfs is mounted nouuid instead of being fsck'ed
        if m.fs_type != "xfs":
            self.vcall(['fsck', '-a', snap_dev], err_to_warn=True)
        (ret, buff, err) = self.vcall(['mount', '-t', m.fs_type, '-o', self.mntopt_and_ro(m), snap_dev, snap_mnt])
        if ret != 0:
            # dump diagnostics, then clean up the snapshot lv
            self.vcall(["mount"])
            self.vcall(["fuser", "-v", snap_mnt])
            self.vcall(['lvremove', '-A', 'n', '-f', snap_dev])
            raise ex.syncSnapMountError
        self.snaps[m.mount_point] = dict(lv_name=lv_name,
                                         vg_name=vg_name,
                                         snap_name=snap_name,
                                         snap_mnt=snap_mnt,
                                         snap_dev=snap_dev)

    def snapdestroykey(self, s):
        """Umount and remove the snapshot recorded under key s."""
        if protected_mount(self.snaps[s]['snap_mnt']):
            self.log.error("the snapshot is no longer mounted in %s. panic."%self.snaps[s]['snap_mnt'])
            raise ex.excError
        cmd = ['fuser', '-kmv', self.snaps[s]['snap_mnt']]
        (ret, out, err) = self.vcall(cmd, err_to_info=True)
        cmd = ['umount', self.snaps[s]['snap_mnt']]
        (ret, out, err) = self.vcall(cmd)
        udevadm_settle()
        cmd = ['lvremove', '-A', 'n', '-f', self.snaps[s]['snap_dev']]
        self.log.info(' '.join(cmd))
        # lvremove can transiently fail while udev holds the device:
        # retry up to 29 times
        # NOTE(review): there is no delay between retries — confirm intended
        for i in range(1, 30):
            out, err, ret = justcall(cmd)
            if ret == 0:
                break
        # filter out harmless lvm messages from the last attempt
        err_l1 = err.split('\n')
        err_l2 = []
        out_l = out.split('\n')
        for e in err_l1:
            if 'This metadata update is NOT backed up' in e:
                pass
            elif 'Falling back to direct link removal.' in e:
                out_l.append(e)
            elif 'Falling back to direct node removal.' in e:
                out_l.append(e)
            else:
                err_l2.append(e)
        err = '\n'.join(err_l2)
        out = '\n'.join(out_l)
        if len(out) > 0:
            self.log.info(out)
        if len(err) > 0:
            self.log.error(err)
        if ret != 0:
            self.log.error("failed to remove snapshot %s (attempts: %d)"%(self.snaps[s]['snap_dev'], i))
        elif i > 1:
            self.log.info("successfully removed snapshot %s (attempts: %d)"%(self.snaps[s]['snap_dev'], i))
        del(self.snaps[s])
opensvc-1.8~20170412/lib/rcAsset.py 0000644 0001750 0001750 00000060212 13073467726 017010 0 ustar jkelbert jkelbert from rcGlobalEnv import rcEnv
import os
from subprocess import *
import datetime
from rcUtilities import try_decode, justcall, which
class Asset(object):
    """
    Node asset inventory collector.

    Each get_<prop>() method resolves a property with the precedence:

      1. the node configuration file ([node] section),
      2. a platform probe (a _get_<prop>() method implemented by
         OS-specific subclasses), when one exists,
      3. a hardcoded default.

    The value is printed along with its source, and returned.
    get_asset_dict() drives a full collection and returns the
    aggregated dict.
    """

    # source labels used in the printed reports
    s_config = "node configuration file"
    s_probe = "probe"
    s_default = "default"

    def __init__(self, node):
        # node: the Node object, providing .config (ConfigParser-like)
        # and .agent_version()
        self.node = node

    #
    # generic resolution and formatting helpers
    #

    def _from_conf(self, key, section='node'):
        """Return the raw value of <section>.<key> from the node
        configuration. Raise if the option is not set."""
        return self.node.config.get(section, key)

    def _resolve(self, key, default):
        """Resolve <key>: configuration file first, then the platform
        probe _get_<key>() if the subclass implements it, then
        <default>. Return a (value, source) tuple."""
        try:
            return self._from_conf(key), self.s_config
        except Exception:
            pass
        try:
            # AttributeError (probe not implemented) and probe failures
            # both fall through to the default
            return getattr(self, "_get_" + key)(), self.s_probe
        except Exception:
            pass
        return default, self.s_default

    def _resolve_conf_only(self, key, default=None):
        """Resolve <key> from the configuration file only.
        Return a (value, source) tuple."""
        try:
            return self._from_conf(key), self.s_config
        except Exception:
            return default, self.s_default

    def _print_prop(self, title, s, source, fmt=" %s"):
        """Print one property block: a title line with the value
        source, then the value formatted with <fmt>."""
        print("%s (%s)" % (title, source))
        print(fmt % s)

    #
    # memory
    #

    def get_mem_bytes(self):
        s, source = self._resolve('mem_bytes', '0')
        self.print_mem_bytes(s, source)
        return s

    def print_mem_bytes(self, s, source):
        self._print_prop("mem", s, source, " %s MB")

    def get_mem_banks(self):
        s, source = self._resolve('mem_banks', '0')
        self.print_mem_banks(s, source)
        return s

    def print_mem_banks(self, s, source):
        self._print_prop("mem banks", s, source)

    def get_mem_slots(self):
        s, source = self._resolve('mem_slots', '0')
        self.print_mem_slots(s, source)
        return s

    def print_mem_slots(self, s, source):
        self._print_prop("mem slots", s, source)

    #
    # operating system
    #

    def get_os_vendor(self):
        s, source = self._resolve('os_vendor', 'Unknown')
        self.print_os_vendor(s, source)
        return s

    def print_os_vendor(self, s, source):
        self._print_prop("os vendor", s, source)

    def get_os_release(self):
        s, source = self._resolve('os_release', 'Unknown')
        self.print_os_release(s, source)
        return s

    def print_os_release(self, s, source):
        self._print_prop("os release", s, source)

    def get_os_kernel(self):
        s, source = self._resolve('os_kernel', 'Unknown')
        self.print_os_kernel(s, source)
        return s

    def print_os_kernel(self, s, source):
        self._print_prop("os kernel", s, source)

    def get_os_arch(self):
        s, source = self._resolve('os_arch', 'Unknown')
        self.print_os_arch(s, source)
        return s

    def print_os_arch(self, s, source):
        self._print_prop("os arch", s, source)

    #
    # cpu
    #

    def get_cpu_freq(self):
        s, source = self._resolve('cpu_freq', '0')
        self.print_cpu_freq(s, source)
        return s

    def print_cpu_freq(self, s, source):
        self._print_prop("cpu freq", s, source, " %s Mhz")

    def get_cpu_threads(self):
        s, source = self._resolve('cpu_threads', '0')
        self.print_cpu_threads(s, source)
        return s

    def print_cpu_threads(self, s, source):
        self._print_prop("cpu threads", s, source)

    def get_cpu_cores(self):
        s, source = self._resolve('cpu_cores', '0')
        self.print_cpu_cores(s, source)
        return s

    def print_cpu_cores(self, s, source):
        self._print_prop("cpu cores", s, source)

    def get_cpu_dies(self):
        s, source = self._resolve('cpu_dies', '0')
        self.print_cpu_dies(s, source)
        return s

    def print_cpu_dies(self, s, source):
        self._print_prop("cpu dies", s, source)

    def get_cpu_model(self):
        s, source = self._resolve('cpu_model', 'Unknown')
        self.print_cpu_model(s, source)
        return s

    def print_cpu_model(self, s, source):
        self._print_prop("cpu model", s, source)

    #
    # hardware identity
    #

    def get_serial(self):
        s, source = self._resolve('serial', 'Unknown')
        self.print_serial(s, source)
        return s

    def print_serial(self, s, source):
        self._print_prop("serial", s, source)

    def get_bios_version(self):
        s, source = self._resolve('bios_version', '')
        self.print_bios_version(s, source)
        return s

    def print_bios_version(self, s, source):
        self._print_prop("bios version", s, source)

    def get_sp_version(self):
        s, source = self._resolve('sp_version', '')
        self.print_sp_version(s, source)
        return s

    def print_sp_version(self, s, source):
        self._print_prop("sp version", s, source)

    def get_enclosure(self):
        s, source = self._resolve('enclosure', 'Unknown')
        self.print_enclosure(s, source)
        return s

    def print_enclosure(self, s, source):
        self._print_prop("enclosure", s, source)

    def get_model(self):
        s, source = self._resolve('model', 'Unknown')
        self.print_model(s, source)
        return s

    def print_model(self, s, source):
        self._print_prop("model", s, source)

    #
    # timezone
    #

    def get_tz(self):
        """Timezone as a +HH:MM utc offset. Probe errors are printed
        but non-fatal; nothing is printed when no value was found."""
        s = None
        source = self.s_default
        try:
            s = self._from_conf('tz')
            source = self.s_config
        except Exception:
            try:
                s = self._get_tz()
                source = self.s_probe
            except Exception as e:
                print(e)
        if s:
            self.print_tz(s, source)
        return s

    def _get_tz(self):
        """Probe the utc offset with date(1), normalized from +HHMM to
        +HH:MM. Return None on unexpected output."""
        cmd = ["date", "+%z"]
        out, err, ret = justcall(cmd)
        out = out.strip()
        if len(out) != 5:
            return
        return out[:3] + ":" + out[3:]

    def print_tz(self, s, source):
        self._print_prop("timezone", s, source)

    #
    # collector connection address
    #

    def get_connect_to(self):
        s, source = self._resolve('connect_to', None)
        if s:
            self.print_connect_to(s, source)
        return s

    def _get_connect_to(self):
        """On Google Cloud instances, probe the external NAT address
        through gcloud. Requires self.data["model"] to be set, so this
        must run after get_model() (get_asset_dict guarantees it)."""
        if self.data["model"] != "Google":
            return
        if not which("gcloud"):
            return
        cmd = ["gcloud", "compute", "instances", "describe", "-q",
               "--format", "json", rcEnv.nodename]
        out, err, ret = justcall(cmd)
        # relevant payload excerpt:
        #   "networkInterfaces": [
        #       {"accessConfigs": [{"natIP": "23.251.137.71", ...}],
        #        "name": "nic0", "networkIP": "10.132.0.2"}
        #   ]
        import json
        try:
            data = json.loads(out)
        except Exception:
            return
        nics = [d for d in data["networkInterfaces"] if len(d["accessConfigs"]) > 0]
        if len(nics) == 0:
            return
        # prefer nic0, fall back to the first nat-ed nic
        for nic in nics:
            if nic["name"] == "nic0":
                return nic["accessConfigs"][0]["natIP"]
        return nics[0]["accessConfigs"][0]["natIP"]

    def print_connect_to(self, s, source):
        self._print_prop("connect to address", s, source)

    #
    # node environment
    #

    def get_node_env(self):
        """Node environment code (TST by default). The deprecated
        'host_mode' keyword is honored for backward compatibility."""
        s = 'TST'
        source = self.s_default
        try:
            if self.node.config.has_option('node', 'env'):
                s = self._from_conf('env')
                source = self.s_config
            elif self.node.config.has_option('node', 'host_mode'):
                # compat
                s = self._from_conf('host_mode')
                source = self.s_config
        except Exception:
            pass
        self.print_node_env(s, source)
        return s

    def print_node_env(self, s, source):
        self._print_prop("environment", s, source)

    def get_sec_zone(self):
        s, source = self._resolve_conf_only('sec_zone')
        self.print_sec_zone(s, source)
        return s

    def print_sec_zone(self, s, source):
        if s is None:
            return
        self._print_prop("security zone", s, source)

    def get_asset_env(self):
        s = None
        source = self.s_default
        try:
            s = try_decode(self._from_conf('asset_env'))
            source = self.s_config
        except Exception:
            pass
        self.print_asset_env(s, source)
        return s

    def print_asset_env(self, s, source):
        if s is None:
            return
        self._print_prop("asset environment", s, source)

    def get_version(self):
        s = self.node.agent_version()
        self.print_version(s)
        return s

    def print_version(self, s):
        print("agent version")
        print(" %s" % s)

    def get_listener_port(self):
        s = str(rcEnv.listener_port)
        source = self.s_default
        try:
            s = str(self.node.config.getint('listener', 'port'))
            source = self.s_config
        except Exception:
            pass
        self.print_listener_port(s, source)
        return s

    def print_listener_port(self, s, source):
        if s is None:
            return
        self._print_prop("listener port", s, source)

    #
    # optional configuration-only properties (location, teams)
    #

    def print_generic_cf(self, s, source, title):
        """Print helper for optional properties: silent when unset."""
        if s is None:
            return
        self._print_prop(title, s, source)

    def get_loc_country(self):
        s, source = self._resolve_conf_only('loc_country')
        self.print_generic_cf(s, source, "location country")
        return s

    def get_loc_city(self):
        s, source = self._resolve_conf_only('loc_city')
        self.print_generic_cf(s, source, "location city")
        return s

    def get_loc_addr(self):
        s, source = self._resolve_conf_only('loc_addr')
        # fixed: the title was copy-pasted as "location city"
        self.print_generic_cf(s, source, "location address")
        return s

    def get_loc_building(self):
        s, source = self._resolve_conf_only('loc_building')
        self.print_generic_cf(s, source, "location building")
        return s

    def get_loc_floor(self):
        s, source = self._resolve_conf_only('loc_floor')
        self.print_generic_cf(s, source, "location floor")
        return s

    def get_loc_room(self):
        s, source = self._resolve_conf_only('loc_room')
        self.print_generic_cf(s, source, "location room")
        return s

    def get_loc_rack(self):
        s, source = self._resolve_conf_only('loc_rack')
        self.print_generic_cf(s, source, "location rack")
        return s

    def get_loc_zip(self):
        s, source = self._resolve_conf_only('loc_zip')
        self.print_generic_cf(s, source, "location zip")
        return s

    def get_team_integ(self):
        s, source = self._resolve_conf_only('team_integ')
        self.print_generic_cf(s, source, "team integration")
        return s

    def get_team_support(self):
        s, source = self._resolve_conf_only('team_support')
        self.print_generic_cf(s, source, "team support")
        return s

    #
    # san connectivity (probes implemented by subclasses)
    #

    def get_hba(self):
        try:
            hba = self._get_hba()
        except Exception:
            hba = []
        self.print_hba(hba)
        return hba

    def print_hba(self, hba):
        # hba items look like (id, type) pairs -- printed type first
        print("hba (probe)")
        for h in hba:
            print(" %-5s %s" % (h[1], h[0]))

    def get_targets(self):
        try:
            s = self._get_targets()
        except Exception:
            s = []
        self.print_targets(s)
        return s

    def print_targets(self, targets):
        print("paths to targets (probe)")
        for t in targets:
            print(" %s - %s" % t)

    #
    # users and groups
    #

    def get_uids(self):
        return self.get_ids("/etc/passwd")

    def get_gids(self):
        return self.get_ids("/etc/group")

    def get_ids(self, p):
        """Parse a passwd/group formatted file and return a list of
        (name, numeric id as string) tuples. Lines with a non-numeric
        id field or fewer than 3 fields are skipped."""
        if rcEnv.sysname == "Windows":
            return []
        if not os.path.exists(p):
            return []
        with open(p, 'r') as f:
            buff = f.read()
        d = []
        for line in buff.split('\n'):
            line = line.strip()
            if line.startswith("#"):
                continue
            l = line.split(':')
            if len(l) < 3:
                continue
            try:
                int(l[2])
            except ValueError:
                continue
            # the repr/strip dance normalizes non-ascii names on python2
            name = repr(l[0]).strip("'")
            d.append((name, l[2]))
        return d

    #
    # network inventory
    #

    def get_lan(self):
        """Inventory network interfaces, keyed by hardware address.
        Each entry lists ipv4/ipv6 unicast and multicast addresses."""
        kwargs = {'mcast': True}
        if rcEnv.sysname == 'HP-UX':
            kwargs['hwaddr'] = True
        rcIfconfig = __import__('rcIfconfig' + rcEnv.sysname)
        ifconfig = rcIfconfig.ifconfig(**kwargs)
        lan = {}
        for intf in ifconfig.intf:
            if len(intf.hwaddr) == 0:
                continue
            if intf.hwaddr not in lan:
                lan[intf.hwaddr] = []
            # intf.ipaddr is either a single address or a list of them
            if isinstance(intf.ipaddr, str) and intf.ipaddr != '':
                lan[intf.hwaddr].append({
                    'type': 'ipv4',
                    'intf': intf.name,
                    'addr': intf.ipaddr,
                    'mask': intf.mask,
                    'flag_deprecated': intf.flag_deprecated,
                })
            elif isinstance(intf.ipaddr, list):
                for i, ip in enumerate(intf.ipaddr):
                    if ip == '':
                        continue
                    lan[intf.hwaddr].append({
                        'type': 'ipv4',
                        'intf': intf.name,
                        'addr': ip,
                        'mask': intf.mask[i],
                        'flag_deprecated': intf.flag_deprecated,
                    })
            for i in range(len(intf.ip6addr)):
                lan[intf.hwaddr].append({
                    'type': 'ipv6',
                    'intf': intf.name,
                    'addr': intf.ip6addr[i],
                    'mask': intf.ip6mask[i],
                    'flag_deprecated': intf.flag_deprecated,
                })
            if intf.name in ifconfig.mcast_data:
                for addr in ifconfig.mcast_data[intf.name]:
                    addr_type = 'ipv6' if ':' in addr else 'ipv4'
                    lan[intf.hwaddr].append({
                        'type': addr_type,
                        'intf': intf.name,
                        'addr': addr,
                        'mask': "",
                        'flag_deprecated': intf.flag_deprecated,
                    })
        self.print_lan(lan)
        return lan

    def print_lan(self, lan):
        print("lan (probe)")
        for h, l in lan.items():
            for d in l:
                if d['mask'] != "":
                    addr_mask = "%s/%s" % (d['addr'], d['mask'])
                else:
                    addr_mask = d['addr']
                s = " %s %-8s %-5s %s" % (h, d['intf'], d['type'], addr_mask)
                if d['flag_deprecated']:
                    s += " (deprecated)"
                print(s)

    #
    # last boot
    #

    def get_last_boot(self):
        """Approximate the last boot date (YYYY-MM-DD) from the "days"
        token of uptime(1) output. Returns None when uptime output is
        unparseable."""
        os.environ["LANG"] = "C"
        cmd = ["/usr/bin/uptime"]
        p = Popen(cmd, stdout=PIPE)
        out, err = p.communicate()
        # Popen returns bytes on python3: decode before string compares
        out = try_decode(out)
        l = out.split()
        i = 0
        for s in ("days,", "day(s),"):
            try:
                i = l.index(s)
                break
            except ValueError:
                pass
        if i == 0:
            # no "days" token: up for less than a day
            last = datetime.datetime.now()
        else:
            try:
                last = datetime.datetime.now() - datetime.timedelta(days=int(l[i-1]))
            except Exception:
                return
        last = last.strftime("%Y-%m-%d")
        self.print_last_boot(last)
        return last

    def print_last_boot(self, last):
        print("last boot (probe)")
        print(" %s" % last)

    #
    # aggregation
    #

    def get_asset_dict(self):
        """Collect every asset property and return the aggregated dict.
        Optional properties are only stored when a value was found."""
        self.data = {}
        self.data['nodename'] = rcEnv.nodename
        self.data['fqdn'] = rcEnv.fqdn
        self.data['version'] = self.get_version()
        self.data['os_name'] = rcEnv.sysname
        self.data['os_vendor'] = self.get_os_vendor()
        self.data['os_release'] = self.get_os_release()
        self.data['os_kernel'] = self.get_os_kernel()
        self.data['os_arch'] = self.get_os_arch()
        self.data['mem_bytes'] = self.get_mem_bytes()
        self.data['mem_banks'] = self.get_mem_banks()
        self.data['mem_slots'] = self.get_mem_slots()
        self.data['cpu_freq'] = self.get_cpu_freq()
        self.data['cpu_threads'] = self.get_cpu_threads()
        self.data['cpu_cores'] = self.get_cpu_cores()
        self.data['cpu_dies'] = self.get_cpu_dies()
        self.data['cpu_model'] = self.get_cpu_model()
        self.data['serial'] = self.get_serial()
        self.data['model'] = self.get_model()
        self.data['bios_version'] = self.get_bios_version()
        self.data['sp_version'] = self.get_sp_version()
        self.data['node_env'] = self.get_node_env()
        self.data['enclosure'] = self.get_enclosure()
        self.data['listener_port'] = self.get_listener_port()
        # optional properties, collection order preserved from the
        # historical implementation (it drives the print order)
        for key, getter in (
                ('connect_to', self.get_connect_to),
                ('last_boot', self.get_last_boot),
                ('sec_zone', self.get_sec_zone),
                ('asset_env', self.get_asset_env),
                ('tz', self.get_tz),
                ('loc_country', self.get_loc_country),
                ('loc_city', self.get_loc_city),
                ('loc_building', self.get_loc_building),
                ('loc_room', self.get_loc_room),
                ('loc_rack', self.get_loc_rack),
                ('loc_addr', self.get_loc_addr),
                ('loc_floor', self.get_loc_floor),
                ('loc_zip', self.get_loc_zip),
                ('team_integ', self.get_team_integ),
                ('team_support', self.get_team_support),
                ('hba', self.get_hba),
                ('targets', self.get_targets),
                ('lan', self.get_lan),
                ('uids', self.get_uids),
                ('gids', self.get_gids),
        ):
            val = getter()
            if val is not None:
                self.data[key] = val
        return self.data
opensvc-1.8~20170412/lib/rcGlobalEnv.py 0000644 0001750 0001750 00000020571 13073467726 017606 0 ustar jkelbert jkelbert """module rcGlobalEnv module define rcEnv class
rcEnv class attribute may be updated with rcLocalEnv module if present
rcLocalEnv module is not provided with opensvc and allow customers to
redefine following vars:
o dbopensvc_host
o dbopensvc_port
o rsh
o rcp
rcLocalEnv.py may be installed into path_opensvc/lib
"""
import sys
import os
import platform
import socket
import uuid
import time
class Storage(dict):
    """A dict subclass exposing keys as attributes.

    Missing keys resolve to None (both as attribute and item access)
    instead of raising. Pickling state is disabled; shallow copies
    yield a new Storage.
    """
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__
    __getitem__ = dict.get
    __getattr__ = dict.get
    # fixed: the format string had lost its '<Storage %s>' payload,
    # making every repr() raise "not all arguments converted"
    __repr__ = lambda self: '<Storage %s>' % dict.__repr__(self)
    __getstate__ = lambda self: None
    __copy__ = lambda self: Storage(self)
def get_osvc_paths(osvc_root_path=None, sysname=None, detect=False):
    """Return a Storage mapping every OpenSVC filesystem location.

    Installation root precedence: explicit osvc_root_path, then
    autodetection relative to this module when detect=True, else the
    packaged default /usr/share/opensvc. The packaged root selects the
    FHS layout; any other root selects the self-contained layout.
    """
    paths = Storage()

    if osvc_root_path:
        paths.pathsvc = osvc_root_path
    elif detect:
        paths.pathsvc = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
    else:
        paths.pathsvc = '/usr/share/opensvc'

    if paths.pathsvc == '/usr/share/opensvc':
        # OS-packaged install: spread over the standard hierarchy
        paths.pathlib = '/usr/share/opensvc/lib'
        paths.pathbin = '/usr/bin'
        paths.pathetc = '/etc/opensvc'
        paths.pathlog = '/var/log/opensvc'
        paths.pathtmp = '/var/tmp/opensvc'
        paths.pathvar = '/var/lib/opensvc'
        paths.pathdoc = '/usr/share/doc/opensvc'
        paths.pathlock = '/var/lib/opensvc/lock'
        paths.pathcron = '/usr/share/opensvc'
        paths.postinstall = '/usr/share/opensvc/bin/postinstall'
    else:
        # self-contained install: everything under the root
        paths.pathlib = os.path.join(paths.pathsvc, 'lib')
        paths.pathbin = os.path.join(paths.pathsvc, 'bin')
        paths.pathetc = os.path.join(paths.pathsvc, 'etc')
        paths.pathlog = os.path.join(paths.pathsvc, 'log')
        paths.pathtmp = os.path.join(paths.pathsvc, 'tmp')
        paths.pathvar = os.path.join(paths.pathsvc, 'var')
        paths.pathdoc = os.path.join(paths.pathsvc, 'usr', 'share', 'doc')
        paths.pathlock = os.path.join(paths.pathvar, 'lock')
        paths.pathcron = paths.pathbin
        paths.postinstall = os.path.join(paths.pathbin, 'postinstall')

    if str(sysname).lower() == "windows":
        # windows entry points are .cmd wrappers at the install root
        paths.svcmgr = os.path.join(paths.pathsvc, "svcmgr.cmd")
        paths.nodemgr = os.path.join(paths.pathsvc, "nodemgr.cmd")
        paths.svcmon = os.path.join(paths.pathsvc, "svcmon.cmd")
        paths.cron = os.path.join(paths.pathsvc, "cron.cmd")
    else:
        paths.svcmgr = os.path.join(paths.pathbin, "svcmgr")
        paths.nodemgr = os.path.join(paths.pathbin, "nodemgr")
        paths.svcmon = os.path.join(paths.pathbin, "svcmon")
        paths.cron = os.path.join(paths.pathcron, "cron")

    paths.nodeconf = os.path.join(paths.pathetc, "node.conf")
    paths.authconf = os.path.join(paths.pathetc, "auth.conf")
    paths.pathcomp = os.path.join(paths.pathvar, "compliance")
    paths.drp_path = os.path.join(paths.pathvar, "cache")
    return paths
def create_or_update_dir(d):
    """Create directory <d> (with parents) if missing; if it already
    exists, bump its timestamps so tmpwatch-like reapers do not remove
    it while the agent runs.

    The creation is attempted first (EAFP) to avoid the check-then-act
    race of the previous exists()/makedirs() sequence.
    """
    try:
        os.makedirs(d)
        return
    except OSError:
        # re-raise real creation failures; fall through when the path
        # already exists (possibly created concurrently)
        if not os.path.exists(d):
            raise
    # update tmpdir timestamp to avoid tmpwatch kicking-in while we run
    now = time.time()
    try:
        os.utime(d, (now, now))
    except Exception:
        # unprivileged: best effort only
        pass
class rcEnv:
    """Class to store globals
    """

    # unique id for this agent invocation, evaluated once at import
    session_uuid = str(uuid.uuid4())

    # default node environment; environments a service may be tagged with
    node_env = "TST"
    allowed_svc_envs = [
        'DEV',
        'DRP',
        'FOR',
        'INT',
        'PRA',
        'PRD',
        'PRJ',
        'PPRD',
        'REC',
        'STG',
        'TMP',
        'TST',
        'UAT',
    ]

    # host identity, probed at import time
    _platform = sys.platform
    # only sysname and machine are kept; other uname fields discarded
    sysname, x, x, x, machine, x = platform.uname()
    nodename = socket.gethostname().lower()
    fqdn = socket.getfqdn().lower()
    listener_port = 1214

    """program used to execute remote command on other nodes or virtual hosts
    """
    # per-platform ssh/scp command lines (batch mode, no host key prompt)
    if _platform == "sunos5" :
        if os.path.exists('/usr/local/bin/ssh'):
            rsh = "/usr/local/bin/ssh -q -o StrictHostKeyChecking=no -o ForwardX11=no -o BatchMode=yes -o ConnectTimeout=10"
            rcp = "/usr/local/bin/scp -q -o StrictHostKeyChecking=no -o ForwardX11=no -o BatchMode=yes -o ConnectTimeout=10"
        else:
            rsh = "/usr/bin/ssh -q -o StrictHostKeyChecking=no -o ForwardX11=no -o BatchMode=yes -n"
            rcp = "/usr/bin/scp -q -o StrictHostKeyChecking=no -o ForwardX11=no -o BatchMode=yes"
    elif os.path.exists('/etc/vmware-release'):
        rsh = "/usr/bin/ssh -q -o StrictHostKeyChecking=no -o ForwardX11=no -o BatchMode=yes"
        rcp = "/usr/bin/scp -q -o StrictHostKeyChecking=no -o ForwardX11=no -o BatchMode=yes"
    elif sysname == 'OSF1':
        rsh = "ssh -o StrictHostKeyChecking=no -o ForwardX11=no -o BatchMode=yes -o ConnectTimeout=10"
        rcp = "scp -o StrictHostKeyChecking=no -o ForwardX11=no -o BatchMode=yes -o ConnectTimeout=10"
    else:
        rsh = "/usr/bin/ssh -q -o StrictHostKeyChecking=no -o ForwardX11=no -o BatchMode=yes -o ConnectTimeout=10"
        rcp = "/usr/bin/scp -q -o StrictHostKeyChecking=no -o ForwardX11=no -o BatchMode=yes -o ConnectTimeout=10"

    """EZ-HA defines. EZ-HA does heartbeat, stonith, automatic service failover
    ez_last_chance == True:
    check_up_script_gen.sh will try a ping + RSH other node before stonith
    ez_startapp_bg == True:
    startapp in background if EZ-HA take-over is successful
    """
    ez_path = "/usr/local/cluster"
    ez_path_services = ez_path + "/conf/services"
    ez_last_chance = True
    ez_startapp_bg = True

    """Directory on DRP node where to store the PRD nodes files necessary
    for takeover.
    """
    # rsync exclude patterns applied to the data sync lists below
    drp_sync_excludes = [
        '--exclude="/spice"',
        '--exclude="/dbadm"',
        '--exclude="*.dmp"',
        '--exclude="*.dbf"',
        '--exclude="*.rdo"',
        '--exclude="*.log"',
        '--exclude="*.Z"',
        '--exclude="*.gz"',
        '--exclude="*.tgz"',
        '--exclude="*.tar"',
        '--exclude="*.tmp"',
        '--exclude="/oracle/ficimp"',
        '--exclude="/oracle/tmp"',
        '--exclude="/oracle/LOG/*"',
        '--exclude="/oracle/product/*/network/log/listener*.log"',
    ]
    # per-OS system files replicated to the DRP node
    drp_sync_etc_solaris = [
        "/etc/inet",
        "/etc/inetd.conf",
        "/etc/defaultdomain",
        "/etc/lp",
        "/etc/printers.conf",
        "/etc/system",
        "/etc/auto_master",
        "/etc/auto_home",
        "/etc/hosts.equiv",
        "/etc/pam.conf",
        "/etc/cron.d",
    ]
    drp_sync_etc_linux = [
        # linux
        "/etc/xinetd.d",
        "/etc/xinetd.conf",
        "/etc/sysconfig",
        "/etc/cups",
        "/etc/auto.master",
        "/etc/auto.misc",
        "/etc/listener.ora",
        "/etc/oratab",
        "/etc/sqlnet.ora",
        "/etc/tnsnames.ora",
        "/etc/yp.conf",
        "/etc/pam.d",
        "/etc/cron.allow",
        "/etc/cron.deny",
    ]
    drp_sync_etc_common = [
        # common
        "/etc/shadow",
        "/etc/passwd",
        "/etc/group",
        "/etc/syslog.conf",
        "/etc/services",
        "/etc/hosts",
        "/etc/nsswitch.conf",
        "/etc/sudoers",
        "/etc/project",
        "/etc/user_attr",
        "/etc/ssh",
        "/etc/centrifydc",
        "/etc/krb5*",
        "/etc/sudoers",
    ]
    drp_sync_misc = [
        "/var/centrifydc",
        "/var/opt/oracle",
        "/var/spool/cron",
        "/var/spool/cron/crontabs",
        "/var/yp/binding",
        "/usr/local/oraenv", "/usr/local/coraenv", "/usr/local/dbhome",
        "/usr/local/etc/sudoers",
    ]
    # (paths, excludes) pairs consumed by the DRP sync logic
    drp_sync_files = [
        [drp_sync_etc_solaris + drp_sync_etc_linux + drp_sync_etc_common + drp_sync_misc, []],
        [["/home/oracle", "/home/sybase", "/opt/oracle", "/opt/sybase"], drp_sync_excludes],
    ]

    # virtualization technology families
    vt_cloud = ['vcloud', 'openstack', 'amazon']
    vt_libvirt = ['kvm']
    vt_vm = ['ldom', 'hpvm', 'kvm', 'xen', 'vbox', 'ovm', 'esx'] + vt_cloud
    vt_container = ['zone', 'lxc', 'jail', 'vz', 'srp', 'docker']
    vt_supported = vt_vm + vt_container

    # collector endpoints; the literal string "None" disables them
    dbopensvc = "None"
    dbopensvc_host = None
    dbcompliance = "None"
    dbcompliance_host = None

    # resolve installation paths once and mirror them as flat attributes
    paths = get_osvc_paths(sysname=sysname, detect=True)
    pathsvc = paths.pathsvc
    pathlib = paths.pathlib
    pathbin = paths.pathbin
    pathetc = paths.pathetc
    pathlog = paths.pathlog
    pathtmp = paths.pathtmp
    pathvar = paths.pathvar
    pathdoc = paths.pathdoc
    pathlock = paths.pathlock
    pathcron = paths.pathcron
    postinstall = paths.postinstall
    svcmgr = paths.svcmgr
    svcmon = paths.svcmon
    nodemgr = paths.nodemgr
    cron = paths.cron
    nodeconf = paths.nodeconf
    authconf = paths.authconf
    pathcomp = paths.pathcomp
    drp_path = paths.drp_path
    # side effect at import: make sure the agent tmp dir exists and is fresh
    create_or_update_dir(paths.pathtmp)
opensvc-1.8~20170412/lib/rcXtremio.py 0000644 0001750 0001750 00000066467 13073467726 017402 0 ustar jkelbert jkelbert from __future__ import print_function
import sys
import os
import requests
import ConfigParser
import json
import rcExceptions as ex
from rcGlobalEnv import rcEnv, Storage
from rcUtilities import justcall,convert_size
from rcOptParser import OptParser
from optparse import Option
# Array management endpoints commonly present self-signed certificates.
# Silence urllib3's InsecureRequestWarning when the installed requests
# version bundles urllib3 (the guard covers versions that do not).
try:
    requests.packages.urllib3.disable_warnings()
except:
    pass

# TLS certificate verification is disabled for every api call
verify = False

# program name displayed by the option parser
PROG = "nodemgr array"
OPT = Storage({
"help": Option(
"-h", "--help", action="store_true", dest="parm_help",
help="show this help message and exit"),
"array": Option(
"-a", "--array", action="store", dest="array_name",
help="The name of the array, as defined in auth.conf"),
"cluster": Option(
"--cluster", action="store", dest="cluster",
help="The name or id of the arry cluster. Optional for single-cluster setups, mandatory otherwise"),
"name": Option(
"--name", action="store", dest="name",
help="The object name"),
"size": Option(
"--size", action="store", dest="size",
help="The disk size, expressed as a size expression like 1g, 100mib, ..."),
"tags": Option(
"--tag", action="append", dest="tags",
help="An object tag. Can be set multiple times."),
"blocksize": Option(
"--blocksize", type=int, action="store", dest="blocksize",
help="The exported disk blocksize in B"),
"alignment_offset": Option(
"--alignment-offset", type=int, action="store", dest="alignment_offset",
help="The alignment offset for Volumes of 512 LB size is between 0 and "
"7. If omitted, the offset value is 0. Volumes of logical block "
"size 4096 must not be defined with an offset."),
"small_io_alerts": Option(
"--small-io-alerts", action="store", dest="small_io_alerts",
choices=["enabled", "disabled"],
help="Enable or disable small input/output Alerts"),
"unaligned_io_alerts": Option(
"--unaligned-io-alerts", action="store", dest="unaligned_io_alerts",
choices=["enabled", "disabled"],
help="Enable or disable unaligned input/output Alerts"),
"vaai_tp_alerts": Option(
"--vaai-tp-alerts", action="store", dest="vaai_tp_alerts",
choices=["enabled", "disabled"],
help="Enable or disable VAAI TP Alerts"),
"access": Option(
"--access", action="store", dest="access",
choices=["no_access", "read_access", "write_access"],
help="A Volume is created with write access rights."
"Volumes can be modified after being created and"
"have their access levels' changed."),
"naa": Option(
"--naa", action="store", dest="naa",
help="The volume naa identifier"),
"mappings": Option(
"--mappings", action="append", dest="mappings",
help="A :,,... mapping used in add map in replacement of --targetgroup and --initiatorgroup. Can be specified multiple times."),
"initiators": Option(
"--initiators", action="append", dest="initiators",
help="An initiator id. Can be specified multiple times."),
"initiator": Option(
"--initiator", action="store", dest="initiator",
help="An initiator id"),
"targets": Option(
"--targets", action="append", dest="targets",
help="A target name to export the disk through. Can be set multiple times."),
"target": Option(
"--target", action="store", dest="target",
help="A target name or id"),
"targetgroup": Option(
"--targetgroup", action="store", dest="targetgroup",
help="A target group name or id"),
"initiatorgroup": Option(
"--initiatorgroup", action="store", dest="initiatorgroup",
help="The initiator group id or name"),
"volume": Option(
"--volume", action="store", dest="volume",
help="A volume name or id"),
"lun": Option(
"--lun", action="store", type=int, dest="lun",
help="Unique LUN identification, exposing the Volume to"
"the host"),
"mapping": Option(
"--mapping", action="store", type=int, dest="mapping",
help="A lun mapping index"),
})
# Options accepted by every action.
GLOBAL_OPTS = [
    OPT.array,
    OPT.cluster,
]

# Actions still recognized but hidden from the help output.
DEPRECATED_ACTIONS = []

# Action dispatch table: help section -> action name -> description and
# accepted options. Consumed by rcOptParser.OptParser.
ACTIONS = {
    "Add actions": {
        "add_disk": {
            "msg": "Add a volume",
            "options": [
                OPT.name,
                OPT.size,
                OPT.blocksize,
                OPT.tags,
                OPT.alignment_offset,
                OPT.small_io_alerts,
                OPT.unaligned_io_alerts,
                OPT.vaai_tp_alerts,
                OPT.access,
                OPT.mappings,
            ],
        },
        "add_map": {
            "msg": "Map a volume to an initiator group and target group",
            "options": [
                OPT.volume,
                OPT.mappings,
                OPT.initiatorgroup,
                OPT.targetgroup,
            ],
        },
        "del_disk": {
            "msg": "Delete a volume",
            "options": [
                OPT.volume,
            ],
        },
        "del_map": {
            "msg": "Unmap a volume from an initiator group and target group",
            "options": [
                OPT.mapping,
                OPT.volume,
                OPT.initiatorgroup,
                OPT.targetgroup,
            ],
        },
        "resize_disk": {
            "msg": "Resize a volume",
            "options": [
                OPT.volume,
                OPT.size,
            ],
        },
    },
    "Low-level actions": {
        "list_initiators": {
            "msg": "List configured initiators",
            "options": [
                OPT.initiator,
            ],
        },
        "list_initiator_groups": {
            "msg": "List configured initiator groups",
            "options": [
                OPT.initiatorgroup,
            ],
        },
        "list_initiators_connectivity": {
            # NOTE(review): msg looks copy-pasted from
            # list_initiator_groups -- confirm intended wording
            "msg": "List configured initiator groups",
        },
        "list_mappings": {
            "msg": "List configured mappings",
            "options": [
                OPT.mapping,
                OPT.volume,
            ],
        },
        "list_targets": {
            "msg": "List configured targets",
            "options": [
                OPT.target,
            ],
        },
        "list_target_groups": {
            "msg": "List configured target groups",
            "options": [
                OPT.targetgroup,
            ],
        },
        "list_volumes": {
            "msg": "List configured volumes",
            "options": [
                OPT.volume,
            ],
        },
    },
}
class Arrays(object):
    """Enumerate the xtremio arrays declared in auth.conf.

    Each auth.conf section of type "xtremio" yields one Array object,
    optionally filtered by the <objects> name list. Iterating an
    Arrays instance yields its Array objects.
    """

    def __init__(self, objects=None):
        # fixed: the default used to be a shared mutable list, and
        # self.arrays used to be a class attribute accumulating the
        # arrays of every Arrays instance ever built
        self.objects = objects if objects is not None else []
        self.filtering = len(self.objects) > 0
        self.arrays = []
        cf = rcEnv.authconf
        if not os.path.exists(cf):
            return
        conf = ConfigParser.RawConfigParser()
        conf.read(cf)
        m = []
        for s in conf.sections():
            try:
                stype = conf.get(s, 'type')
            except Exception:
                continue
            if stype != "xtremio":
                continue
            try:
                name = s
                api = conf.get(s, 'api')
                username = conf.get(s, 'username')
                password = conf.get(s, 'password')
                m += [(name, api, username, password)]
            except Exception:
                # incomplete section: report and keep scanning
                print("error parsing section", s)
        del(conf)
        done = []
        for name, api, username, password in m:
            if self.filtering and name not in self.objects:
                continue
            if name in done:
                continue
            self.arrays.append(Array(name, api, username, password))
            done.append(name)

    def __iter__(self):
        for array in self.arrays:
            yield(array)

    def get_array(self, name):
        """Return the Array named <name>, or None."""
        for array in self.arrays:
            if array.name == name:
                return array
        return None
class Array(object):
def __init__(self, name, api, username, password):
    """Driver for a single XtremIO array REST api endpoint.

    name: the array name, as set in auth.conf
    api: the base url of the array REST api
    username, password: the api credentials from auth.conf
    """
    self.name = name
    self.api = api
    self.username = username
    self.password = password
    # basic-auth tuple passed to every requests call
    self.auth = (username, password)
    # datasets this driver can serve (see the get_<key> methods)
    self.keys = [
        'clusters_details',
        'volumes_details',
        'targets_details',
    ]
    # group name -> port names caches, read by add_disk; presumably
    # filled by the mapping actions -- TODO confirm
    self.tg_portname = {}
    self.ig_portname = {}
def convert_ids(self, data):
    """Coerce the values of '*-id' keys of <data> to int, in place.

    Non-string keys and values that do not parse as integers are left
    untouched. Returns <data> (None when <data> is None).
    """
    if data is None:
        return data
    id_keys = [k for k in data
               if isinstance(k, str) and k.endswith("-id")]
    for k in id_keys:
        try:
            data[k] = int(data[k])
        except ValueError:
            pass
    return data
def delete(self, uri, params=None, data=None):
    """HTTP DELETE <uri> with <data> json-encoded in the body.

    Relative uris are prefixed with the api base url. Returns None on
    200, raises ex.excError with the response payload otherwise.
    """
    payload = self.convert_ids(data)
    target = uri if uri.startswith("http") else self.api + uri
    response = requests.delete(target, params=params,
                               data=json.dumps(payload),
                               auth=self.auth, verify=verify,
                               headers={"Cache-Control": "no-cache"})
    if response.status_code != 200:
        raise ex.excError(response.content)
def put(self, uri, params=None, data=None):
    """HTTP PUT <uri> with <data> json-encoded in the body.

    Relative uris are prefixed with the api base url. Returns None on
    200, raises ex.excError with the response payload otherwise.
    """
    payload = self.convert_ids(data)
    target = uri if uri.startswith("http") else self.api + uri
    response = requests.put(target, params=params,
                            data=json.dumps(payload), auth=self.auth,
                            verify=verify,
                            headers={"Cache-Control": "no-cache"})
    if response.status_code != 200:
        raise ex.excError(response.content)
def post(self, uri, params=None, data=None):
    """HTTP POST <data> json-encoded to <uri>.

    Relative uris are prefixed with the api base url. On 201 Created,
    follow the first link of the response and return the created
    object. Raise ex.excError with the raw payload otherwise.
    """
    headers = {"Cache-Control": "no-cache"}
    data = self.convert_ids(data)
    if not uri.startswith("http"):
        uri = self.api + uri
    response = requests.post(uri, params=params, data=json.dumps(data), auth=self.auth,
                             verify=verify, headers=headers)
    if response.status_code == 201:
        # fixed: the body was json-decoded before the status check, so
        # non-json error payloads raised a decode error instead of the
        # excError below carrying the raw content
        ret = json.loads(response.content)
        return self.get(ret["links"][0]["href"])
    raise ex.excError(response.content)
def get(self, uri, params=None):
    """HTTP GET <uri> and return the json-decoded payload.

    Relative uris are prefixed with the api base url.
    """
    headers = {"Cache-Control": "no-cache"}
    if not uri.startswith("http"):
        uri = self.api + uri
    # fixed: the headers dict was built but never sent; pass it along
    # like the other verbs so responses are not served from a cache
    r = requests.get(uri, params=params, auth=self.auth, verify=verify,
                     headers=headers)
    return json.loads(r.content)
def get_clusters_details(self):
    """Dump all clusters (full detail) as an indented JSON string."""
    data = self.get("/clusters", params={"full": 1})
    return json.dumps(data["clusters"], indent=8)
def get_targets_details(self):
    """Dump all targets (full detail) as an indented JSON string."""
    data = self.get("/targets", params={"full": 1})
    return json.dumps(data["targets"], indent=8)
def get_volumes_details(self):
    """Dump all volumes (full detail) as an indented JSON string."""
    data = self.get("/volumes", params={"full": 1})
    return json.dumps(data["volumes"], indent=8)
def add_disk(self, name=None, size=None, blocksize=None, tags=None,
             cluster=None, access=None, vaai_tp_alerts=None,
             small_io_alerts=None, unaligned_io_alerts=None,
             alignment_offset=None, mappings=None, **kwargs):
    """Create a volume, optionally map it, and declare it to the collector.

    Returns a dict with driver_data, disk_id (naa-name), disk_devid
    (volume index) and the hba:tgt mappings actually realized.
    """
    if name is None:
        raise ex.excError("--name is mandatory")
    if size == 0 or size is None:
        raise ex.excError("--size is mandatory")
    d = {
        "vol-name": name,
        "vol-size": str(convert_size(size, _to="MB"))+"M",
    }
    if cluster is not None:
        d["cluster-id"] = cluster
    if blocksize is not None:
        d["lb-size"] = blocksize
    if small_io_alerts is not None:
        d["small-io-alerts"] = small_io_alerts
    if unaligned_io_alerts is not None:
        d["unaligned-io-alerts"] = unaligned_io_alerts
    if access is not None:
        d["vol-access"] = access
    if vaai_tp_alerts is not None:
        d["vaai-tp-alerts"] = vaai_tp_alerts
    if alignment_offset is not None:
        d["alignment-offset"] = alignment_offset
    self.post("/volumes", data=d)
    # fetch the created volume unconditionally so the result is complete
    # even when no mapping is requested (mappings_data was previously
    # referenced when --mappings was not given, raising NameError)
    driver_data = {"volume": self.get_volumes(volume=name, cluster=cluster)["content"]}
    results = {
        "driver_data": driver_data,
        "disk_id": driver_data["volume"]["naa-name"],
        "disk_devid": driver_data["volume"]["index"],
        "mappings": {},
    }
    if mappings:
        mappings_data = self.add_map(volume=name, mappings=mappings, cluster=cluster)
        driver_data["mappings"] = list(mappings_data.values())
        # expand (ig, tg) group pairs into hba:tgt port pairs using the
        # port name caches filled by translate_mappings()
        for ig, tg in list(mappings_data.keys()):
            if ig not in self.ig_portname:
                continue
            for hba_id in self.ig_portname[ig]:
                if tg not in self.tg_portname:
                    continue
                for tgt_id in self.tg_portname[tg]:
                    results["mappings"][hba_id+":"+tgt_id] = {
                        "hba_id": hba_id,
                        "tgt_id": tgt_id,
                        "lun": mappings_data[(ig, tg)]["lun"],
                    }
    self.push_diskinfo(results, name, size)
    return results
def resize_disk(self, volume=None, size=None, cluster=None, **kwargs):
    """Resize a volume; size is absolute or '+<incr>' for a relative grow.

    Returns the refreshed volume data.
    """
    if volume is None:
        raise ex.excError("--volume is mandatory")
    if volume == "":
        # was 'volume is ""': identity comparison with a str literal is
        # implementation-dependent and unreliable
        raise ex.excError("--volume can not be empty")
    if size == 0 or size is None:
        raise ex.excError("--size is mandatory")
    # assumes size is a string at this point — TODO confirm callers
    if size.startswith("+"):
        # relative grow: add the increment (KB) to the current size
        incr = convert_size(size.lstrip("+"), _to="KB")
        data = self.get_volumes(cluster=cluster, volume=volume)
        current_size = int(data["content"]["vol-size"])
        size = str(current_size + incr)+"K"
    d = {
        "vol-size": str(convert_size(size, _to="MB"))+"M",
    }
    uri = "/volumes"
    params = {}
    # numeric volume refs go in the uri, names in a query param
    # (the redundant 'if volume is not None' guard was removed: volume
    # is already known to be set above)
    try:
        int(volume)
        uri += "/"+str(volume)
    except ValueError:
        params["name"] = volume
    if cluster is not None:
        d["cluster-id"] = cluster
    self.put(uri, params=params, data=d)
    ret = self.get_volumes(volume=volume, cluster=cluster)
    return ret
def get_volume_mappings(self, cluster=None, volume=None, **kwargs):
    """Return the lun-maps referencing the given volume (id or name)."""
    params = {"full": 1}
    uri = "/lun-maps"
    if volume is None:
        raise ex.excError("--volume is mandatory")
    # resolve the volume to its canonical name for the filter expression
    data = self.get_volumes(cluster=cluster, volume=volume)
    vol_name = data["content"]["name"]
    params["filter"] = "vol-name:eq:"+vol_name
    if cluster is not None:
        params["cluster-id"] = cluster
    data = self.get(uri, params=params)
    return data
def del_volume_mappings(self, cluster=None, volume=None, **kwargs):
    """Delete every lun-map attached to the given volume."""
    data = self.get_volume_mappings(cluster=cluster, volume=volume)
    for mapping in data["lun-maps"]:
        self.del_map(cluster=cluster, mapping=mapping["index"])
def del_disk(self, cluster=None, volume=None, **kwargs):
    """Unmap then delete a volume, and retire it from the collector."""
    if volume is None:
        raise ex.excError("--volume is mandatory")
    if volume == "":
        raise ex.excError("volume can not be empty")
    data = self.get_volumes(cluster=cluster, volume=volume)
    if "content" not in data:
        raise ex.excError("volume %s does not exist" % volume)
    # remember the wwid before deletion for the collector cleanup
    disk_id = data["content"]["naa-name"]
    self.del_volume_mappings(cluster=cluster, volume=volume)
    params = {}
    uri = "/volumes"
    try:
        # numeric refs go in the uri, names in a query param
        int(volume)
        uri += "/"+str(volume)
    except ValueError:
        params["name"] = volume
    if cluster is not None:
        params["cluster-id"] = cluster
    ret = self.delete(uri, params=params)
    self.del_diskinfo(disk_id)
    return ret
def convert_hba_id(self, hba_id):
    """Format a 16-hex-char wwpn as colon-separated byte pairs.

    '210000e08b123456' -> '21:00:00:e0:8b:12:34:56'
    """
    pairs = [hba_id[offset:offset + 2] for offset in range(0, 16, 2)]
    return ":".join(pairs)
def get_hba_initiatorgroup(self, hba_id, cluster=None):
    """Return the id of the last initiatorgroup containing this wwpn."""
    params = {"full": 1}
    uri = "/initiators"
    # wwpn is given as 16 hex chars; the api expects colon-separated pairs
    hba_id = self.convert_hba_id(hba_id)
    params["filter"] = "port-address:eq:"+hba_id
    if cluster is not None:
        params["cluster-id"] = cluster
    data = self.get(uri, params=params)
    if len(data["initiators"]) == 0:
        raise ex.excError("no initiator found with port-address=%s" % hba_id)
    if len(data["initiators"][0]["ig-id"]) == 0:
        raise ex.excError("initiator %s found in no initiatorgroup" % hba_id)
    return data["initiators"][0]["ig-id"][-1]
def get_target_targetgroup(self, hba_id, cluster=None):
    """Return the id of the last targetgroup containing this target wwpn."""
    params = {"full": 1}
    uri = "/targets"
    hba_id = self.convert_hba_id(hba_id)
    params["filter"] = "port-address:eq:"+hba_id
    if cluster is not None:
        params["cluster-id"] = cluster
    data = self.get(uri, params=params)
    if len(data["targets"]) == 0:
        raise ex.excError("no target found with port-address=%s" % hba_id)
    if len(data["targets"][0]["tg-id"]) == 0:
        raise ex.excError("target %s found in no targetgroup" % hba_id)
    return data["targets"][0]["tg-id"][-1]
def translate_mappings(self, mappings, cluster=None):
    """Convert 'hba_wwpn:tgt_wwpn,tgt_wwpn,...' strings into
    {initiatorgroup-id: set(targetgroup-ids)}, caching the port names
    in self.ig_portname / self.tg_portname for later reporting.
    """
    internal_mappings = {}
    for mapping in mappings:
        elements = mapping.split(":")
        hba_id = elements[0]
        targets = elements[-1].split(",")
        ig = self.get_hba_initiatorgroup(hba_id)
        if ig not in self.ig_portname:
            self.ig_portname[ig] = []
        self.ig_portname[ig].append(hba_id)
        # NOTE(review): this resets the set on every mapping entry; if two
        # mappings resolve to the same ig, earlier tgs are lost — confirm
        internal_mappings[ig] = set()
        for target in targets:
            tg = self.get_target_targetgroup(target, cluster=cluster)
            if tg not in self.tg_portname:
                self.tg_portname[tg] = []
            self.tg_portname[tg].append(target)
            internal_mappings[ig].add(tg)
    return internal_mappings
def add_map(self, volume=None, mappings=None, initiatorgroup=None, targetgroup=None,
            cluster=None, lun=None, **kwargs):
    """Map a volume to one or more (ig, tg) pairs.

    Returns {(initiatorgroup, targetgroup): lun-map payload}.
    """
    if volume is None:
        raise ex.excError("--volume is mandatory")
    if mappings is not None and initiatorgroup is None:
        # resolve wwpn-based mappings to group ids
        translated = self.translate_mappings(mappings, cluster=cluster)
        pairs = [(ig, tg) for ig, tgs in translated.items() for tg in tgs]
    else:
        pairs = [(initiatorgroup, targetgroup)]
    results = {}
    for ig, tg in pairs:
        results[(ig, tg)] = self._add_map(volume=volume, initiatorgroup=ig,
                                          targetgroup=tg, cluster=cluster,
                                          lun=lun, **kwargs)
    return results
def _add_map(self, volume=None, initiatorgroup=None, targetgroup=None,
             cluster=None, lun=None, **kwargs):
    """POST a single lun-map and return its 'content' payload."""
    if initiatorgroup is None:
        raise ex.excError("--initiatorgroup is mandatory")
    payload = {
        "vol-id": volume,
        "ig-id": initiatorgroup,
    }
    # optional attributes are only sent when explicitly provided
    for key, value in (("tg-id", targetgroup),
                       ("cluster-id", cluster),
                       ("lun", lun)):
        if value is not None:
            payload[key] = value
    return self.post("/lun-maps", data=payload)["content"]
def del_map(self, mapping=None, cluster=None, **kwargs):
    """Delete a lun-map, referenced by index (numeric) or name."""
    if mapping is None:
        raise ex.excError("--mapping is mandatory")
    if mapping == "":
        raise ex.excError("mapping can not be empty")
    params = {}
    uri = "/lun-maps"
    # the previous 'if mapping is not None' re-check was dead code:
    # mapping is guaranteed non-None after the guard above
    try:
        # numeric refs go in the uri, names in a query param
        int(mapping)
        uri += "/"+str(mapping)
    except ValueError:
        params["name"] = mapping
    if cluster is not None:
        params["cluster-id"] = cluster
    return self.delete(uri, params=params)
def list_target_groups(self, cluster=None, targetgroup=None, **kwargs):
    """Print target-groups, optionally restricted to one (id or name)."""
    params = {"full": 1}
    uri = "/target-groups"
    if targetgroup is not None:
        try:
            # numeric refs go in the uri, names in a query param
            int(targetgroup)
            uri += "/"+str(targetgroup)
        except ValueError:
            params["name"] = targetgroup
    if cluster is not None:
        params["cluster-id"] = cluster
    data = self.get(uri, params=params)
    # list responses carry the collection key; single-object responses
    # carry 'content'; anything else is printed raw
    if "target-groups" in data:
        print(json.dumps(data["target-groups"], indent=8))
    elif "content" in data:
        print(json.dumps(data["content"], indent=8))
    else:
        print(json.dumps(data, indent=8))
def get_initiators(self, cluster=None, initiator=None, **kwargs):
    """Return initiators data, optionally one initiator (id or name)."""
    params = {"full": 1}
    uri = "/initiators"
    if initiator is not None:
        try:
            int(initiator)
            uri += "/"+str(initiator)
        except ValueError:
            params["name"] = initiator
    if cluster is not None:
        params["cluster-id"] = cluster
    data = self.get(uri, params=params)
    return data
def list_initiators(self, cluster=None, initiator=None, **kwargs):
    """Print initiators as indented JSON."""
    data = self.get_initiators(cluster=cluster, initiator=initiator, **kwargs)
    if "initiators" in data:
        print(json.dumps(data["initiators"], indent=8))
    elif "content" in data:
        print(json.dumps(data["content"], indent=8))
    else:
        print(json.dumps(data, indent=8))
def list_initiator_groups(self, cluster=None, initiatorgroup=None, **kwargs):
    """Print initiator-groups, optionally restricted to one (id or name)."""
    params = {"full": 1}
    uri = "/initiator-groups"
    if initiatorgroup is not None:
        try:
            int(initiatorgroup)
            uri += "/"+str(initiatorgroup)
        except ValueError:
            params["name"] = initiatorgroup
    if cluster is not None:
        params["cluster-id"] = cluster
    data = self.get(uri, params=params)
    if "initiator-groups" in data:
        print(json.dumps(data["initiator-groups"], indent=8))
    elif "content" in data:
        print(json.dumps(data["content"], indent=8))
    else:
        print(json.dumps(data, indent=8))
def list_initiators_connectivity(self, cluster=None, **kwargs):
    """Print the array-wide initiators connectivity report."""
    params = {}
    uri = "/initiators-connectivity"
    if cluster is not None:
        params["cluster-id"] = cluster
    data = self.get(uri, params=params)
    if "content" in data:
        print(json.dumps(data["content"], indent=8))
    else:
        print(json.dumps(data, indent=8))
def list_targets(self, cluster=None, target=None, **kwargs):
    """Print targets, optionally restricted to one (id or name)."""
    params = {"full": 1}
    uri = "/targets"
    if target is not None:
        try:
            int(target)
            uri += "/"+str(target)
        except ValueError:
            params["name"] = target
    if cluster is not None:
        params["cluster-id"] = cluster
    data = self.get(uri, params=params)
    if "targets" in data:
        print(json.dumps(data["targets"], indent=8))
    elif "content" in data:
        print(json.dumps(data["content"], indent=8))
    else:
        print(json.dumps(data, indent=8))
def list_mappings(self, cluster=None, mapping=None, volume=None, **kwargs):
    """Print lun-maps, optionally filtered by mapping id/name or volume."""
    params = {"full": 1}
    uri = "/lun-maps"
    if mapping is not None:
        try:
            # numeric refs go in the uri, names in a query param
            int(mapping)
            uri += "/"+str(mapping)
        except ValueError:
            params["name"] = mapping
    if volume is not None:
        try:
            int(volume)
            params["filter"] = "vol-index:eq:"+str(volume)
        except ValueError:
            params["filter"] = "vol-name:eq:"+volume
    if cluster is not None:
        params["cluster-id"] = cluster
    data = self.get(uri, params=params)
    if "lun-maps" in data:
        # was: testing '"targets" in data' then printing data["lun-maps"],
        # which could never match a lun-maps listing; also dropped a stray
        # debug print(params)
        print(json.dumps(data["lun-maps"], indent=8))
    elif "content" in data:
        print(json.dumps(data["content"], indent=8))
    else:
        print(json.dumps(data, indent=8))
def get_volumes(self, cluster=None, volume=None, **kwargs):
    """GET /volumes, optionally restricted to one volume (id or name)."""
    params = {"full": 1}
    uri = "/volumes"
    if volume is not None:
        try:
            # numeric refs go in the uri, names in a query param
            int(volume)
            uri += "/"+str(volume)
        except ValueError:
            params["name"] = volume
    if cluster is not None:
        params["cluster-id"] = cluster
    data = self.get(uri, params=params)
    return data
def list_volumes(self, cluster=None, volume=None, **kwargs):
    """Print volumes as indented JSON."""
    data = self.get_volumes(cluster=cluster, volume=volume, **kwargs)
    if "volumes" in data:
        print(json.dumps(data["volumes"], indent=8))
    elif "content" in data:
        print(json.dumps(data["content"], indent=8))
    else:
        print(json.dumps(data, indent=8))
def del_diskinfo(self, disk_id):
    """Remove the disk entry from the collector, if a node is attached."""
    if disk_id in (None, ""):
        return
    if self.node is None:
        # no collector context: silently skip
        return
    try:
        ret = self.node.collector_rest_delete("/disks/%s" % disk_id)
    except Exception as exc:
        raise ex.excError(str(exc))
    if "error" in ret:
        raise ex.excError(ret["error"])
    return ret
def push_diskinfo(self, data, name, size):
    """Declare a new disk to the collector, if a node is attached.

    data is the results dict built by add_disk().
    """
    if self.node is None:
        return
    if data["disk_id"] in (None, ""):
        # fall back to '<array>.<volume index>'; the results dict key is
        # "driver_data" (was "driver_info", which add_disk never sets,
        # so this path raised KeyError)
        data["disk_id"] = self.name+"."+str(data["driver_data"]["volume"]["index"])
    try:
        ret = self.node.collector_rest_post("/disks", {
            "disk_id": data["disk_id"],
            "disk_devid": data["disk_devid"],
            "disk_name": name,
            "disk_size": convert_size(size, _to="MB"),
            "disk_alloc": 0,
            "disk_arrayid": self.name,
            "disk_group": "default",
        })
    except Exception as exc:
        raise ex.excError(str(exc))
    if "error" in ret:
        # was: testing '"error" in data' (the caller's payload) instead
        # of the collector response
        raise ex.excError(ret["error"])
    return ret
def do_action(action, array_name=None, node=None, **kwargs):
    """Dispatch a parsed CLI action to the named array object."""
    registry = Arrays()
    array = registry.get_array(array_name)
    if array is None:
        raise ex.excError("array %s not found" % array_name)
    if not hasattr(array, action):
        raise ex.excError("not implemented")
    # attach the node context so collector calls can be made
    array.node = node
    result = getattr(array, action)(**kwargs)
    if result is not None:
        print(json.dumps(result, indent=4))
def main(argv, node=None):
    """Parse the command line and execute the requested array action."""
    parser = OptParser(prog=PROG, options=OPT, actions=ACTIONS,
                       deprecated_actions=DEPRECATED_ACTIONS,
                       global_options=GLOBAL_OPTS)
    options, action = parser.parse_args(argv)
    kwargs = vars(options)
    do_action(action, node=node, **kwargs)
if __name__ == "__main__":
    try:
        # NOTE(review): main() returns None, and sys.exit(None) exits 0,
        # so the success path relies on that behavior — confirm intended
        ret = main(sys.argv)
    except ex.excError as exc:
        print(exc, file=sys.stderr)
        ret = 1
    sys.exit(ret)
opensvc-1.8~20170412/lib/rcMountsSunOS.py 0000644 0001750 0001750 00000002165 13073467726 020151 0 ustar jkelbert jkelbert import rcMounts
from rcUtilities import *
class Mounts(rcMounts.Mounts):
    """Parsed mount table for SunOS, fed from 'mount -p' output."""
    # df invocation used by the generic layer for local filesystems
    df_one_cmd = ["df", "-l"]
    def match_mount(self, i, dev, mnt):
        """Given a line of 'mount' output, returns True if (dev, mnt) matches
        this line. Returns False otherwize. Also care about weirdos like loops
        and binds, ...
        """
        if i.mnt != mnt:
            return False
        if i.dev == dev:
            return True
        return False
    def __init__(self):
        """Build self.mounts from 'mount -p' (expects 6 or 7 columns)."""
        self.mounts = []
        (ret, out, err) = call(['mount','-p'], outdebug=False)
        for line in out.split('\n'):
            words=line.split()
            if len(words) < 6 :
                continue
            elif words[1]+words[4] != '--' :
                # ignore mount line with space in mountpoint or dev
                continue
            elif len(words) == 6 :
                # no mount-options column: pad with a placeholder
                words.append('-')
            dev, null, mnt, type, null, null, mnt_opt = words
            m = rcMounts.Mount(dev, mnt, type, mnt_opt.strip('()'))
            self.mounts.append(m)
if __name__ == "__main__" :
    # ad-hoc manual test: show the class help then the parsed mount table
    help(Mounts)
    M=Mounts()
    print(M)
opensvc-1.8~20170412/lib/resHb.py 0000644 0001750 0001750 00000003467 13073467726 016460 0 ustar jkelbert jkelbert import resources as Res
from rcGlobalEnv import rcEnv
import os
import rcExceptions as ex
import rcStatus
class Hb(Res.Resource):
    """Base heartbeat resource.

    Keeps the <svcname>.cluster and <svcname>.stonith symlinks pointing
    at svcmgr, and neutralizes start/stop/freeze actions since the
    heartbeat stack drives the service.
    """
    def cluster_files(self):
        # expected layout: pathetc/<svcname>.{cluster,stonith} -> svcmgr
        svcfile = os.path.join(rcEnv.pathetc, self.svc.svcname)
        svcmgr = rcEnv.svcmgr
        cluster_f = '.'.join((svcfile, 'cluster'))
        stonith_f = '.'.join((svcfile, 'stonith'))
        for f in (cluster_f, stonith_f):
            if os.path.islink(f):
                if os.path.exists(f):
                    if os.readlink(f) == svcmgr:
                        self.log.debug("%s: symlink ok."%f)
                        pass
                    else:
                        # symlink exists but targets the wrong binary
                        self.log.info("%s: symlink exists but points to wrong file. fix."%f)
                        os.unlink(f)
                        os.symlink(svcmgr, f)
                else:
                    # dangling symlink: recreate it
                    self.log.info("%s: broken link. fix."%f)
                    os.unlink(f)
                    os.symlink(svcmgr, f)
            else:
                if os.path.exists(f):
                    # a plain file is in the way: replace with the symlink
                    self.log.info("%s: regular file. fix."%f)
                    os.unlink(f)
                    os.symlink(svcmgr, f)
                else:
                    # nothing there yet: create the symlink
                    # NOTE(review): the message reads oddly for this branch
                    self.log.info("%s: not regular file nor symlink. fix."%f)
                    os.symlink(svcmgr, f)
    def __str__(self):
        return "%s" % (Res.Resource.__str__(self))
    def freeze(self):
        # heartbeat-driven: freeze/thaw are no-ops at the resource level
        pass
    def thaw(self):
        pass
    def stop(self):
        # start/stop are owned by the heartbeat stack
        pass
    def start(self):
        pass
    def shutdown(self):
        raise ex.excEndAction("shutdown action is not allowed on heartbeat-driven services")
    def __status(self, verbose=False):
        # name-mangled default; drivers override status reporting
        return rcStatus.UNDEF
    def _status(self, verbose=False):
        # ensure the helper symlinks exist on every status evaluation
        self.cluster_files()
        return self.__status(verbose)
opensvc-1.8~20170412/lib/resDiskRaw.py 0000644 0001750 0001750 00000026462 13073467726 017473 0 ustar jkelbert jkelbert import resDisk
import os
import rcStatus
import re
import pwd
import grp
import stat
import sys
import glob
import rcExceptions as ex
from rcUtilities import which, is_string, lazy
class Disk(resDisk.Disk):
def __init__(self,
rid=None,
devs=set([]),
user=None,
group=None,
perm=None,
create_char_devices=False,
**kwargs):
self.label = "raw"
resDisk.Disk.__init__(self,
rid=rid,
name="raw",
type='disk.raw',
**kwargs)
self.user = user
self.group = group
self.perm = perm
self.create_char_devices = create_char_devices
self.original_devs = devs
self.devs = set([])
self.devs_not_found = set([])
self.dst_devs_not_found = set([])
self.major_minor_errs = set([])
self.devs_map = {}
def verify_dev(self, path):
# os specific plug
return True
def info(self):
self.validate_devs()
data = []
if self.create_char_devices:
data += [["create_char_devices", str(self.create_char_devices)]]
if self.user:
data += [["user", str(self.user)]]
if self.group:
data += [["group", str(self.group)]]
if self.perm:
data += [["perm", str(self.perm)]]
for dev in self.devs:
if dev in self.devs_map:
data += [["dev", dev+":"+self.devs_map[dev]]]
else:
data += [["dev", dev]]
return self.fmt_info(data)
def subst_container_root(self, path):
    """Replace a leading '<container_name>' token in path with that
    container's root filesystem path (zone or lxc), when resolvable.
    """
    m = re.match("<(\w+)>", path)
    if m is None:
        return path
    container_name = m.group(1)
    for r in self.svc.get_resources("container"):
        if hasattr(r, "name") and r.name == container_name:
            if hasattr(r, "get_zonepath"):
                # zone
                container_root = r.get_zonepath()
            elif hasattr(r, "get_rootfs"):
                # lxc
                container_root = r.get_rootfs()
            else:
                # unknown container flavor: leave the path untouched
                return path
            path = re.sub("<\w+>", container_root, path)
            break
    return path
def validate_devs(self):
self.devs = set([])
self.devs_not_found = set([])
self.dst_devs_not_found = set([])
for dev in self.original_devs:
if ":" in dev:
try:
src, dst = dev.split(":")
except:
continue
if not os.path.exists(src) or not self.verify_dev(src):
self.devs_not_found.add(src)
continue
dst = self.subst_container_root(dst)
if not os.path.exists(dst):
self.dst_devs_not_found.add(dst)
self.devs_map[src] = dst
self.devs.add(src)
continue
l = set(glob.glob(dev))
if len(l) > 0:
for _dev in l:
if not self.verify_dev(_dev):
continue
self.devs.add(_dev)
else:
self.devs_not_found.add(dev)
def on_add(self):
try:
n = self.rid.split('#')[1]
except:
n = "0"
self.name = self.svc.svcname+".raw"+n
self.label = self.name
@lazy
def uid(self):
uid = self.user
if is_string(uid):
try:
info=pwd.getpwnam(uid)
uid = info[2]
except:
pass
return uid
@lazy
def gid(self):
gid = self.group
if is_string(gid):
try:
info=grp.getgrnam(gid)
gid = info[2]
except:
pass
return gid
def check_uid(self, rdev, verbose=False):
if not os.path.exists(rdev):
return True
if self.user is None:
return True
if self.uid is None:
if verbose:
self.status_log('user %s uid not found'%str(self.user))
return False
uid = os.stat(rdev).st_uid
if uid != self.uid:
if verbose:
self.status_log('%s uid should be %s but is %d'%(rdev, str(self.uid), uid))
return False
return True
def check_gid(self, rdev, verbose=False):
if not os.path.exists(rdev):
return True
if self.group is None:
return True
if self.gid is None:
if verbose:
self.status_log('group %s gid not found'%str(self.group))
return False
gid = os.stat(rdev).st_gid
if gid != self.gid:
if verbose:
self.status_log('%s gid should be %s but is %d'%(rdev, str(self.gid), gid))
return False
return True
def check_perm(self, rdev, verbose=False):
if not os.path.exists(rdev):
return True
try:
perm = oct(stat.S_IMODE(os.stat(rdev).st_mode))
except:
self.log.error('%s can not stat file'%rdev)
return False
perm = str(perm).lstrip("0o").lstrip("0")
if perm != str(self.perm):
if verbose:
self.status_log('%s perm should be %s but is %s'%(rdev, str(self.perm), perm))
return False
return True
def check_block(self, src, dst):
if src is None or dst is None:
return False
if not os.path.exists(src) or not os.path.exists(dst):
return False
src_st = os.stat(src)
dst_st = os.stat(dst)
r = False
if os.major(src_st.st_rdev) != os.major(dst_st.st_rdev) or \
os.minor(src_st.st_rdev) != os.minor(dst_st.st_rdev):
self.major_minor_errs.add(dst)
r |= True
return not r
def fix_ownership(self, path):
self.fix_ownership_user(path)
self.fix_ownership_group(path)
def fix_ownership_user(self, path):
if self.user is None:
return
if self.uid is None:
raise ex.excError("user %s does not exist" % str(self.user))
if not self.check_uid(path):
self.vcall(['chown', str(self.uid), path])
else:
self.log.info("%s has correct user ownership (%s)"% (path, str(self.uid)))
def fix_ownership_group(self, path):
if self.group is None:
return
if self.gid is None:
raise ex.excError("group %s does not exist" % str(self.group))
if self.gid and not self.check_gid(path):
self.vcall(['chgrp', str(self.gid), path])
else:
self.log.info("%s has correct group ownership (%s)"% (path, str(self.gid)))
def fix_perms(self, path):
if self.uid is None:
return
if not self.check_perm(path):
self.vcall(['chmod', self.perm, path])
else:
self.log.info("%s has correct permissions (%s)"% (path, str(self.perm)))
def mangle_devs_map(self):
pass
def has_it_char_devices(self):
return True
def has_it_devs_map(self):
r = False
if len(self.dst_devs_not_found) == len(self.devs_map):
# all dst unlinked: report no error => down state
r |= True
elif len(self.dst_devs_not_found) > 0:
self.status_log("%s dst devs not found"%', '.join(self.dst_devs_not_found))
r |= True
for src, dst in self.devs_map.items():
r |= not self.has_it_dev_map(src, dst)
return not r
def has_it_dev_map(self, src, dst):
r = False
if not self.check_block(src, dst):
r |= True
if not self.check_uid(dst, verbose=True):
r |= True
elif not self.check_gid(dst, verbose=True):
r |= True
elif not self.check_perm(dst, verbose=True):
r |= True
return not r
def has_it(self):
    """Returns True if all raw devices are present and correctly
    named
    """
    r = False
    if self.create_char_devices:
        r |= not self.has_it_char_devices()
    if len(self.devs_map) > 0:
        r |= not self.has_it_devs_map()
    if len(self.devs_not_found) > 0:
        self.status_log("%s not found"%', '.join(self.devs_not_found))
        r |= True
    if len(self.major_minor_errs) > 0:
        # was joining devs_not_found here, reporting the wrong device list
        self.status_log("%s have major:minor diff with their src"%', '.join(self.major_minor_errs))
        r |= True
    return not r
def is_up(self):
"""Returns True if the volume group is present and activated
"""
return self.has_it()
def _status(self, verbose=False):
self.validate_devs()
self.mangle_devs_map()
r = self.is_up()
if not self.create_char_devices and len(self.devs_map) == 0:
return rcStatus.NA
if r:
return self.status_stdby(rcStatus.UP)
else:
return self.status_stdby(rcStatus.DOWN)
def do_start(self):
self.validate_devs()
self.can_rollback = True
self.do_start_char_devices()
self.mangle_devs_map()
self.do_start_blocks()
def do_start_block(self, src, dst):
if src is not None:
if not os.path.exists(src):
raise ex.excError("src file %s does not exist" % src)
d = os.path.dirname(dst)
if not os.path.exists(d):
self.log.info("create dir %s" % d)
os.makedirs(d)
if not os.path.exists(dst):
src_st = os.stat(src)
if stat.S_ISBLK(src_st.st_mode):
t = "b"
elif stat.S_ISCHR(src_st.st_mode):
t = "c"
else:
raise ex.excError("%s is not a block nor a char device" % src)
major = os.major(src_st.st_rdev)
minor = os.minor(src_st.st_rdev)
cmd = ["mknod", dst, t, str(major), str(minor)]
ret, out, err = self.vcall(cmd)
if ret != 0:
raise ex.excError
self.fix_ownership(dst)
self.fix_perms(dst)
def do_start_blocks(self):
if which("mknod") is None:
raise ex.excError("mknod not found")
for src, dst in self.devs_map.items():
self.do_start_block(src, dst)
def do_start_char_devices(self):
pass
def do_stop_char_devices(self):
pass
def do_stop(self):
self.validate_devs()
self.mangle_devs_map()
self.do_stop_blocks()
self.do_stop_char_devices()
def do_stop_blocks(self):
for src, dst in self.devs_map.items():
self.do_stop_block(src, dst)
def do_stop_block(self, src, dst):
if src is None:
# never unlink unmapped devs
return
if os.path.exists(dst):
self.log.info("unlink %s" % dst)
try:
os.unlink(dst)
except Exception as e:
raise ex.excError(str(e))
else:
self.log.info("%s already unlinked" % dst)
def disklist(self):
return self.devs
opensvc-1.8~20170412/lib/resHbVcs.py 0000644 0001750 0001750 00000001223 13073467726 017120 0 ustar jkelbert jkelbert import resHb
import rcStatus
from rcGlobalEnv import rcEnv
import rcExceptions as ex
class Hb(resHb.Hb):
    """Heartbeat resource driven by Veritas Cluster Server (VCS)."""
    def __init__(self, rid=None, name=None, **kwargs):
        resHb.Hb.__init__(self, rid, type="hb.vcs", **kwargs)
        self.label = name
    def _status(self, verbose=False):
        # query the VCS service group state; '|' padding is stripped
        try:
            s = self.svc.get_grp_val('State').strip('|')
        except ex.excError as e:
            self.status_log(str(e))
            return rcStatus.WARN
        if s == "ONLINE":
            return rcStatus.UP
        elif s == "OFFLINE":
            return rcStatus.DOWN
        else:
            # transient or faulted states are surfaced as warnings
            self.status_log(s)
            return rcStatus.WARN
opensvc-1.8~20170412/lib/rcDevTreeSunOS.py 0000644 0001750 0001750 00000024231 13073467726 020220 0 ustar jkelbert jkelbert import rcDevTree
import glob
import os
import re
from subprocess import *
from rcUtilities import which
from rcGlobalEnv import rcEnv
import rcDevTreeVeritas
class DevTree(rcDevTreeVeritas.DevTreeVeritas, rcDevTree.DevTree):
di = None
zpool_members = {}
zpool_used = {}
zpool_used_zfs = {}
zpool_size = {}
zpool_datasets = {}
zpool_datasets_used = {}
def load_partitions(self, d):
"""
* First Sector Last
* Partition Tag Flags Sector Count Sector Mount Directory
0 2 00 16779312 54281421 71060732
1 3 01 0 16779312 16779311
2 5 00 0 71127180 71127179
7 0 00 71060733 66447 71127179
"""
p = Popen(["prtvtoc", d.devpath[0]], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if p.returncode != 0:
return
for line in out.split("\n"):
line = line.strip()
if line.startswith('*'):
continue
if line.startswith('2'):
continue
l = line.split()
if len(l) < 6:
continue
partname = d.devname + 's' + l[0]
partpath = d.devpath[0][:-1] + l[0]
partsize = self.di.get_part_size(partpath)
p = self.add_dev(partname, partsize, "linear")
p.set_devpath(partpath)
self.add_device_devpath(p, partpath)
p.set_devpath(partpath.replace("/dev/rdsk/", "/dev/dsk/"))
self.add_device_devpath(p, partpath)
d.add_child(partname)
p.add_parent(d.devname)
def add_device_devpath(self, dev, path):
if os.path.islink(path):
altpath = os.path.realpath(path)
if altpath != path:
dev.set_devpath(altpath)
def load_disks(self):
self.load_vxdisk_cache()
if len(self.vxdisk_cache) > 0:
self.load_vxdisk()
else:
self.load_format()
def load_vxdisk(self):
for devpath, data in self.vxdisk_cache.items():
if "size" not in data or "devpath" not in data:
continue
devname = os.path.basename(devpath)
bdevpath = devpath.replace("/rdsk/", "/dsk/").replace("/rdmp/", "/dmp/")
size = data["size"]
d = self.add_dev(devname, size, "linear")
d.set_devpath(data["devpath"])
d.set_devpath(devpath)
d.set_devpath(bdevpath)
self.add_device_devpath(d, devpath)
self.add_device_devpath(d, bdevpath)
self.load_partitions(d)
def load_format(self):
"""
0. c3t0d0
/pci@1f,700000/scsi@2/sd@0,0
4. c5t600508B4000971CD00010000024A0000d0 EVA_SAVE
/scsi_vhci/ssd@g600508b4000971cd00010000024a0000
"""
p = Popen(["format", "-e"], stdout=PIPE, stderr=PIPE, stdin=PIPE)
out, err = p.communicate(input=None)
for line in out.split("\n"):
line = line.strip()
if re.match(r"[0-9]+\. ", line) is None:
continue
l = line.split()
devname = l[1]
devpath = '/dev/rdsk/'+devname+'s2'
bdevpath = devpath.replace("/rdsk/", "/dsk/")
size = self.di.get_size(devpath)
d = self.add_dev(devname, size, "linear")
d.set_devpath(devpath)
d.set_devpath(bdevpath)
self.add_device_devpath(d, devpath)
self.add_device_devpath(d, bdevpath)
self.load_partitions(d)
def load_sds(self):
if not os.path.exists("/usr/sbin/metastat"):
return
p = Popen(["metastat", "-p"], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if p.returncode != 0:
return
lines = out.split('\n')
lines.reverse()
"""
# metastat -p
d11 -m d2 d3 1
d2 1 1 c3t0d0s1
d3 1 1 c3t1d0s1
"""
for line in lines:
l = line.split()
if len(l) < 3:
continue
childname = l[0]
childpath = "/dev/md/dsk/"+childname
childsize = self.di.get_size(childpath)
if l[1] == "-m":
childtype = "raid1"
else:
childtype = "linear"
childdev = self.add_dev(childname, childsize, childtype)
childdev.set_devpath(childpath)
if l[1] == "-m":
parentnames = l[2:-1]
else:
parentnames = [l[-1]]
for parentname in parentnames:
parentpath = "/dev/md/dsk/"+parentname
parentsize = self.di.get_size(parentpath)
parentdev = self.add_dev(parentname, parentsize, "linear")
childdev.add_parent(parentname)
parentdev.add_child(childname)
def load_zpool(self):
p = Popen(["zpool", "list", "-H"], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if p.returncode != 0:
return
for line in out.split('\n'):
l = line.split()
if len(l) == 0:
continue
poolname = l[0]
self.load_zpool1(poolname)
def load_zpool1(self, poolname):
p = Popen(["zpool", "status", poolname], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if p.returncode != 0:
return
self.zpool_members[poolname] = []
for line in out.split('\n'):
l = line.split()
if len(l) != 5:
continue
if l[0] == 'NAME':
continue
if l[0] == poolname:
continue
devname = l[0]
# -d mode import ?
if hasattr(rcEnv, "pathvar") and devname.startswith(rcEnv.pathvar):
devname = devname.split('/')[-1]
d = self.get_dev(devname)
if d is None:
continue
self.zpool_members[poolname].append(d)
p = Popen(["zpool", "iostat", poolname], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if p.returncode != 0:
return
lines = out.split('\n')
lines = [l for l in lines if len(l) > 0]
self.zpool_used[poolname] = self.read_size(lines[-1].split()[1])
zpool_free = self.read_size(lines[-1].split()[2])
self.zpool_size[poolname] = self.zpool_used[poolname] + zpool_free
p = Popen(["zfs", "list", "-H", "-r", "-t", "filesystem", poolname], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if p.returncode != 0:
return
self.zpool_datasets[poolname] = []
self.zpool_datasets_used[poolname] = 0
for line in out.split('\n'):
l = line.split()
if len(l) == 0:
continue
zfsname = l[0]
size = self.read_size(l[1])
refer = self.read_size(l[3])
size -= refer
mnt = l[4]
if zfsname == poolname:
self.zpool_used_zfs[poolname] = size
continue
self.zpool_datasets[poolname].append((zfsname, size))
self.zpool_datasets_used[poolname] += size
p = Popen(["zfs", "list", "-H", "-t", "snapshot"], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if p.returncode != 0:
return
for line in out.split('\n'):
l = line.split()
if len(l) == 0:
continue
zfsname = l[0]
if not zfsname.startswith(poolname+'/') and \
not zfsname.startswith(poolname+'@'):
continue
size = self.read_size(l[1])
#refer = self.read_size(l[3])
self.zpool_datasets[poolname].append((zfsname, size))
self.zpool_datasets_used[poolname] += size
rest = self.zpool_used_zfs[poolname] - self.zpool_datasets_used[poolname]
if rest < 0:
rest = 0
self.zpool_datasets[poolname].append((poolname, rest))
self.zpool_datasets_used[poolname] += rest
if self.zpool_datasets_used[poolname] == 0:
ratio = 0
else:
ratio = 1.0 * self.zpool_used[poolname] / self.zpool_datasets_used[poolname]
for zfsname, size in self.zpool_datasets[poolname]:
used = int(size*ratio)
d = self.add_dev(zfsname, used, "zfs")
d.set_devpath(zfsname)
for m in self.zpool_members[poolname]:
member_ratio = 1.0 * m.size / self.zpool_size[poolname]
d.add_parent(m.devname)
m.add_child(zfsname)
self.set_relation_used(m.devname, zfsname, int(used*member_ratio))
def read_size(self, s):
    """Parse a zfs/zpool human-readable size ('1,5G', '2T', '0', sizes in
    'M' being the unit) into an integer number of MB.
    """
    if s == '0':
        return 0
    # locale may use ',' as the decimal separator
    value = float(s[:-1].replace(',', '.'))
    factors = {
        'K': 1.0 / 1024,
        'M': 1,
        'G': 1024,
        'T': 1024 ** 2,
        'P': 1024 ** 3,
        'Z': 1024 ** 4,
    }
    unit = s[-1]
    if unit not in factors:
        raise Exception("unit not supported: %s"%unit)
    return int(value * factors[unit])
def load(self, di=None):
if di is not None:
self.di = di
if self.di is None:
from rcDiskInfoSunOS import diskInfo
self.di = diskInfo(deferred=True)
self.load_disks()
self.load_zpool()
self.load_sds()
self.load_vx_dmp()
self.load_vx_vm()
def blacklist(self, devname):
    """Return True for pseudo devices never included in the tree."""
    patterns = (r'^loop[0-9]*.*', r'^ram[0-9]*.*', r'^scd[0-9]*', r'^sr[0-9]*')
    return any(re.match(pattern, devname) for pattern in patterns)
if __name__ == "__main__":
tree = DevTree()
tree.load()
#print(tree)
tree.print_tree_bottom_up()
#print(map(lambda x: x.alias, tree.get_top_devs()))
opensvc-1.8~20170412/lib/resContainerVbox.py 0000644 0001750 0001750 00000007724 13073467726 020710 0 ustar jkelbert jkelbert import resources as Res
import rcExceptions as ex
from rcUtilities import qcall
import resContainer
from rcGlobalEnv import rcEnv
import os
utilities = __import__('rcUtilities'+rcEnv.sysname)
class Vbox(resContainer.Container):
    """VirtualBox container resource driver.

    Drives a VirtualBox virtual machine through the VBoxManage command
    line tool: start, stop, status and file synchronization hints.
    """

    def __init__(self,
                 rid,
                 name,
                 guestos=None,
                 osvc_root_path=None,
                 **kwargs):
        resContainer.Container.__init__(self,
                                        rid=rid,
                                        name=name,
                                        type="container.vbox",
                                        guestos=guestos,
                                        osvc_root_path=osvc_root_path,
                                        **kwargs)
        # seconds to wait for a clean acpi shutdown before poweroff
        self.shutdown_timeout = 240
        #self.sshbin = '/usr/local/bin/ssh'
        # cache for get_vminfo()
        self.vminfo = None

    def __str__(self):
        return "%s name=%s" % (Res.Resource.__str__(self), self.name)

    def get_vminfo(self):
        """Return the VM properties as a dict parsed from
        'VBoxManage showvminfo --machinereadable'.
        Returns None if the command fails. The result is cached.
        """
        if self.vminfo is not None:
            return self.vminfo
        cmd = ['VBoxManage', 'showvminfo', '--machinereadable', self.name]
        (ret, out, err) = self.call(cmd)
        if ret != 0:
            return None
        h = {}
        for line in out.split('\n'):
            l = line.split('=')
            if len(l) != 2:
                continue
            key = l[0].strip('"')
            val = l[1].strip('"')
            h[key] = val
        self.vminfo = h
        return self.vminfo

    def files_to_sync(self):
        """Files and folders to replicate to peer nodes: the VM config
        file, snapshot folder and log folder."""
        vminfo = self.get_vminfo()
        if vminfo is None:
            return []
        return [vminfo['CfgFile'], vminfo['SnapFldr'], vminfo['LogFldr']]

    def check_capabilities(self):
        """Return True if VBoxManage is usable on this node."""
        cmd = ['VBoxManage', '-v']
        (ret, out, err) = self.call(cmd)
        return ret == 0

    def state(self, nodename=None):
        """Return 'on' if the VM appears in 'VBoxManage list runningvms',
        'off' if not, None if the state can not be determined.
        Queries a remote node when nodename is set.

        Fix: nodename now defaults to None; the original signature had
        no default, so every internal self.state() call raised TypeError.
        """
        cmd = ['VBoxManage', 'list', 'runningvms']
        if nodename is not None:
            cmd = rcEnv.rsh.split() + [nodename] + cmd
        (ret, out, err) = self.call(cmd)
        if ret != 0:
            return None
        for line in out.split('\n'):
            l = line.split('"')
            if len(l) < 2:
                continue
            if l[1] == self.name:
                return 'on'
        return 'off'

    def ping(self):
        return utilities.check_ping(self.addr)

    def container_action(self, action, add=None):
        """Run 'VBoxManage <action> <vmname> [options]'; raise
        ex.excError on failure.

        Fix: 'add' defaults to None instead of a shared mutable list.
        """
        cmd = ['VBoxManage', action, self.name] + (add or [])
        (ret, buff, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError

    def container_start(self):
        # fix: state() returns None (NoneType) on error; the original
        # compared against the string 'None' which could never match
        state = self.state()
        if state is None:
            raise ex.excError
        elif state == 'off':
            self.container_action('startvm')
        elif state == 'on':
            self.log.info("container is already up")

    def container_forcestop(self):
        self.container_action('controlvm', ['poweroff'])

    def container_stop(self):
        state = self.state()
        if state is None:
            # same string-vs-None fix as container_start
            raise ex.excError
        elif state == 'off':
            self.log.info("container is already down")
            # fix: return instead of falling through to the wait loop
            return
        if state == 'on':
            self.container_action('controlvm', ['acpipowerbutton'])
            try:
                self.log.info("wait for container shutdown")
                self.wait_for_fn(self.is_shutdown, self.shutdown_timeout, 2)
            except ex.excError:
                # acpi shutdown timed out: hard power off
                self.container_forcestop()

    def check_manual_boot(self):
        return True

    def is_shutdown(self):
        return self.state() == 'off'

    def is_down(self):
        return self.state() == 'off'

    def is_up_on(self, nodename):
        return self.is_up(nodename)

    def is_up(self, nodename=None):
        return self.state(nodename) == 'on'
opensvc-1.8~20170412/lib/checkMpathHP-UX.py 0000644 0001750 0001750 00000004437 13073467726 020244 0 ustar jkelbert jkelbert import checks
from rcUtilities import justcall
class check(checks.check):
    """Multipath path-count checker for HP-UX, based on
    'scsimgr lun_map' output.

    Emits one record per LUN wwid with the number of usable paths and
    the name of the service using the device.
    """
    chk_type = "mpath"
    # cache of svc -> disklist(); class-level, shared by all instances
    svcdevs = {}

    def find_svc(self, dev):
        """Return the name of the service using dev, or '' if none."""
        for svc in self.svcs:
            if svc not in self.svcdevs:
                try:
                    devs = svc.disklist()
                except Exception as e:
                    # best effort: a failing disklist() means no devices
                    devs = []
                self.svcdevs[svc] = devs
            if dev in self.svcdevs[svc]:
                return svc.svcname
        return ''

    def do_check(self):
        """Parse 'scsimgr lun_map': one 'LUN PATH INFORMATION' section
        per LUN, followed by per-path State lines. Pass-through
        (/dev/pt/pt*) and virtual-transport devices are skipped.
        """
        cmd = ['scsimgr', 'lun_map']
        out, err, ret = justcall(cmd)
        if ret != 0:
            return self.undef
        lines = out.split('\n')
        if len(lines) < 1:
            return self.undef
        r = []
        dev = None
        wwid = None
        for line in lines:
            if "LUN PATH INFORMATION FOR LUN" in line:
                # new mpath
                # - store previous
                # - reset path counter
                if dev is not None and not dev.startswith('/dev/pt/pt') and wwid != '=' and "Virtual" not in proto:
                    r.append({'chk_instance': wwid,
                              'chk_value': str(n),
                              'chk_svcname': self.find_svc(dev),
                             })
                n = 0
                l = line.split()
                if len(l) < 2:
                    continue
                dev = l[-1]
            elif line.startswith("World Wide Identifier"):
                wwid = line.split()[-1].replace("0x","")
            elif line.startswith("SCSI transport protocol"):
                proto = line.split("=")[-1]
            elif line.startswith("State"):
                state = line.split("=")[-1].strip()
            elif line.startswith("Last Open or Close state"):
                last_known_state = line.split("=")[-1].strip()
                # count the path when it is active/standby, or unopened
                # but last known active/standby
                # NOTE(review): original indentation was lost in
                # extraction; the counting block is assumed to belong to
                # this branch -- confirm against upstream
                if state in ("ACTIVE", "STANDBY"):
                    n += 1
                elif state == "UNOPEN" and last_known_state in ("ACTIVE", "STANDBY"):
                    n += 1
        # flush the last parsed LUN section
        if dev is not None and not dev.startswith('/dev/pt/pt') and wwid != '=' and "Virtual" not in proto:
            r.append({'chk_instance': wwid,
                      'chk_value': str(n),
                      'chk_svcname': self.find_svc(dev),
                     })
        return r
opensvc-1.8~20170412/lib/checkMpathPowerpathLinux.py 0000777 0001750 0001750 00000000000 13073467726 026674 2checkMpathPowerpath.py ustar jkelbert jkelbert opensvc-1.8~20170412/lib/resShareNfsHP-UX.py 0000644 0001750 0001750 00000007321 13073467726 020413 0 ustar jkelbert jkelbert import os
from rcGlobalEnv import rcEnv
from rcUtilities import justcall, which
import rcStatus
import rcExceptions as ex
from resources import Resource
class Share(Resource):
    """NFS share resource for HP-UX, driven by share(1M)/unshare(1M)
    and the /etc/dfs/sharetab state file.
    """

    def __init__(self, rid, path, opts, **kwargs):
        Resource.__init__(self, rid, type="share.nfs", **kwargs)
        self.sharetab = "/etc/dfs/sharetab"
        self.dfstab = "/etc/dfs/dfstab"
        if not which("share"):
            raise ex.excInitError("share is not installed")
        self.label = "nfs:"+path
        self.path = path
        try:
            # normalize the configured options for reliable comparison
            self.opts = self.parse_opts(opts)
        except ex.excError as e:
            raise ex.excInitError(str(e))

    def get_opts(self):
        """Return the normalized option string the path is currently
        exported with, or an empty string when not exported.
        """
        if not os.path.exists(self.sharetab):
            # NOTE(review): self.data is never assigned in this class;
            # this branch would raise AttributeError. Kept as-is pending
            # confirmation of the intent (probably should be "").
            return self.data
        with open(self.sharetab, 'r') as f:
            buff = f.read()
        for line in buff.split('\n'):
            # sharetab format: <path> <resource> <fstype> <opts>
            words = line.split()
            if len(words) != 4:
                continue
            path = words[0]
            if path != self.path:
                continue
            res = words[1]
            fstype = words[2]
            if fstype != "nfs":
                continue
            opts = words[3]
            return self.parse_opts(opts)
        return ""

    def is_up(self):
        """Return True when the path is exported with the expected
        options. Sets self.issues when exported with other options.
        """
        self.issues = ""
        opts = self.get_opts()
        if len(opts) == 0:
            return False
        if opts != self.opts:
            self.issues = "%s exported with unexpected options: %s, expected %s"%(self.path, opts, self.opts)
            return False
        return True

    def start(self):
        """Export the path, unsharing first when it is exported with
        unexpected options."""
        try:
            up = self.is_up()
        except ex.excError as e:
            self.log.error("skip start because the share is in unknown state")
            return
        if up:
            self.log.info("%s is already up" % self.path)
            return
        if "unexpected options" in self.issues:
            self.log.info("reshare %s because unexpected options were detected"%self.path)
            cmd = [ 'unshare', '-F', 'nfs', self.path ]
            ret, out, err = self.vcall(cmd)
            if ret != 0:
                raise ex.excError(err)
        self.can_rollback = True
        cmd = [ 'share', '-F', 'nfs', '-o', self.opts, self.path ]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ex.excError(err)

    def stop(self):
        """Unexport the path. Proceeds with the unshare even when the
        current state can not be determined."""
        try:
            up = self.is_up()
        except ex.excError as e:
            self.log.error("continue with stop even if the share is in unknown state")
            # fix: 'up' was left unassigned here, crashing with
            # UnboundLocalError below; force the unshare attempt as the
            # log message announces
            up = True
        if not up:
            self.log.info("%s is already down" % self.path)
            return 0
        cmd = [ 'unshare', '-F', 'nfs', self.path ]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ex.excError

    def _status(self, verbose=False):
        """Report UP/DOWN (or STDBY_* on always_on nodes), WARN on
        unexpected export options or undeterminable state."""
        try:
            up = self.is_up()
        except ex.excError as e:
            self.status_log(str(e))
            return rcStatus.WARN
        if len(self.issues) > 0:
            self.status_log(self.issues)
            return rcStatus.WARN
        if rcEnv.nodename in self.always_on:
            if up: return rcStatus.STDBY_UP
            else: return rcStatus.STDBY_DOWN
        else:
            if up: return rcStatus.UP
            else: return rcStatus.DOWN

    def parse_opts(self, opts):
        """Normalize an export option string: sort the option list, and
        sort the client lists of ro=/rw=/access= options, so two
        equivalent strings compare equal."""
        o = sorted(opts.split(','))
        out = []
        for e in o:
            if e.startswith('ro=') or e.startswith('rw=') or e.startswith('access='):
                opt, clients = e.split('=')
                clients = ':'.join(sorted(clients.split(':')))
                if len(clients) == 0:
                    # drop options with an empty client list
                    continue
                out.append('='.join((opt, clients)))
            else:
                out.append(e)
        return ','.join(out)
opensvc-1.8~20170412/lib/rcAssetWindows.py 0000644 0001750 0001750 00000011342 13073467726 020363 0 ustar jkelbert jkelbert import os
import sys
import platform
import datetime
from rcUtilities import justcall, which
from rcUtilitiesWindows import get_registry_value
import rcAsset
import ctypes
import wmi
from rcDiskInfoWindows import diskInfo
class MEMORYSTATUSEX(ctypes.Structure):
    """ctypes mirror of the Win32 MEMORYSTATUSEX structure, for use
    with kernel32.GlobalMemoryStatusEx.

    dwLength must be pre-set to the structure size before the call.
    """
    _fields_ = [("dwLength", ctypes.c_uint),
                ("dwMemoryLoad", ctypes.c_uint),
                ("ullTotalPhys", ctypes.c_ulonglong),
                ("ullAvailPhys", ctypes.c_ulonglong),
                ("ullTotalPageFile", ctypes.c_ulonglong),
                ("ullAvailPageFile", ctypes.c_ulonglong),
                ("ullTotalVirtual", ctypes.c_ulonglong),
                ("ullAvailVirtual", ctypes.c_ulonglong),
                ("sullAvailExtendedVirtual", ctypes.c_ulonglong),]

    def __init__(self):
        # fix: let ctypes compute the size instead of hand-counting
        # "2*4 + 7*8" bytes (which silently breaks if fields change)
        self.dwLength = ctypes.sizeof(self)
        return super(MEMORYSTATUSEX, self).__init__()
class Asset(rcAsset.Asset):
    """Windows asset discovery, based on WMI queries and a ctypes call
    to GlobalMemoryStatusEx."""

    def __init__(self, node):
        self.w = wmi.WMI()
        self.cpuinfo = self.w.Win32_Processor()
        rcAsset.Asset.__init__(self, node)
        # snapshot the global memory counters once at init
        self.memstat = MEMORYSTATUSEX()
        ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(self.memstat))

    def _get_tz(self):
        # TODO: return in fmt "+01:00"
        return

    def _get_mem_bytes(self):
        # ullTotalPhys is in bytes; reported in MB (method name is historic)
        return str(self.memstat.ullTotalPhys // 1024 // 1024)

    def _get_mem_banks(self):
        # one Win32_PhysicalMemory instance per populated bank
        md = len(self.w.WIN32_PhysicalMemory())
        return str(md)

    def _get_mem_slots(self):
        # sum slot counts over all physical memory arrays
        n = 0
        for a in self.w.WIN32_PhysicalMemoryArray():
            n += a.MemoryDevices
        return str(n)

    def _get_os_vendor(self):
        return 'Microsoft'

    def _get_os_name(self):
        return 'Windows'

    def _get_os_release(self):
        v = sys.getwindowsversion()
        # NOTE(review): 'product' is built but never used below
        product = {
            1: 'Workstation',
            2: 'Domain Controller',
            3: 'Server',
        }
        # reformat "2008ServerR2"-style strings into "2008 Server R2"
        s = platform.release()
        s = s.replace('Server', ' Server')
        s = s.replace('ServerR', ' Server R')
        s = s.replace('Workstation', ' Workstation')
        s += " %s" % v.service_pack
        return s

    def _get_os_kernel(self):
        v = sys.getwindowsversion()
        return ".".join(map(str, [v.major, v.minor, v.build]))

    def _get_os_arch(self):
        return platform.uname()[4]

    def _get_cpu_freq(self):
        # sockets are assumed identical: keep the last reported speed
        for i in self.cpuinfo:
            cpuspeed = i.MaxClockSpeed
        return str(cpuspeed)

    def _get_cpu_cores(self):
        n = 0
        for p in self.cpuinfo:
            try:
                cores = p.NumberOfCores
            except:
                # NumberOfCores can be absent on old systems; count 1
                cores = 1
            n += cores
        return str(n)

    def _get_cpu_dies(self):
        # count distinct sockets
        s = set([])
        for p in self.cpuinfo:
            s.add(p.SocketDesignation)
        n = len(s)
        return str(n)

    def _get_cpu_model(self):
        # sockets are assumed identical: keep the last reported name
        for i in self.cpuinfo:
            cputype = i.Name
        return cputype

    def _get_enclosure(self):
        for i in self.w.Win32_SystemEnclosure():
            name = i.Name
        return name

    def _get_serial(self):
        for i in self.w.Win32_ComputerSystemProduct():
            name = i.IdentifyingNumber
        return name

    def _get_model(self):
        for i in self.w.Win32_ComputerSystemProduct():
            name = i.Name
        return name

    def _get_hba(self):
        """Return [(portwwn, 'fc'), ...] for the local fc hbas.
        Side effect: caches the diskInfo backend in self.di, which
        _get_targets relies on."""
        hbas = []
        self.di = diskInfo()
        for index, portwwn, host in self.di._get_fc_hbas():
            hbas.append((portwwn, 'fc'))
        return hbas

    def _get_targets(self):
        """Return [(initiator portwwn, target portwwn), ...] parsed
        from 'fcinfo /mapping' output.

        NOTE(review): depends on self.di being set by a prior
        _get_hba() call; raises AttributeError if called first --
        confirm the caller ordering in rcAsset.Asset.
        """
        maps = []
        if not which('fcinfo'):
            print(' fcinfo is not installed')
            return []
        for index, portwwn, host in self.di._get_fc_hbas():
            cmd = ['fcinfo', '/mapping', '/ai:'+index]
            out, err, ret = justcall(cmd)
            if ret != 0:
                print('error executing', ' '.join(cmd), out, err, ret)
                continue
            for line in out.split('\n'):
                if not line.startswith('(x'):
                    continue
                l = line.split()
                if len(l) < 3:
                    continue
                tgtportwwn = l[2].strip(',').replace(':', '')
                if (portwwn, tgtportwwn) in maps:
                    continue
                maps.append((portwwn, tgtportwwn))
        return maps

    def get_last_boot(self):
        """Return the last boot date as YYYY-MM-DD, derived from the
        SystemUpTime performance counter, or None on error."""
        payload = self.w.Win32_PerfFormattedData_PerfOS_System()
        uptime = payload[-1].SystemUpTime
        try:
            last = datetime.datetime.now() - datetime.timedelta(seconds=int(uptime))
        except:
            return
        last = last.strftime("%Y-%m-%d")
        self.print_last_boot(last)
        return last
opensvc-1.8~20170412/lib/provDiskGce.py 0000644 0001750 0001750 00000005215 13073467726 017626 0 ustar jkelbert jkelbert from provisioning import Provisioning
import rcExceptions as ex
from rcUtilities import convert_size
from svcBuilder import conf_get_string_scope, conf_get_int_scope
class ProvisioningDisk(Provisioning):
    """gcloud-based provisioner for google compute engine disks."""

    # optional config keyword -> gcloud flag; appended when set.
    # replaces five copy-pasted try/except stanzas in the original.
    OPTIONAL_KEYWORDS = (
        ("description", "--description"),
        ("image", "--image"),
        ("source_snapshot", "--source-snapshot"),
        ("image_project", "--image-project"),
        ("disk_type", "--type"),
    )

    def __init__(self, r):
        Provisioning.__init__(self, r)

    def provisioner(self):
        """Create every gce disk of the resource, refresh the disk
        cache, then start the resource."""
        for name in self.r.names:
            self._provisioner(name)
        self.r.log.info("provisioned")
        self.r.get_disks(refresh=True)
        self.r.start()
        return True

    def _provisioner(self, name):
        """Create one gce disk, idempotently. The 'size' keyword is
        mandatory; the others are optional."""
        disk_names = self.r.get_disk_names()
        if name in disk_names:
            self.r.log.info("gce disk name %s already provisioned" % name)
            return
        try:
            size = conf_get_string_scope(self.r.svc, self.r.svc.config, self.r.rid, "size")
        except Exception:
            raise ex.excError("gce disk name %s in %s: missing the 'size' parameter" % (name, self.r.rid))
        size = str(convert_size(size, _to="MB"))+'MB'
        cmd = ["gcloud", "compute", "disks", "create", "-q",
               name,
               "--size", size,
               "--zone", self.r.gce_zone]
        for keyword, flag in self.OPTIONAL_KEYWORDS:
            try:
                value = conf_get_string_scope(self.r.svc, self.r.svc.config, self.r.rid, keyword)
            except Exception:
                # keyword not set in the service config: skip the flag
                continue
            cmd += [flag, value]
        self.r.vcall(cmd)

    def unprovisioner(self):
        """Stop the resource, then delete all its gce disks."""
        self.r.stop()
        for name in self.r.names:
            self._unprovisioner(name)
        self.r.log.info("unprovisioned")
        return True

    def _unprovisioner(self, name):
        """Delete one gce disk, idempotently."""
        disk_names = self.r.get_disk_names()
        if name not in disk_names:
            self.r.log.info("gce disk name %s already unprovisioned" % name)
            return
        cmd = ["gcloud", "compute", "disks", "delete", "-q", name,
               "--zone", self.r.gce_zone]
        self.r.vcall(cmd)
opensvc-1.8~20170412/lib/resDiskLvVcsLinux.py 0000644 0001750 0001750 00000001422 13073467726 021004 0 ustar jkelbert jkelbert import resources as Res
import rcStatus
import rcExceptions as ex
class Disk(Res.Resource):
    """Status-only resource for a logical volume managed by the
    Veritas cluster engine: start/stop are no-ops, the status is read
    from the VCS resource state.
    """

    def __init__(self, rid=None, vgname=None, lvname=None, **kwargs):
        Res.Resource.__init__(self, rid, "disk.lv", **kwargs)
        self.name = "%s@%s" % (lvname, vgname)
        self.label = self.name

    def start(self):
        # lifecycle is delegated to VCS
        pass

    def stop(self):
        # lifecycle is delegated to VCS
        pass

    def _status(self, verbose=False):
        """Map the VCS 'State' attribute to an opensvc status."""
        try:
            state = self.svc.get_res_val(self.vcs_name, 'State')
        except ex.excError as err:
            self.status_log(str(err))
            return rcStatus.WARN
        state_map = {
            "ONLINE": rcStatus.UP,
            "OFFLINE": rcStatus.DOWN,
        }
        if state in state_map:
            return state_map[state]
        self.status_log(state)
        return rcStatus.WARN
opensvc-1.8~20170412/lib/rcAssetSunOS.py 0000644 0001750 0001750 00000017504 13073467726 017746 0 ustar jkelbert jkelbert import os
import datetime
import re
from rcUtilities import justcall, which
from rcGlobalEnv import rcEnv
import rcAsset
from rcZone import is_zone
class Asset(rcAsset.Asset):
    """SunOS/Solaris asset discovery, based on prtdiag, prtconf,
    psrinfo, kstat and friends."""

    def __init__(self, node=None):
        rcAsset.Asset.__init__(self, node)
        self.osver = 0.
        self.zone = is_zone()
        # cache prtdiag/prtconf output lines; both commands may fail
        # (notably inside zones), in which case empty lists are kept
        (out, err, ret) = justcall(['prtdiag'])
        if ret != 0 and len(out) < 4:
            self.prtdiag = []
        else:
            self.prtdiag = out.split('\n')
        (out, err, ret) = justcall(['prtconf'])
        if ret != 0 and len(out) < 4:
            self.prtconf = []
        else:
            self.prtconf = out.split('\n')

    def _get_mem_bytes(self):
        # prtconf reports "Memory size: <n> Megabytes"
        for l in self.prtconf:
            if 'Memory size:' in l:
                return l.split(':')[-1].split()[0]
        return '0'

    def _get_mem_banks(self):
        # populated banks: DIMM lines flagged "in use"
        l = [e for e in self.prtdiag if 'DIMM' in e and 'in use' in e]
        return str(len(l))

    def _get_mem_slots(self):
        l = [e for e in self.prtdiag if 'DIMM' in e]
        return str(len(l))

    def _get_os_vendor(self):
        return 'Oracle'

    def _get_os_name(self):
        f = '/etc/release'
        if os.path.exists(f):
            (out, err, ret) = justcall(['cat', f])
            if ret != 0:
                return 'Unknown'
            if 'OpenSolaris' in out:
                return 'OpenSolaris'
        return 'SunOS'

    def _get_os_release(self):
        # first line of /etc/release, stripped of vendor markers
        f = '/etc/release'
        if os.path.exists(f):
            (out, err, ret) = justcall(['cat', f])
            if ret != 0:
                return 'Unknown'
            return out.split('\n')[0].replace('OpenSolaris','').replace('Oracle', '').strip()
        return 'Unknown'

    def _get_os_kernel(self):
        (out, err, ret) = justcall(['uname', '-v'])
        if ret != 0:
            return 'Unknown'
        lines = out.split('\n')
        if len(lines) == 0:
            return 'Unknown'
        try:
            self.osver = float(lines[0])
        except:
            # non-numeric version string: report it verbatim
            return lines[0]
        if self.osver < 11.:
            return lines[0]
        else:
            # Solaris 11+: refine with the SRU from 'pkg info entire'
            (out, err, ret) = justcall(['pkg', 'info', 'entire'])
            if ret != 0:
                return 'Unknown'
            nfo = out.split('\n')
            for l in nfo:
                if 'Version: ' in l:
                    if 'SRU' in l:
                        return ' '.join([lines[0], 'SRU', l.split()[6].strip(')')])
                    elif lines[0] in l:
                        return l.split()[4].strip(')')
                    else:
                        return ' '.join([lines[0], l.split()[4]])
        return 'Unknown'

    def _get_os_arch(self):
        (out, err, ret) = justcall(['uname', '-m'])
        if ret != 0:
            return 'Unknown'
        return out.split('\n')[0]

    def _get_cpu_freq(self):
        # the frequency is the word preceding "MHz)" in psrinfo -pv
        # NOTE(review): 'prev' is unbound if "MHz)" is the very first
        # word of the output -- assumed impossible, confirm
        (out, err, ret) = justcall(['/usr/sbin/psrinfo', '-pv'])
        if ret != 0:
            return '0'
        for w in out.split():
            if 'MHz)' in w:
                return prev
            prev = w
        # fallback: kstat cpu_info clock_MHz
        (out, err, ret) = justcall(['kstat', 'cpu_info'])
        if ret != 0:
            return '0'
        l = out.split()
        if 'clock_MHz' in l:
            freq = l[l.index('clock_MHz')+1]
            return freq
        return '0'

    def _get_cpu_cores(self):
        # count distinct core ids; fall back to chip ids on kernels
        # that do not expose core_id
        cmd = ['kstat', 'cpu_info']
        out, err, ret = justcall(cmd)
        if ret != 0:
            return '0'
        core_ids = set([])
        if "core_id" in out:
            keyword = "core_id"
        else:
            keyword = "chip_id"
        for line in out.split('\n'):
            if not line.strip().startswith(keyword):
                continue
            core_ids.add(line.split()[-1])
        return str(len(core_ids))

    def _get_cpu_threads(self):
        # one psrinfo line per logical cpu
        out, err, ret = justcall(['/usr/sbin/psrinfo'])
        if ret != 0:
            return '0'
        return str(len(out.split('\n'))-1)

    def _get_cpu_dies(self):
        # psrinfo -p prints the physical processor count
        (out, err, ret) = justcall(['/usr/sbin/psrinfo', '-p'])
        if ret != 0:
            return '0'
        return out.split('\n')[0]

    def _get_cpu_model(self):
        # the model is on the last non-empty line of psrinfo -pv
        (out, err, ret) = justcall(['/usr/sbin/psrinfo', '-pv'])
        if ret != 0:
            return 'Unknown'
        lines = out.split('\n')
        lines = [line for line in lines if len(line) > 0]
        if len(lines) == 0:
            return 'Unknown'
        model = lines[-1].strip()
        if model.startswith('The '):
            model = model.replace('The ', '')
        # strip trailing per-chip details
        known_garbage = [' (chipid', ' (portid', ' physical proc']
        for s in known_garbage:
            try:
                i = model.index(s)
                model = model[:i]
            except ValueError:
                continue
        return model

    def _get_serial(self):
        # prefer the sneep-recorded chassis serial; fall back to hostid
        if which("sneep"):
            cmd = ['sneep']
        else:
            cmd = ['hostid']
        (out, err, ret) = justcall(cmd)
        if ret != 0:
            return 'Unknown'
        return out.split('\n')[0]

    def _get_model(self):
        if self.zone:
            return "Solaris Zone"
        for l in self.prtdiag:
            if 'System Configuration:' in l:
                return l.split(':')[-1].strip()
        return 'Unknown'

    def __get_hba(self):
        # fc / fcoe
        """
        # cfgadm -s match="exact,select=type(fc-fabric)"
        Ap_Id Type Receptacle Occupant Condition
        c5 fc-fabric connected configured unknown
        """
        # returns [(hba_id, hba_type, [target, ...]), ...] from
        # cfgadm (hba enumeration) + luxadm dump_map (target ports)
        l = []
        if not which('cfgadm'):
            return []
        if not which('luxadm'):
            return []
        cmd = ['cfgadm', '-lv', '-s', 'match=exact,select=type(fc-fabric)']
        out, err, ret = justcall(cmd)
        if ret != 0:
            return []
        words = out.split()
        hba_names = [word for word in words if word.startswith("/devices/")]
        if len(hba_names) == 0:
            return []
        hba_type = 'fc'
        for hba_name in hba_names:
            targets = []
            cmd = ['luxadm', '-e', 'dump_map', hba_name]
            out, err, ret = justcall(cmd)
            if ret != 0:
                continue
            lines = out.split('\n')
            if len(lines) < 2:
                continue
            # NOTE(review): 'hba_id' is unbound if no 'Host Bus' line
            # precedes a target line -- assumed luxadm always prints it
            for line in lines[1:]:
                words = line.split()
                if len(words) < 5:
                    continue
                if 'Host Bus' in line:
                    hba_id = words[3]
                else:
                    targets.append(words[3])
            l.append((hba_id, hba_type, targets))
        return l

    def _get_hba(self):
        l = self.__get_hba()
        # NOTE(review): under python3, map() returns an iterator, not a
        # list -- confirm callers tolerate it
        return map(lambda x: (x[0], x[1]), l)

    def _get_targets(self):
        # flatten to [(hba_id, target), ...]
        l = self.__get_hba()
        m = []
        for hba_id, hba_type, targets in l:
            for target in targets:
                m.append((hba_id, target))
        return m

    def _get_bios_version(self):
        arch = self._get_os_arch().lower()
        if arch.startswith("sun4"):
            return self._get_bios_version_sparc()
        else:
            return self._get_bios_version_intel()

    def _get_bios_version_sparc(self):
        # OBP version from prtdiag, with the build date stripped
        for l in self.prtdiag:
            if l.startswith("OBP "):
                v = l.replace("OBP ", "").strip()
                v = re.sub(' [0-9]{4}/[0-9]{2}/[0-9]{2} [0-9]{2}:[0-9]{2}', '', v)
                return v
        return ''

    def _get_bios_version_intel(self):
        # "Version String" field of the smbios BIOS information section
        if which('smbios') is None:
            return ''
        out, err, ret = justcall(['smbios'])
        if ret != 0:
            return ''
        try:
            i = out.index('BIOS information')
        except ValueError:
            return ''
        for l in out[i:].splitlines():
            if 'Version String' in l:
                return l.split(':')[-1].strip()
        return ''
if __name__ == "__main__":
    # ad-hoc test: print the detected cpu model
    print(Asset()._get_cpu_model())
opensvc-1.8~20170412/lib/rcPkgFreeBSD.py 0000644 0001750 0001750 00000001773 13073467726 017614 0 ustar jkelbert jkelbert from rcUtilities import justcall, which, cache
from rcGlobalEnv import rcEnv
def listpkg():
    """Return the node package inventory, merging entries from the
    legacy pkg_* tools and from pkgng."""
    return list_pkg_info() + list_pkg_query()
@cache("pkg_info")
def list_pkg_info():
    """Inventory from the legacy pkg_info tool.

    Each entry is [nodename, package name, version, ""]; the first
    output field is "<name>-<version>", split on the last dash.
    """
    if which('pkg_info') is None:
        return []
    out, err, ret = justcall(['pkg_info'])
    entries = []
    for line in out.splitlines():
        words = line.split()
        if len(words) < 2:
            continue
        # rpartition splits name and version on the last '-', matching
        # the original split('-')/join semantics (empty name if no dash)
        pkgname, _, version = words[0].rpartition('-')
        entries.append([rcEnv.nodename, pkgname, version, ''])
    return entries
@cache("pkg_query")
def list_pkg_query():
    """Inventory from pkgng: 'pkg query' prints name, version and arch
    per line, ';;' separated; each entry is prefixed with the nodename.
    """
    if which('pkg') is None:
        return []
    out, err, ret = justcall(['pkg', 'query', '-a', '%n;;%v;;%q'])
    entries = []
    for line in out.splitlines():
        fields = line.split(';;')
        if len(fields) < 3:
            continue
        entries.append([rcEnv.nodename] + fields)
    return entries
def listpatch():
    """FreeBSD has no separate patch inventory: always empty."""
    return []
opensvc-1.8~20170412/lib/rcDiskInfoSunOS.py 0000644 0001750 0001750 00000011533 13073467726 020371 0 ustar jkelbert jkelbert import rcDiskInfo
import rcDevTreeVeritas
from rcUtilities import justcall
import math
from rcGlobalEnv import rcEnv
from rcZone import is_zone
class diskInfo(rcDiskInfo.diskInfo):
    """SunOS disk information backend: wwid, vendor, product and size
    per device, from mpathadm/prtvtoc (or veritas inq for dmp paths).
    """
    # NOTE(review): class-level mutable caches, shared by every
    # instance of this class -- presumably intentional caching
    h = {}
    done = []

    def get_val(self, line):
        # "key: value" -> "value"; None when not a 2-field line
        l = line.split(":")
        if len(l) != 2:
            return
        return l[-1].strip()

    def get_part_size(self, dev):
        """Size in MB of the slice designated by the last character of
        dev, parsed from prtvtoc output."""
        part = dev[-1]
        size = 0
        cmd = ['prtvtoc', dev]
        (out, err, ret) = justcall(cmd)
        if ret != 0:
            return size
        # first pass: sector size from the '*' comment header
        bytes_per_sect = 0
        for line in out.split('\n'):
            if not line.startswith('*'):
                continue
            if "bytes/sector" in line:
                bytes_per_sect = int(line.split()[1])
        if bytes_per_sect == 0:
            return 0
        # second pass: the partition table line for our slice
        for line in out.split('\n'):
            if line.startswith('*'):
                continue
            l = line.split()
            if len(l) != 6:
                continue
            if l[0] != part:
                continue
            return math.ceil(1.*int(l[4])*bytes_per_sect/1024/1024)
        return 0

    def get_size(self, dev):
        """Whole-device size in MB, computed from prtvtoc geometry
        comments (either accessible sectors, or the cylinder math)."""
        size = 0
        dev = dev.replace("/dev/dsk/", "/dev/rdsk/")
        dev = dev.replace("/dev/vx/dmp/", "/dev/vx/rdmp/")
        cmd = ['prtvtoc', dev]
        (out, err, ret) = justcall(cmd)
        if ret != 0:
            return size
        """
        * 512 bytes/sector
        * 63 sectors/track
        * 255 tracks/cylinder
        * 16065 sectors/cylinder
        * 19581 cylinders
        * 19579 accessible cylinders
        ** OR:
        * 188743612 accessible sectors
        """
        for line in out.split('\n'):
            if not line.startswith('*'):
                continue
            try:
                if "bytes/sector" in line:
                    n1 = int(line.split()[1])
                if "accessible sectors" in line:
                    s0 = int(line.split()[1])
                    size = math.ceil(1. * s0 * n1 / 1024 / 1024)
                    break
                if "sectors/cylinder" in line:
                    n2 = int(line.split()[1])
                if "cylinders" in line:
                    # matches both "cylinders" and "accessible cylinders"
                    n3 = int(line.split()[1])
                    size = math.ceil(1. * n1 * n2 * n3 / 1024 / 1024)
            except:
                # operands may not be parsed yet; ignore and keep going
                pass
        return size

    def __init__(self, deferred=False):
        # deferred=True skips the initial full /dev/rdsk scan; devices
        # are then scanned on demand by get()
        self.zone = is_zone()
        self.deferred = deferred
        if deferred:
            return
        self.scan()

    def scan(self):
        """Scan every whole-disk (s2) device once per process."""
        if 'scan' in self.done:
            return
        self.done.append('scan')
        cmd = ["/usr/bin/find", "/dev/rdsk", "-name", "c*s2"]
        (out, err, ret) = justcall(cmd)
        if ret != 0:
            return
        lines = out.split('\n')
        if len(lines) < 2:
            return
        for e in lines:
            if "/dev/" not in e:
                continue
            dev = e.strip()
            self.scan_dev(dev)

    def scan_dev(self, dev):
        """Resolve wwid/vendor/product/size for one device and record
        it in the class cache."""
        dev = dev.replace("/dev/vx/dmp/", "/dev/vx/rdmp/")
        if "dmp/" in dev:
            # veritas dmp path: use the veritas inquiry for the wwid
            wwid = rcDevTreeVeritas.DevTreeVeritas().vx_inq(dev)
            vid = ""
            pid = ""
            size = 0
        else:
            cmd = ["mpathadm", "show", "lu", dev]
            (out, err, ret) = justcall(cmd)
            if ret != 0:
                return
            if "Error: Logical-unit " + dev + " is not found" in err:
                # not multipathed: forge a node-local identifier
                dsk = dev.replace("/dev/rdsk/", "")
                dsk = dsk.replace("s2", "")
                wwid = rcEnv.nodename + "." + dsk
                vid = "LOCAL"
                pid = ""
                size = 0
            else:
                # parse vendor/product/name from mpathadm output
                wwid = ""
                vid = ""
                pid = ""
                size = 0
                for line in out.split('\n'):
                    if line.startswith("\tVendor:"):
                        vid = self.get_val(line)
                    elif line.startswith("\tProduct:"):
                        pid = self.get_val(line)
                    elif line.startswith("\tName:"):
                        wwid = self.get_val(line)
                size = self.get_size(dev)
        self.h[dev] = dict(wwid=wwid, vid=vid, pid=pid, size=size)

    def get(self, dev, type):
        """Return one attribute ('wwid', 'vid', 'pid' or 'size') of
        dev, scanning on demand; 'unknown'/0 defaults when the device
        can not be resolved, None inside a zone."""
        dev = dev.replace("/dev/vx/dmp/", "/dev/vx/rdmp/")
        if self.deferred or dev not in self.h:
            self.scan_dev(dev)
        dummy = dict(wwid="unknown", vid="unknown", pid="unknown", size=0)
        if dev not in self.h:
            if self.zone:
                return None
            return dummy[type]
        return self.h[dev][type]

    def disk_id(self, dev):
        return self.get(dev, 'wwid')

    def disk_vendor(self, dev):
        return self.get(dev, 'vid')

    def disk_model(self, dev):
        return self.get(dev, 'pid')

    def disk_size(self, dev):
        return self.get(dev, 'size')
opensvc-1.8~20170412/lib/provDiskMdLinux.py 0000644 0001750 0001750 00000005573 13073467726 020517 0 ustar jkelbert jkelbert from provisioning import Provisioning
import os
import rcExceptions as ex
from svcBuilder import conf_get_string_scope, conf_get_int_scope
from rcUtilities import convert_size
from subprocess import *
class ProvisioningDisk(Provisioning):
    """mdadm-based provisioner for linux md raid devices."""

    def __init__(self, r):
        Provisioning.__init__(self, r)

    def provisioner(self):
        """Create the md device, then start the resource."""
        self.provisioner_md()
        self.r.log.info("provisioned")
        self.r.start()
        return True

    def provisioner_md(self):
        """Build and run 'mdadm --create' from the rid section
        parameters (level, devs, spares, chunk, layout), then record
        the real md uuid back into the service configuration.
        """
        if self.r.has_it():
            self.r.log.info("already provisioned")
            return
        try:
            level = conf_get_string_scope(self.r.svc, self.r.svc.config, self.r.rid, "level")
        except:
            raise ex.excError("'level' provisioning parameter not set")
        try:
            devs = conf_get_string_scope(self.r.svc, self.r.svc.config, self.r.rid, "devs").split()
        except:
            raise ex.excError("'devs' provisioning parameter not set")
        if len(devs) == 0:
            raise ex.excError("at least 2 devices must be set in the 'devs' provisioning parameter")
        # the remaining parameters are optional
        try:
            spares = conf_get_int_scope(self.r.svc, self.r.svc.config, self.r.rid, 'spares')
        except:
            spares = 0
        try:
            chunk = conf_get_string_scope(self.r.svc, self.r.svc.config, self.r.rid, 'chunk')
        except:
            chunk = None
        try:
            layout = conf_get_string_scope(self.r.svc, self.r.svc.config, self.r.rid, 'layout')
        except:
            layout = None
        # long md names cause a buffer overflow in mdadm
        name = "/dev/md/"+self.r.svc.svcname.split(".")[0]+"."+self.r.rid.replace("#", ".")
        cmd = [self.r.mdadm, '--create', name]
        cmd += ['-n', str(len(devs)-spares)]
        if level:
            cmd += ["-l", level]
        if spares:
            cmd += ["-x", str(spares)]
        if chunk:
            # mdadm expects the chunk size in KiB, rounded to 4
            cmd += ["-c", str(convert_size(chunk, _to="k", _round=4))]
        if layout:
            cmd += ["-p", layout]
        cmd += devs
        # pipe 'yes' into mdadm to answer its confirmation prompts
        _cmd = "yes | " + " ".join(cmd)
        self.r.log.info(_cmd)
        p1 = Popen(["yes"], stdout=PIPE)
        p2 = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=p1.stdout)
        out, err = p2.communicate()
        if p2.returncode != 0:
            raise ex.excError(err)
        if len(out) > 0:
            self.r.log.info(out)
        if len(err) > 0:
            self.r.log.error(err)
        # set a provisional uuid first: detail(), used by
        # get_real_uuid() below, needs self.r.uuid to find the device
        self.r.uuid = os.path.basename(name)
        uuid = self.get_real_uuid(name)
        self.r.uuid = uuid
        self.r.svc.config.set(self.r.rid, "uuid", uuid)
        self.r.svc.write_config()

    def get_real_uuid(self, name):
        """Extract the md UUID from 'mdadm --detail' output, raising
        excError when it can not be found."""
        buff = self.r.detail()
        for line in buff.split("\n"):
            line = line.strip()
            if line.startswith("UUID :"):
                return line.split(" : ")[-1]
        raise ex.excError("unable to determine md uuid")
opensvc-1.8~20170412/lib/resSyncBtrfs.py 0000644 0001750 0001750 00000035663 13073467726 020047 0 ustar jkelbert jkelbert import os
from rcGlobalEnv import rcEnv
import datetime
from subprocess import *
import rcExceptions as ex
import rcStatus
import resSync
from rcUtilities import justcall
import rcBtrfs
class SyncBtrfs(resSync.Sync):
"""define btrfs sync resource to be btrfs send/btrfs receive between nodes
"""
def sort_rset(self, rset):
    """Order the resource set by source subvolume path, so resources
    are processed in a stable, predictable order."""
    rset.resources.sort(key=lambda res: res.src_subvol)
def __init__(self,
             rid=None,
             target=None,
             src=None,
             dst=None,
             delta_store=None,
             sender=None,
             recursive=False,
             snap_size=0,
             **kwargs):
    """Btrfs send/receive sync resource.

    src and dst are "<label>:<subvolume>" strings; dst defaults to the
    same label/subvolume as src when unset. snap_size is accepted for
    interface compatibility but unused here.
    """
    resSync.Sync.__init__(self,
                          rid=rid,
                          type="sync.btrfs",
                          **kwargs)
    self.label = "btrfs of %s to %s"%(src, ", ".join(target))
    self.src = src
    self.target = target
    self.sender = sender
    self.recursive = recursive
    if ":" not in src or src.index(":") == len(src) - 1:
        raise ex.excInitError("malformed src value")
    self.src_label = src[:src.index(":")]
    self.src_subvol = src[src.index(":")+1:]
    if dst is None:
        # default: mirror the source location on the peer
        self.dst_label = self.src_label
        self.dst_subvol = self.src_subvol
    else:
        # fix: the original validated dst BEFORE the None check, so a
        # None dst raised TypeError instead of taking the defaults
        if ":" not in dst or dst.index(":") == len(dst) - 1:
            raise ex.excInitError("malformed dst value")
        self.dst_label = dst[:dst.index(":")]
        self.dst_subvol = dst[dst.index(":")+1:]
    if delta_store is None:
        self.delta_store = rcEnv.pathvar
    else:
        self.delta_store = delta_store
    # per-node destination Btrfs handlers, filled by get_dst_info()
    self.dst_btrfs = {}
    # source Btrfs handler, created lazily by init_src_btrfs()
    self.src_btrfs = None
def init_src_btrfs(self):
    """Lazily instantiate the source-side Btrfs handler (idempotent)."""
    if self.src_btrfs is not None:
        return
    try:
        self.src_btrfs = rcBtrfs.Btrfs(label=self.src_label, log=self.log)
    except rcBtrfs.ExecError as e:
        # re-raise as the generic opensvc action error
        raise ex.excError(str(e))
def pre_action(self, action):
    """Prepare snapshots
    Don't sync PRD services when running on !PRD node
    skip snapshot creation if delay_snap in tags
    delay_snap should be used for oracle archive datasets
    """
    # only consider enabled, non-skipped resources of the resource set
    resources = [ r for r in self.rset.resources if not r.skip and not r.is_disabled() ]
    if len(resources) == 0:
        return
    if not action.startswith('sync'):
        return
    self.pre_sync_check_svc_not_up()
    self.pre_sync_check_prd_svc_on_non_prd_node()
    self.init_src_btrfs()
    for i, r in enumerate(resources):
        if 'delay_snap' in r.tags:
            continue
        r.get_targets(action)
        tgts = r.targets.copy()
        if len(tgts) == 0:
            continue
        r.get_src_info()
        # take the @tosend snapshot ahead of the transfer, if missing
        if not r.src_btrfs.has_subvol(r.src_snap_tosend):
            r.create_snap(r.src, r.src_snap_tosend)
def __str__(self):
    """Human readable resource description."""
    base = resSync.Sync.__str__(self)
    return "%s target=%s src=%s" % (base, self.target, self.src)
def create_snap(self, snap_orig, snap):
    """Create a read-only snapshot of snap_orig at path snap.
    The snapshot must not pre-exist."""
    self.init_src_btrfs()
    try:
        self.src_btrfs.snapshot(snap_orig, snap, readonly=True, recursive=self.recursive)
    except rcBtrfs.ExistError:
        self.log.error('%s should not exist'%snap)
        raise ex.excError
    except rcBtrfs.ExecError:
        raise ex.excError
def get_src_info(self):
    """Compute the source-side paths: the @sent/@tosend snapshot names
    (subvolume path flattened with '_') and the subvolume itself."""
    self.init_src_btrfs()
    flat_name = self.src_subvol.replace('/', '_')
    snap_base = self.src_btrfs.snapdir + '/' + flat_name
    self.src_snap_sent = snap_base + '@sent'
    self.src_snap_tosend = snap_base + '@tosend'
    self.src = os.path.join(self.src_btrfs.rootdir, self.src_subvol)
def get_dst_info(self, node):
    """Compute the destination-side paths on a peer node, creating and
    caching its Btrfs handler on first use."""
    if node not in self.dst_btrfs:
        try:
            self.dst_btrfs[node] = rcBtrfs.Btrfs(label=self.dst_label, log=self.log, node=node)
        except rcBtrfs.ExecError as e:
            raise ex.excError(str(e))
        #self.dst_btrfs[node].setup_snap()
    flat_name = self.src_subvol.replace('/', '_')
    snap_base = self.dst_btrfs[node].snapdir + '/' + flat_name
    self.dst_snap_sent = snap_base + '@sent'
    self.dst_snap_tosend = snap_base + '@tosend'
    self.dst = os.path.join(self.dst_btrfs[node].rootdir, self.dst_subvol)
def get_peersenders(self):
    """Compute the set of peer nodes allowed to send to this node:
    every service node but ourselves, when sender is 'nodes'."""
    self.peersenders = set()
    if self.sender == 'nodes':
        self.peersenders |= self.svc.nodes
        self.peersenders.discard(rcEnv.nodename)
def get_targets(self, action=None):
    """Compute the set of peer nodes to sync for the given action:
    service nodes for sync_nodes, drp nodes for sync_drp, both when
    action is None. The local node is always excluded."""
    self.targets = set()
    if 'nodes' in self.target and action in (None, 'sync_nodes'):
        self.targets |= self.svc.nodes
    if 'drpnodes' in self.target and action in (None, 'sync_drp'):
        self.targets |= self.svc.drpnodes
    self.targets.discard(rcEnv.nodename)
def sync_nodes(self):
    """Incremental sync toward the service nodes."""
    self._sync_update('sync_nodes')
def sync_drp(self):
    """Incremental sync toward the disaster recovery nodes."""
    self._sync_update('sync_drp')
def sanity_checks(self):
    """Abort the sync when the service is up here or when this node is
    not the flex primary."""
    self.pre_sync_check_svc_not_up()
    self.pre_sync_check_flex_primary()
def sync_full(self):
    """Full (non-incremental) send of the @tosend snapshot to every
    target node, then rotate @tosend to @sent everywhere and replicate
    the state file."""
    self.init_src_btrfs()
    try:
        self.sanity_checks()
    except ex.excError:
        # sanity check failed: silently skip the sync
        return
    self.get_src_info()
    if not self.src_btrfs.has_subvol(self.src_snap_tosend):
        self.create_snap(self.src, self.src_snap_tosend)
    self.get_targets()
    for n in self.targets:
        self.get_dst_info(n)
        self.btrfs_send_initial(n)
        self.rotate_snaps(n)
        self.install_snaps(n)
    # rotate on the source side last
    self.rotate_snaps()
    self.write_statefile()
    for n in self.targets:
        self.push_statefile(n)
def btrfs_send_incremental(self, node):
    """Incremental btrfs send of @tosend relative to @sent, piped into
    a btrfs receive in the peer node's snapshot directory.
    Raises ex.excError when the receive side fails."""
    # NOTE(review): -c and -p both point to the @sent snapshot; -p
    # alone would normally suffice -- confirm intent
    if self.recursive:
        send_cmd = ['btrfs', 'send', '-R',
                    '-c', self.src_snap_sent,
                    '-p', self.src_snap_sent,
                    self.src_snap_tosend]
    else:
        send_cmd = ['btrfs', 'send',
                    '-c', self.src_snap_sent,
                    '-p', self.src_snap_sent,
                    self.src_snap_tosend]
    receive_cmd = ['btrfs', 'receive', self.dst_btrfs[node].snapdir]
    if node is not None:
        # NOTE(review): str.strip(' -n') strips the characters
        # ' ', '-', 'n' from both ends, not the literal ' -n' option --
        # verify against the actual rcEnv.rsh value
        receive_cmd = rcEnv.rsh.strip(' -n').split() + [node] + receive_cmd
    self.log.info(' '.join(send_cmd + ["|"] + receive_cmd))
    p1 = Popen(send_cmd, stdout=PIPE)
    p2 = Popen(receive_cmd, stdin=p1.stdout, stdout=PIPE)
    buff = p2.communicate()
    if p2.returncode != 0:
        if buff[1] is not None and len(buff[1]) > 0:
            self.log.error(buff[1])
        self.log.error("sync update failed")
        raise ex.excError
    if buff[0] is not None and len(buff[0]) > 0:
        self.log.info(buff[0])
def btrfs_send_initial(self, node=None):
    """Send the full 'tosend' snapshot (no incremental parent), piping
    'btrfs send' into 'btrfs receive' on the target node (or locally
    when node is None).

    Raises ex.excError when the receive side exits non-zero.
    """
    send_cmd = ['btrfs', 'send']
    if self.recursive:
        send_cmd.append('-R')
    send_cmd.append(self.src_snap_tosend)
    receive_cmd = ['btrfs', 'receive', self.dst_btrfs[node].snapdir]
    if node is not None:
        # strip(' -n') removes any mix of ' ', '-', 'n' characters at
        # both ends of the rsh string -- TODO confirm against rcEnv.rsh.
        receive_cmd = rcEnv.rsh.strip(' -n').split() + [node] + receive_cmd
    self.log.info(' '.join(send_cmd + ["|"] + receive_cmd))
    p1 = Popen(send_cmd, stdout=PIPE)
    # bugfix: stderr was not piped, so the error-reporting branch below
    # could never fire (communicate() returned None for stderr)
    p2 = Popen(receive_cmd, stdin=p1.stdout, stdout=PIPE, stderr=PIPE)
    p1.stdout.close()  # let the sender get SIGPIPE if the receiver dies
    buff = p2.communicate()
    if p2.returncode != 0:
        if buff[1] is not None and len(buff[1]) > 0:
            self.log.error(buff[1])
        self.log.error("full sync failed")
        raise ex.excError
    if buff[0] is not None and len(buff[0]) > 0:
        self.log.info(buff[0])
def remove_snap_tosend(self, node=None):
    # Delete the 'tosend' snapshot: on the remote node's btrfs handle
    # when node is given, else on the local source btrfs. Silently
    # returns when the subvolume does not exist.
    self.init_src_btrfs()
    if node is not None:
        o = self.dst_btrfs[node]
        subvol = self.dst_snap_tosend
    else:
        o = self.src_btrfs
        subvol = self.src_snap_tosend
    if not o.has_subvol(subvol):
        return
    try:
        o.subvol_delete(subvol, recursive=self.recursive)
    except rcBtrfs.ExecError:
        raise ex.excError()

def remove_snap(self, node=None):
    # Same as remove_snap_tosend, but targets the 'sent' snapshot.
    self.init_src_btrfs()
    if node is not None:
        o = self.dst_btrfs[node]
        subvol = self.dst_snap_sent
    else:
        o = self.src_btrfs
        subvol = self.src_snap_sent
    if not o.has_subvol(subvol):
        return
    try:
        o.subvol_delete(subvol, recursive=self.recursive)
    except rcBtrfs.ExecError:
        raise ex.excError()
def rename_snap(self, node=None):
    """Rename the 'tosend' snapshot to 'sent', locally (node is None)
    or on a target node.

    Raises ex.excError if the destination snapshot already exists or
    the rename command fails.
    """
    self.init_src_btrfs()
    if node is None:
        o = self.src_btrfs
        src = self.src_snap_tosend
        dst = self.src_snap_sent
    else:
        o = self.dst_btrfs[node]
        src = self.dst_snap_tosend
        dst = self.dst_snap_sent
    if o.has_subvol(dst):
        # bugfix: report the snapshot actually checked; the original
        # always printed dst_snap_sent, even for the local rename
        self.log.error("%s should not exist" % dst)
        raise ex.excError
    # the recursive and flat branches built the exact same command
    # (the '# ??' in the original hinted at this); a plain mv renames
    # the subvolume in both cases
    cmd = ['mv', src, dst]
    if node is not None:
        cmd = rcEnv.rsh.split() + [node] + cmd
    ret, out, err = self.vcall(cmd)
    if ret != 0:
        raise ex.excError
def remove_dst(self, node=None):
    # Delete the destination subvolume tree on the target node, making
    # room for a fresh snapshot install. No-op when node is None.
    if node is None:
        return
    subvols = self.dst_btrfs[node].get_subvols_in_path(self.dst)
    try:
        self.dst_btrfs[node].subvol_delete(subvols)
    except rcBtrfs.ExecError:
        raise ex.excError()
def install_dst(self, node=None):
    """Snapshot the received 'sent' snapshot into the final destination
    path on the target node, writable. No-op when node is None.

    Raises ex.excError when the destination already exists or the
    snapshot command fails.
    """
    if node is None:
        return
    try:
        self.dst_btrfs[node].snapshot(self.dst_snap_sent, self.dst, readonly=False)
    except rcBtrfs.ExistError:
        # bugfix: the original referenced an undefined name 'snap'
        # here, raising NameError instead of the intended error report
        self.log.error('%s should not exist' % self.dst)
        raise ex.excError()
    except rcBtrfs.ExecError:
        self.log.error("failed to install snapshot %s on node %s" % (self.dst, node))
        raise ex.excError()
def install_snaps(self, node=None):
    # Replace the destination subvolume with a fresh writable snapshot
    # of the last received 'sent' snapshot.
    self.remove_dst(node)
    self.install_dst(node)

def rotate_snaps(self, node=None):
    # Promote 'tosend' to 'sent': drop the old 'sent' snapshot then
    # rename the freshly transferred one.
    self.remove_snap(node)
    self.rename_snap(node)
def _sync_update(self, action):
    # Incremental replication toward the targets selected by 'action'
    # ('sync_nodes' or 'sync_drp'). Falls back to a full initial send
    # for any target lacking the common 'sent' parent snapshot.
    self.init_src_btrfs()
    try:
        self.sanity_checks()
    except ex.excError:
        # a failed sanity check skips the sync without failing the action
        return
    self.get_targets(action)
    if len(self.targets) == 0:
        return
    self.get_src_info()
    if not self.src_btrfs.has_subvol(self.src_snap_tosend):
        self.create_snap(self.src, self.src_snap_tosend)
    for n in self.targets:
        self.get_dst_info(n)
        self.remove_snap_tosend(n)
        # incremental send requires the parent snapshot on both ends
        if self.src_btrfs.has_subvol(self.src_snap_sent) and self.dst_btrfs[n].has_subvol(self.dst_snap_sent):
            self.btrfs_send_incremental(n)
        else:
            self.btrfs_send_initial(n)
        self.rotate_snaps(n)
        self.install_snaps(n)
    # rotate the local snapshots only after every target succeeded
    self.rotate_snaps()
    self.write_statefile()
    for n in self.targets:
        self.push_statefile(n)
def start(self):
    # sync resources take no action on service start
    pass

def stop(self):
    # sync resources take no action on service stop
    pass

def can_sync(self, target=None):
    # Decide whether a scheduled sync should run, based on the age of
    # the last sync recorded in the local state file.
    try:
        ls = self.get_local_state()
        ts = datetime.datetime.strptime(ls['date'], "%Y-%m-%d %H:%M:%S.%f")
    except IOError:
        # no state file yet: allow the first sync to run
        self.log.error("btrfs state file not found")
        return True
    except:
        import sys
        import traceback
        e = sys.exc_info()
        print(e[0], e[1], traceback.print_tb(e[2]))
        return False
    if self.skip_sync(ts):
        # NOTE(review): the message wording looks inverted relative to
        # the usual skip_sync semantics (recent enough => skip) --
        # confirm against the Sync base class
        self.status_log("Last sync on %s older than %i minutes"%(ts, self.sync_max_delay))
        return False
    return True
def _status(self, verbose=False):
    # Resource status: WARN when the state file is missing, unreadable
    # or older than sync_max_delay minutes; UP otherwise.
    self.init_src_btrfs()
    try:
        ls = self.get_local_state()
        now = datetime.datetime.now()
        last = datetime.datetime.strptime(ls['date'], "%Y-%m-%d %H:%M:%S.%f")
        delay = datetime.timedelta(minutes=self.sync_max_delay)
    except IOError:
        self.status_log("btrfs state file not found")
        return rcStatus.WARN
    except:
        import sys
        import traceback
        e = sys.exc_info()
        print(e[0], e[1], traceback.print_tb(e[2]))
        return rcStatus.WARN
    if last < now - delay:
        self.status_log("Last sync on %s older than %i minutes"%(last, self.sync_max_delay))
        return rcStatus.WARN
    return rcStatus.UP
def check_remote(self, node):
    # Verify the remote node's recorded uuid matches the local snap1
    # uuid; raises ex.excError on mismatch.
    rs = self.get_remote_state(node)
    if self.snap1_uuid != rs['uuid']:
        self.log.error("%s last update uuid doesn't match snap1 uuid"%(node))
        raise ex.excError

def get_remote_state(self, node):
    # Fetch and parse the peer node's copy of the state file over rsh.
    self.set_statefile()
    cmd1 = ['cat', self.statefile]
    cmd = rcEnv.rsh.split() + [node] + cmd1
    (ret, out, err) = self.call(cmd)
    if ret != 0:
        self.log.error("could not fetch %s last update uuid"%node)
        raise ex.excError
    return self.parse_statefile(out, node=node)

def get_local_state(self):
    # Read and parse the local state file; raises IOError when absent.
    self.set_statefile()
    with open(self.statefile, 'r') as f:
        out = f.read()
    return self.parse_statefile(out)
def get_snap_uuid(self, snap):
    # Despite the name, stores the snapshot's btrfs transaction id
    # (transid) in self.snap_uuid.
    self.init_src_btrfs()
    self.snap_uuid = self.src_btrfs.get_transid(snap)

def set_statefile(self):
    # Path of the per-resource state file under the var directory.
    self.statefile = os.path.join(rcEnv.pathvar,
                                  self.svc.svcname+'_'+self.rid+'_btrfs_state')

def write_statefile(self):
    # Record '<date>;<transid>' of the last successfully sent snapshot.
    self.set_statefile()
    self.get_snap_uuid(self.src_snap_sent)
    self.log.info("update state file with snap uuid %s"%self.snap_uuid)
    with open(self.statefile, 'w') as f:
        f.write(str(datetime.datetime.now())+';'+self.snap_uuid+'\n')
def _push_statefile(self, node):
    # Copy the state file to one peer; '#' characters in the remote
    # path are backslash-escaped for the remote shell.
    cmd = rcEnv.rcp.split() + [self.statefile, node+':'+self.statefile.replace('#', '\#')]
    ret, out, err = self.vcall(cmd)
    if ret != 0:
        raise ex.excError

def push_statefile(self, node):
    # Push the state file to the target node and to every peer sender.
    self.set_statefile()
    self._push_statefile(node)
    self.get_peersenders()
    for s in self.peersenders:
        self._push_statefile(s)

def parse_statefile(self, out, node=None):
    # Parse a one-line '<date>;<uuid>' state file content into a dict.
    # Raises ex.excError unless the content is exactly one line of two
    # ';'-separated fields.
    self.set_statefile()
    if node is None:
        node = rcEnv.nodename
    lines = out.strip().split('\n')
    if len(lines) != 1:
        self.log.error("%s:%s is corrupted"%(node, self.statefile))
        raise ex.excError
    fields = lines[0].split(';')
    if len(fields) != 2:
        self.log.error("%s:%s is corrupted"%(node, self.statefile))
        raise ex.excError
    return dict(date=fields[0], uuid=fields[1])
opensvc-1.8~20170412/lib/resDiskVgAIX.py 0000644 0001750 0001750 00000020227 13073467726 017651 0 ustar jkelbert jkelbert import rcExceptions as ex
import resDisk
import json
import os
import logging
import shutil
from rcGlobalEnv import rcEnv
from subprocess import *
# TODO: dump the vg configuration regularly, so volume extensions are not missed
class Disk(resDisk.Disk):
    # AIX volume group disk resource driver.
    def __init__(self,
                 rid=None,
                 name=None,
                 dsf=True,
                 **kwargs):
        # 'dsf' is stored for interface parity with other drivers --
        # presumably consumed by callers or subclasses; TODO confirm
        self.label = "vg "+name
        self.dsf = dsf
        resDisk.Disk.__init__(self,
                              rid=rid,
                              name=name,
                              type='disk.vg',
                              **kwargs)
def has_it(self):
    """ returns True if the volume is present
    """
    # present means either varied on (active) or at least imported
    if self.is_active():
        return True
    if self.is_imported():
        return True
    return False

def is_active(self):
    # 'lsvg <name>' output contains "active" when the vg is varied on
    cmd = [ 'lsvg', self.name ]
    process = Popen(cmd, stdout=PIPE, stderr=PIPE, close_fds=True)
    buff = process.communicate()
    if not "active" in buff[0]:
        return False
    return True

def is_imported(self):
    # the vg is imported if its name appears in the plain 'lsvg' list
    cmd = ['lsvg']
    process = Popen(cmd, stdout=PIPE, stderr=PIPE, close_fds=True)
    buff = process.communicate()
    for vg in buff[0].split('\n'):
        if vg == self.name:
            return True
    return False

def is_up(self):
    """Returns True if the volume group is present and activated
    """
    if not self.is_imported():
        return False
    if not self.is_active():
        return False
    return True
def pvid2hdisk(self, mypvid):
    """Map a pvid to its hdisk name using 'lspv'.

    Returns the first hdisk whose lspv line contains the pvid, or the
    sentinel string "notfound" when no hdisk carries it (callers test
    for that exact value).
    """
    cmd = ['lspv']
    process = Popen(cmd, stdout=PIPE, stderr=PIPE, close_fds=True)
    buff = process.communicate()
    hdisk = "notfound"
    for line in buff[0].split('\n'):
        if mypvid in line:
            elem = line.split()
            return elem[0]  # first hdisk name matching requested pvid
    # bugfix: the original fell off the end and returned None, so the
    # callers' 'hdisk == "notfound"' checks could never match
    return hdisk
def dumped_pvids(self, p):
    # Return the list of pvids recorded in the json dump file 'p'.
    # A missing or unparsable file yields an empty list.
    if not os.path.exists(p):
        return []
    with open(p) as f:
        s = f.read()
    try:
        data = json.loads(s)
    except:
        return []
    l = []
    for line in data:
        pvid = line.get('pvid')
        if pvid is not None:
            l.append(pvid)
    return l

def dump_changed(self):
    # True when the set of pvids in the current dump differs from the
    # dump taken at last import time.
    pvids1 = self.dumped_pvids(self.vgfile_name())
    pvids2 = self.dumped_pvids(self.vgimportedfile_name())
    if set(pvids1) == set(pvids2):
        return False
    return True
def do_import(self):
    """Import the volume group described by the vginfo dump file.

    Re-imports (export + import) when the dump changed since the last
    import, unless the vg is active. Raises ex.excError when the dump
    file is missing or corrupt, when hdisks are missing, or when
    importvg fails.
    """
    if not os.path.exists(self.vgfile_name()):
        raise ex.excError("%s should exist" % self.vgfile_name())
    if not self.dump_changed() and self.is_imported():
        self.log.info("%s is already imported" % self.name)
        return
    if self.dump_changed() and self.is_imported():
        if self.is_active():
            self.log.warning("%s is active. can't reimport." % self.name)
            return
        self.do_export()
    with open(self.vgfile_name()) as f:
        s = f.read()
    try:
        data = json.loads(s)
    except:
        raise ex.excError("%s is misformatted" % self.vgfile_name())
    self.pvids = {}
    missing = []
    for l in data:
        pvid = l.get('pvid')
        if pvid is None:
            continue
        hdisk = self.pvid2hdisk(pvid)
        self.pvids[pvid] = hdisk
        if hdisk == "notfound":
            missing.append(pvid)
    # check for missing devices
    if len(missing) > 1:
        raise ex.excError("Missing hdisks for pvids %s to be able to import vg" % ','.join(missing))
    elif len(missing) == 1:
        raise ex.excError("Missing hdisk for pvid %s to be able to import vg" % ','.join(missing))
    # bugfix: dict.values() is a non-subscriptable view on python3;
    # materialize a list before indexing
    myhdisks = list(self.pvids.values())
    cmd = ['importvg', '-n', '-y', self.name, myhdisks[0]]
    ret, out, err = self.vcall(cmd)
    if ret != 0:
        raise ex.excError
    # remember which dump we imported from, to detect future changes
    shutil.copy2(self.vgfile_name(), self.vgimportedfile_name())
def do_export(self):
    # Remove the vg definition from the ODM; no-op when not imported.
    if not self.is_imported():
        self.log.info("%s is already exported" % self.name)
        return
    cmd = ['exportvg', self.name]
    (ret, out, err) = self.vcall(cmd)
    if ret != 0:
        raise ex.excError

def do_activate(self):
    # Vary the vg on; no-op when already active.
    if self.is_active():
        self.log.info("%s is already available" % self.name)
        return
    cmd = ['varyonvg', self.name]
    (ret, out, err) = self.vcall(cmd)
    if ret != 0:
        raise ex.excError

def do_deactivate(self):
    # Vary the vg off; no-op when already inactive.
    if not self.is_active():
        self.log.info("%s is already unavailable" % self.name)
        return
    cmd = ['varyoffvg', self.name]
    (ret, out, err) = self.vcall(cmd)
    if ret != 0:
        raise ex.excError
def do_start(self):
    # import + activate, then refresh the config dump so peers get an
    # up-to-date pv layout
    self.do_import()
    self.do_activate()
    self.do_dumpcfg()
    self.can_rollback = True

def do_stop(self):
    # dump the config before deactivation, while lspv still reports
    # the vg membership
    self.do_dumpcfg()
    self.do_deactivate()

def vgfile_name(self):
    # json dump of the vg pv layout, synchronized to peer nodes
    return os.path.join(rcEnv.pathvar, self.name + '.vginfo')

def vgimportedfile_name(self):
    # copy of the dump used for the last successful import
    return os.path.join(rcEnv.pathvar, self.name + '.vginfo.imported')

def files_to_sync(self):
    return [self.vgfile_name()]
def do_dumpcfg(self):
    """Dump the 'lspv' lines belonging to this vg as json records
    ({'hdisk','pvid','vg','state'}) into the vginfo file, for import
    on peer nodes.
    """
    cmd = ['lspv']
    p = Popen(cmd, stdout=PIPE, stderr=PIPE, close_fds=True)
    out, err = p.communicate()
    if p.returncode != 0:
        return
    data = []
    for line in out.split('\n'):
        l = line.split()
        n = len(l)
        h = {}
        for i, key in enumerate(['hdisk', 'pvid', 'vg', 'state']):
            # bugfix: the original broke at 'i >= n-1', always dropping
            # the last available column ('state' on active pvs, 'vg' on
            # unassigned ones), contradicting the sample dump below
            if i >= n:
                break
            h[key] = l[i]
        vg = h.get('vg')
        if vg is not None and vg == self.name:
            data.append(h)
    if len(data) == 0:
        # don't overwrite existing dump file with an empty dataset
        return
    s = json.dumps(data)
    with open(self.vgfile_name(), 'w') as f:
        f.write(s)
"""
root@host:/$ lspv
hdisk0 00078e0b282e417a rootvg active
hdisk1 none None
hdisk2 00078e0bb1618c92 tstvg active
hdisk3 00078e0bb161b59e tstvg active
hdisk4 none None
hdisk5 none None
=>
[{'hdisk': 'hdisk0', 'pvid': '00078e0b282e417a', 'vg': 'rootvg', 'state': 'active'},
{'hdisk': 'hdisk1', 'pvid': 'none', 'vg': 'None'},
{'hdisk': 'hdisk2', 'pvid': '00078e0bb1618c92', 'vg': 'testvg', 'state': 'active'},
{'hdisk': 'hdisk3', 'pvid': '00078e0bb161b59e', 'vg': 'testvg', 'state': 'active'},
{'hdisk': 'hdisk4', 'pvid': 'none', 'vg': 'None'},
{'hdisk': 'hdisk5', 'pvid': 'none', 'vg': 'None'}]
"""
def disklist(self):
    # Return the set of hdisks backing the vg: from lsvg when active,
    # else reconstructed from the vginfo dump file.
    if self.is_active():
        return self.disklist_active()
    return self.disklist_inactive()

def disklist_active(self):
    cmd = ['lsvg', '-p', self.name]
    (ret, out, err) = self.call(cmd)
    if ret != 0:
        raise ex.excError
    # NOTE(review): self.disks is assumed to be initialized by the
    # parent class here -- confirm against resDisk.Disk
    for e in out.split('\n'):
        x = e.split()
        if len(x) != 5:
            continue
        self.disks |= set([x[0]])
    return self.disks

def disklist_inactive(self):
    self.disks = set([])
    if not os.path.exists(self.vgfile_name()):
        return self.disks
    with open(self.vgfile_name()) as f:
        s = f.read()
    try:
        data = json.loads(s)
    except:
        return self.disks
    for l in data:
        pvid = l.get('pvid')
        if pvid is None:
            continue
        hdisk = self.pvid2hdisk(pvid)
        if hdisk == "notfound":
            # pvid not present on this node: skip silently
            continue
        self.disks.add(hdisk)
    return self.disks
opensvc-1.8~20170412/lib/provDiskZpool.py 0000644 0001750 0001750 00000002015 13073467726 020226 0 ustar jkelbert jkelbert from provisioning import Provisioning
import rcExceptions as ex
from svcBuilder import conf_get_string_scope
class ProvisioningDisk(Provisioning):
    """Zpool provisioner: creates or destroys the pool backing the
    disk resource."""

    def __init__(self, r):
        Provisioning.__init__(self, r)

    def unprovisioner(self):
        # stop the resource, then destroy the pool if it still exists
        self.r.stop()
        if not self.r.has_it():
            self.r.log.info("already unprovisionned")
            return
        self.r.vcall(["zpool", "destroy", "-f", self.r.name])
        self.r.log.info("unprovisionned")

    def provisioner(self):
        # fetch the vdev list from the scoped service configuration
        try:
            self.name = self.r.name
            self.vdev = conf_get_string_scope(self.r.svc, self.r.svc.config, self.r.rid, "vdev").split()
        except Exception as e:
            raise ex.excError(str(e))
        if self.r.has_it():
            self.r.log.info("already provisionned")
            self.r.start()
            return
        self.r.vcall(["zpool", "create", "-m", "legacy", self.name] + self.vdev)
        self.r.log.info("provisioned")
        self.r.start()
opensvc-1.8~20170412/lib/rcStatsCollectLinux.py 0000644 0001750 0001750 00000011042 13073467726 021352 0 ustar jkelbert jkelbert from __future__ import print_function
import os
import sys
import datetime
import time
import json
from rcUtilities import justcall, which
from rcGlobalEnv import rcEnv
def collect(node):
    # Entry point called by the stats collection scheduler: sample
    # local filesystem usage and xen domain stats, appending them to
    # per-day files under the var/log directories.
    now = str(datetime.datetime.now())

    def fs_u():
        # Sample filesystem usage with 'df -lP' and append one
        # [date, nodename, mntpt, size, used%] record per filesystem
        # to a per-day json-lines file.
        cmd = ['df', '-lP']
        (out, err, ret) = justcall(cmd)
        if ret != 0:
            return
        lines = out.split('\n')
        if len(lines) < 2:
            return
        vals = []
        for line in lines[1:]:
            l = line.split()
            if len(l) != 6:
                continue
            vals.append([now, node.nodename, l[5], l[1], l[4].replace('%','')])
        # file is named after the day of month, so it cycles monthly
        stats_fs_u_p = os.path.join(rcEnv.pathvar, 'stats_fs_u.%d' % datetime.datetime.now().day)
        if not os.path.exists(stats_fs_u_p):
            # create the stats file
            mode = 'w+'
        elif os.stat(stats_fs_u_p).st_mtime < time.time() - 86400:
            # reset the stats file from last month
            mode = 'w+'
        else:
            # append to the daily stats file
            mode = 'a'
        with open(stats_fs_u_p, mode) as f:
            f.write(json.dumps(vals)+'\n')

    """
    xentop
    NAME STATE CPU(sec) CPU(%) MEM(k) MEM(%) MAXMEM(k) MAXMEM(%) VCPUS NETS NETTX(k) NETRX(k) VBDS VBD_OO VBD_RD VBD_WR VBD_RSECT VBD_WSECT SSID
    """
    def xentop(node):
        # Sample xen domain stats via 'xentop -b' and append them to a
        # per-day file, mapping domain uuids back to container names.
        import os, sys, platform
        import select
        import logging
        import time
        import datetime
        import subprocess
        if not which('xentop'):
            return
        node.build_services()
        containernames = {}
        for svc in node.svcs:
            for r in svc.get_resources("container"):
                if r.type in ("container.ovm", "container.xen"):
                    if hasattr(r, "uuid"):
                        containernames[r.uuid] = r.name
        zs_d = os.path.join(rcEnv.pathlog, 'xentop')
        zs_prefix = 'xentop'
        zs_f = os.path.join(zs_d, zs_prefix + datetime.datetime.now().strftime("%d"))
        datenow = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        n = datetime.datetime.now()
        tn = time.mktime(n.timetuple())
        if not os.path.exists(zs_d):
            os.makedirs(zs_d)
        try:
            t = os.path.getmtime(zs_f)
            d = tn - t
        except:
            d = 0
        if d > 27*24*3600:
            # file older than ~a month: it is a leftover from the
            # previous cycle, drop it
            os.remove(zs_f)
        f = open(zs_f, "a")
        stor = {}
        # two iterations: the first xentop sample is discarded (pr > 1)
        p = subprocess.Popen('xentop -b -d.1 -i2 -f',
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT,
                             shell=True,
                             bufsize=0)
        out = p.stdout.readline()
        pr = 0
        while out:
            line = out
            line = line.rstrip("\n")
            if "NAME" in line:
                # header line marks the start of a new sample
                pr += 1
                out = p.stdout.readline()
                continue
            line = line.replace("no limit", "0")
            fields = line.split()
            if len(fields) == 19 and pr > 1:
                uuid = fields[0]
                if uuid in containernames:
                    uuid = containernames[uuid]
                stor[uuid] = {
                    'STATE': fields[1],
                    'CPU_SEC': fields[2],
                    'CPU_PCT': fields[3],
                    'MEM': str(int(fields[4])//1024),
                    'MEM_PCT': fields[5],
                    'MEM_MAX': str(int(fields[6])//1024),
                    'MEM_MAX_PCT': fields[7],
                    'VCPUS': fields[8],
                    'NETS': fields[9],
                    'NET_TX': fields[10],
                    'NET_RX': fields[11],
                    'VBDS': fields[12],
                    'VBD_OO': fields[13],
                    'VBD_RD': fields[14],
                    'VBD_WR': fields[15],
                    'VBD_RSECT': fields[16],
                    'VBD_WSECT': fields[17],
                    'SSID': fields[18]
                }
                print(datenow, uuid, stor[uuid]['STATE'], stor[uuid]['CPU_SEC'], stor[uuid]['CPU_PCT'], stor[uuid]['MEM'], stor[uuid]['MEM_PCT'], stor[uuid]['MEM_MAX'], stor[uuid]['MEM_MAX_PCT'], stor[uuid]['VCPUS'], stor[uuid]['NETS'], stor[uuid]['NET_TX'], stor[uuid]['NET_RX'], stor[uuid]['VBDS'], stor[uuid]['VBD_OO'], stor[uuid]['VBD_RD'], stor[uuid]['VBD_WR'], stor[uuid]['VBD_RSECT'], stor[uuid]['VBD_WSECT'], stor[uuid]['SSID'], file=f)
            out = p.stdout.readline()
        p.wait()

    fs_u()
    xentop(node)
opensvc-1.8~20170412/lib/rcWinScheduler.py 0000644 0001750 0001750 00000005370 13073467726 020331 0 ustar jkelbert jkelbert '''
Author: Alex Baker
Date: 7th July 2008
Description : Simple python program to wrap the scheduler as a Windows service, based on the example on the web; see the link below.
http://essiene.blogspot.com/2005/04/python-windows-services.html
Usage : python aservice.py install
Usage : python aservice.py start
Usage : python aservice.py stop
Usage : python aservice.py remove
C:\>python aservice.py --username --password --startup auto install
'''
import win32service
import win32serviceutil
import win32api
import win32con
import win32event
import win32evtlogutil
import os
import servicemanager
import datetime
from subprocess import *
import sys
from rcListener import listener
class OsvcSched(win32serviceutil.ServiceFramework):
    """Windows service wrapping the OpenSVC scheduler loop."""
    _svc_name_ = "OsvcSched"
    _svc_display_name_ = "OpenSVC job scheduler"
    _svc_description_ = "Schedule the OpenSVC jobs"

    def __init__(self, args):
        win32serviceutil.ServiceFramework.__init__(self, args)
        # event signaled by SvcStop to terminate the main loop
        self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
        now = datetime.datetime.now()
        # first periodic task is due one minute after service start
        self.next_task10 = now + datetime.timedelta(minutes=1)

    def SvcStop(self):
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        # flag read by the listener thread to shut itself down
        sys.stop_listener = True
        win32event.SetEvent(self.hWaitStop)

    def SvcDoRun(self):
        servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE,servicemanager.PYS_SERVICE_STARTED,(self._svc_name_, ''))
        a = listener()
        self.timeout = 60000  # wake up every 60 seconds
        while 1:
            # Wait for service stop signal, if I timeout, loop again
            rc = win32event.WaitForSingleObject(self.hWaitStop, self.timeout)
            # Check to see if self.hWaitStop happened
            if rc == win32event.WAIT_OBJECT_0:
                # Stop signal encountered
                servicemanager.LogInfoMsg("%s - STOPPED"%self._svc_name_)
                break
            else:
                #servicemanager.LogInfoMsg("%s - ALIVE"%self._svc_name_)
                self.SvcDoJob()

    def SvcDoJob(self):
        # run the 10-minute periodic task when due
        now = datetime.datetime.now()
        if now > self.next_task10:
            self.run_task10()
            self.next_task10 = now + datetime.timedelta(minutes=10)

    def run_task10(self):
        # bugfix: rcEnv was referenced without being imported anywhere
        # in this module, raising NameError on every periodic run
        from rcGlobalEnv import rcEnv
        cmd = [rcEnv.svcmon, "--updatedb"]
        servicemanager.LogInfoMsg("run %s" % ' '.join(cmd))
        p = Popen(cmd, stdout=None, stderr=None, stdin=None)
        p.communicate()
        servicemanager.LogInfoMsg("run internal scheduler")
        cmd = [rcEnv.cron]
        p = Popen(cmd, stdout=None, stderr=None, stdin=None)
        p.communicate()
def ctrlHandler(ctrlType):
    # swallow console control events so the service process is not
    # killed by console signals
    return True

if __name__ == '__main__':
    win32api.SetConsoleCtrlHandler(ctrlHandler, True)
    win32serviceutil.HandleCommandLine(OsvcSched)
opensvc-1.8~20170412/lib/rcMountsLinux.py 0000644 0001750 0001750 00000002350 13073467726 020235 0 ustar jkelbert jkelbert import rcMounts
from rcLoopLinux import file_to_loop
from rcUtilities import *
class Mounts(rcMounts.Mounts):
    """Parser of the Linux 'mount' output."""
    df_one_cmd = ['df', '-l']

    def match_mount(self, i, dev, mnt):
        """Given a line of 'mount' output, returns True if (dev, mnt) matches
        this line. Returns False otherwize. Also care about weirdos like loops
        and binds, ...
        """
        if os.path.isdir(dev):
            is_bind = True
            src_dir_dev = self.get_src_dir_dev(dev)
        else:
            is_bind = False
        if i.mnt != mnt:
            return False
        if i.dev == dev:
            return True
        # the mount table may list the loop device backing a file
        if i.dev in file_to_loop(dev):
            return True
        if is_bind and i.dev == src_dir_dev:
            return True
        return False

    def __init__(self):
        self.mounts = []
        (ret, out, err) = call(['mount'])
        # lazily-unmounted entries are tagged ' (deleted)' by the kernel
        out = out.replace(" (deleted)", "")
        for l in out.split('\n'):
            # bugfix: a malformed entry (or the trailing empty line)
            # used to 'return', silently dropping every subsequent
            # mount; skip just the offending line instead
            if len(l.split()) != 6:
                continue
            dev, null, mnt, null, type, mnt_opt = l.split()
            m = rcMounts.Mount(dev, mnt, type, mnt_opt.strip('()'))
            self.mounts.append(m)
if __name__ == "__main__" :
    # module self-test: print the parsed mount table
    help(Mounts)
    for m in Mounts():
        print(m)
opensvc-1.8~20170412/lib/resDiskHpVm.py 0000644 0001750 0001750 00000010543 13073467726 017605 0 ustar jkelbert jkelbert import re
import os
import rcExceptions as ex
import rcStatus
resVg = __import__("resDiskVgHP-UX")
from subprocess import *
from rcUtilities import qcall
from rcGlobalEnv import rcEnv
from subprocess import *
class Disk(resVg.Disk):
    # HP-UX vPars/Integrity VM guest disk group resource: the actual
    # devices live in the host; this driver maintains the hpvm device
    # table sharing attributes and the mksf/share dump files.
    def __init__(self,
                 rid=None,
                 name=None,
                 container_name=None,
                 **kwargs):
        self.label = "vmdg "+str(name)
        self.container_name = container_name
        resVg.Disk.__init__(self,
                            rid=rid,
                            name=name,
                            type='disk.vg',
                            **kwargs)

    def has_it(self):
        # the vm disk group has no on/off state of its own
        return True

    def is_up(self):
        return True

    def _status(self, verbose=False):
        return rcStatus.NA

    def do_start(self):
        self.do_mksf()

    def do_stop(self):
        pass

    def files_to_sync(self):
        return [self.sharefile_name(), self.mkfsfile_name()]

    def postsync(self):
        # on the passive node, replay the mksf and share settings
        # received from the active node
        # NOTE(review): indentation reconstructed -- confirm whether
        # do_share() is conditioned on the service being down
        s = self.svc.group_status(excluded_groups=set(["sync", "hb"]))
        if s['overall'].status != rcStatus.UP:
            self.do_mksf()
            self.do_share()

    def presync(self):
        # on the active node, refresh the dump files before they are
        # synchronized to the peers
        s = self.svc.group_status(excluded_groups=set(["sync", "hb"]))
        if self.svc.options.force or s['overall'].status == rcStatus.UP:
            self.write_mksf()
            self.write_share()

    def sharefile_name(self):
        return os.path.join(rcEnv.pathvar, 'vg_' + self.svc.svcname + '_' + self.name + '.share')

    def get_devs(self):
        # Parse 'hpvmdevmgmt -l all' into {dev: {'share': 'YES'|'NO'}}
        # for FILE and DISK device entries.
        cmd = ['/opt/hpvm/bin/hpvmdevmgmt', '-l', 'all']
        (ret, buff, err) = self.call(cmd)
        if ret != 0:
            raise ex.excError
        if len(buff) == 0:
            # NOTE(review): returns a list here but a dict below;
            # callers only iterate/membership-test so both work
            return []
        a = {}
        for line in buff.split('\n'):
            if len(line) == 0:
                continue
            if "DEVTYPE=FILE" not in line and "DEVTYPE=DISK" not in line:
                continue
            if "SHARE=YES" in line:
                share = "YES"
            else:
                share = "NO"
            devs = line.split(":")[0]
            for dev in devs.split(","):
                a[dev] = {'share': share}
        return a

    def write_share(self):
        # dump 'dev:share' lines for the devices belonging to this vg
        devs = self.get_devs()
        disklist = self.disklist()
        with open(self.sharefile_name(), 'w') as f:
            for dev in devs:
                if dev not in disklist:
                    continue
                f.write("%s:%s\n"%(dev, devs[dev]['share']))

    def do_share(self):
        # replay the dumped share attributes into the local hpvm device
        # table, registering missing devices first
        if not os.path.exists(self.sharefile_name()):
            return
        devs = self.get_devs()
        errors = 0
        with open(self.sharefile_name(), 'r') as f:
            for line in f.readlines():
                l = line.split(':')
                if len(l) != 2:
                    continue
                dev = l[0]
                share = l[1].strip()
                if len(dev) == 0:
                    continue
                if not os.path.exists(dev):
                    continue
                if dev not in devs:
                    cmd = ['/opt/hpvm/bin/hpvmdevmgmt', '-a', 'gdev:'+dev]
                    (ret, out, err) = self.vcall(cmd)
                    if ret != 0:
                        self.log.error("error adding device %s hpvm device table"%dev)
                        raise ex.excError
                if dev in devs and share == devs[dev]['share']:
                    self.log.debug("skip set sharing of %s: already set to %s"%(dev, devs[dev]['share']))
                    continue
                cmd = ['/opt/hpvm/bin/hpvmdevmgmt', '-m', 'gdev:'+dev+':attr:SHARE='+share]
                (ret, buff, err) = self.vcall(cmd)
                if ret != 0:
                    self.log.error("error setting the shared attribute for %s"%dev)
                    errors += 1
                    continue
        if errors > 0:
            raise ex.excError

    def disklist(self):
        # devices attached to the vm, from 'hpvmstatus -d'
        # NOTE(review): self.disks is assumed initialized by the parent
        # class -- confirm against resDiskVgHP-UX
        cmd = ['/opt/hpvm/bin/hpvmstatus', '-d', '-P', self.container_name]
        p = Popen(cmd, stdout=PIPE, stderr=PIPE, close_fds=True)
        buff = p.communicate()
        if p.returncode != 0:
            raise ex.excError
        for line in buff[0].split('\n'):
            l = line.split(':')
            if len(l) < 5:
                continue
            if l[3] != 'disk':
                continue
            self.disks |= set([l[4]])
        return self.disks
opensvc-1.8~20170412/lib/resIpHP-UX.py 0000644 0001750 0001750 00000002076 13073467726 017254 0 ustar jkelbert jkelbert import resIp as Res
u = __import__('rcUtilitiesHP-UX')
from rcUtilities import to_cidr, to_dotted
class Ip(Res.Ip):
    """HP-UX ip resource driver."""

    def check_ping(self, count=1, timeout=5):
        # delegate to the HP-UX flavored ping helper
        self.log.info("checking %s availability"%self.addr)
        return u.check_ping(self.addr, count=count, timeout=timeout)

    def arp_announce(self):
        """ arp_announce job is done by HP-UX ifconfig... """
        return

    def startip_cmd(self):
        # Plumb the address. For ipv6 the base interface is brought up
        # first, then the address is set on the stacked device.
        if ':' in self.addr:
            # bugfix: rcExceptions was never imported by this module,
            # so the error path below raised NameError instead of the
            # intended excError
            import rcExceptions as ex
            cmd = ['ifconfig', self.ipdev, 'inet6', 'up']
            (ret, out, err) = self.vcall(cmd)
            if ret != 0:
                raise ex.excError
            cmd = ['ifconfig', self.stacked_dev, 'inet6', self.addr+'/'+to_cidr(self.mask), 'up']
        else:
            cmd = ['ifconfig', self.stacked_dev, self.addr, 'netmask', to_dotted(self.mask), 'up']
        return self.vcall(cmd)

    def stopip_cmd(self):
        # unplumb by resetting the stacked device address
        if ':' in self.addr:
            cmd = ['ifconfig', self.stacked_dev, "inet6", "::"]
        else:
            cmd = ['ifconfig', self.stacked_dev, "0.0.0.0"]
        return self.vcall(cmd)
opensvc-1.8~20170412/lib/rcMountsDarwin.py 0000644 0001750 0001750 00000002475 13073467726 020372 0 ustar jkelbert jkelbert import rcMounts
from rcUtilities import *
class Mounts(rcMounts.Mounts):
    """Parser of the Darwin 'mount' output."""
    df_one_cmd = ['df', '-l']

    def match_mount(self, i, dev, mnt):
        """Given a line of 'mount' output, returns True if (dev, mnt) matches
        this line. Returns False otherwize. Also care about weirdos like loops
        and binds, ...
        """
        if os.path.isdir(dev):
            is_bind = True
            src_dir_dev = self.get_src_dir_dev(dev)
        else:
            is_bind = False
        if i.mnt != mnt:
            return False
        if i.dev == dev:
            return True
        if is_bind and i.dev == src_dir_dev:
            return True
        return False

    def __init__(self):
        self.mounts = []
        (ret, out, err) = call(['mount'])
        for l in out.split('\n'):
            words = l.split()
            # bugfix: short/empty lines used to 'return', silently
            # dropping every subsequent mount; skip just the line
            if len(words) < 4:
                continue
            dev = words[0]
            mnt = words[2]
            # trailing fields look like '(type, owner, flags...)';
            # the first token is the fs type
            opts = ' '.join(words[3:]).strip('(').strip(')').split(', ')
            type = opts[0]
            if len(opts) < 3:
                mnt_opt = ''
            else:
                mnt_opt = ','.join(opts[2:])
            m = rcMounts.Mount(dev, mnt, type, mnt_opt)
            self.mounts.append(m)
if __name__ == "__main__" :
    # module self-test: print the parsed mount table
    help(Mounts)
    for m in Mounts():
        print(m)
opensvc-1.8~20170412/lib/resFsDir.py 0000644 0001750 0001750 00000007242 13073467726 017131 0 ustar jkelbert jkelbert import resources as Res
import os
import rcExceptions as ex
import rcStatus
from rcGlobalEnv import rcEnv
from rcUtilities import is_string
import pwd
import grp
import stat
class FsDir(Res.Resource):
    """Define a mount resource

    Directory resource: ensures a directory exists with the configured
    owner, group and octal permissions.
    """
    def __init__(self,
                 rid=None,
                 path=None,
                 user=None,
                 group=None,
                 perm=None,
                 **kwargs):
        Res.Resource.__init__(self,
                              rid=rid,
                              type="fs",
                              **kwargs)
        self.path = path
        self.mount_point = path # for fs ordering
        self.user = user
        self.group = group
        self.perm = perm
        self.label = "dir " + path

    def start(self):
        self.create()

    def get_gid(self):
        # resolve the configured group (name or numeric) into self.gid
        if is_string(self.group):
            info = grp.getgrnam(self.group)
            self.gid = info[2]
        else:
            self.gid = int(self.group)

    def get_uid(self):
        # resolve the configured user (name or numeric) into self.uid
        if is_string(self.user):
            info = pwd.getpwnam(self.user)
            self.uid = info[2]
        else:
            self.uid = int(self.user)

    def create(self):
        # create the directory and converge owner/group/mode
        if not os.path.exists(self.path):
            self.log.info("create directory %s" % (self.path))
            os.makedirs(self.path)
        if not self.check_uid():
            self.log.info("set %s user to %s" % (self.path, str(self.user)))
            os.chown(self.path, self.uid, -1)
        if not self.check_gid():
            self.log.info("set %s group to %s" % (self.path, str(self.group)))
            os.chown(self.path, -1, self.gid)
        if not self.check_perm():
            self.log.info("set %s perm to %s" % (self.path, str(self.perm)))
            os.chmod(self.path, int(str(self.perm), 8))

    def check_uid(self):
        # True when no user is configured, the path is absent, or the
        # owner matches; logs a status warning on mismatch
        if self.user is None:
            return True
        if not os.path.exists(self.path):
            return True
        self.get_uid()
        uid = os.stat(self.path).st_uid
        if uid != self.uid:
            self.status_log('uid should be %s but is %s'%(str(self.uid), str(uid)))
            return False
        return True

    def check_gid(self):
        if self.group is None:
            return True
        if not os.path.exists(self.path):
            return True
        self.get_gid()
        gid = os.stat(self.path).st_gid
        if gid != self.gid:
            self.status_log('gid should be %s but is %s'%(str(self.gid), str(gid)))
            return False
        return True

    def check_perm(self):
        if self.perm is None:
            return True
        if not os.path.exists(self.path):
            return True
        # bugfix: the original compared string-stripped octal text
        # ('0755' configured vs '755' observed flagged a false
        # mismatch); compare mode values numerically instead
        actual = stat.S_IMODE(os.stat(self.path).st_mode)
        perm = "%o" % actual
        try:
            expected = int(str(self.perm), 8)
        except ValueError:
            self.status_log('perm %s is not a valid octal mode'%str(self.perm))
            return False
        if actual != expected:
            self.status_log('perm should be %s but is %s'%(str(self.perm), perm))
            return False
        return True

    def _status(self, verbose=False):
        # WARN when any of existence/owner/group/mode diverges, NA
        # otherwise (the directory has no up/down state)
        if not os.path.exists(self.path):
            self.status_log("dir %s does not exist" % self.path)
        self.check_uid()
        self.check_gid()
        self.check_perm()
        if self.status_logs_count(["warn", "error"]) > 0:
            return rcStatus.WARN
        else:
            return rcStatus.NA

    def __str__(self):
        return "%s path=%s user=%s group=%s perm=%s" % (Res.Resource.__str__(self),\
                self.path, str(self.user), str(self.group), str(self.perm))

    def __lt__(self, other):
        """
        Order so that deepest mountpoint can be umount first
        """
        return self.mount_point < other.mount_point

    def provision(self):
        self.create()
opensvc-1.8~20170412/lib/collector.py 0000644 0001750 0001750 00000025440 13073467726 017376 0 ustar jkelbert jkelbert from __future__ import print_function
from stat import *
import os
import sys
import re
import datetime
import rcExceptions as ex
from rcGlobalEnv import rcEnv
from rcUtilities import is_exe, justcall, banner
from subprocess import *
class Collector(object):
    """Client-side wrapper around the node's collector xmlrpc proxy."""

    def __init__(self, options=None, node=None, svcname=None):
        # fix: self.options was assigned twice in the original
        self.options = options
        self.node = node
        self.collector = node.collector
        self.svcname = svcname
def rotate_root_pw(self, pw):
    """Push a new root password to the collector; raise on any rpc
    failure."""
    data = self.collector.call('collector_update_root_pw', {'pw': pw})
    if data is None:
        raise ex.excError("xmlrpc unknown failure")
    if data['ret'] != 0:
        raise ex.excError(data['msg'])
def collector_ack_unavailability(self):
    # Record an unavailability acknowledgement for the service on the
    # collector. No-op when no service context is set.
    if self.svcname is None:
        return
    opts = {}
    opts['svcname'] = self.svcname
    if self.options.begin is not None:
        opts['begin'] = self.options.begin
    if self.options.end is not None:
        opts['end'] = self.options.end
    if self.options.author is not None:
        opts['author'] = self.options.author
    if self.options.comment is not None:
        opts['comment'] = self.options.comment
    if self.options.duration is not None:
        opts['duration'] = self.options.duration
    if self.options.account:
        opts['account'] = "1"
    else:
        opts['account'] = "0"
    d = self.collector.call('collector_ack_unavailability', opts)
    if d is None:
        raise ex.excError("xmlrpc unknown failure")
    if d['ret'] != 0:
        raise ex.excError(d['msg'])

def collector_list_unavailability_ack(self):
    # List recorded unavailability acknowledgements matching the same
    # filters. Returns the collector's data payload.
    if self.svcname is None:
        return
    opts = {}
    opts['svcname'] = self.svcname
    if self.options.begin is not None:
        opts['begin'] = self.options.begin
    if self.options.end is not None:
        opts['end'] = self.options.end
    if self.options.author is not None:
        opts['author'] = self.options.author
    if self.options.comment is not None:
        opts['comment'] = self.options.comment
    if self.options.duration is not None:
        opts['duration'] = self.options.duration
    if self.options.account:
        opts['account'] = "1"
    else:
        opts['account'] = "0"
    d = self.collector.call('collector_list_unavailability_ack', opts)
    if d is None:
        raise ex.excError("xmlrpc unknown failure")
    if d['ret'] != 0:
        raise ex.excError(d['msg'])
    return d['data']
def collector_list_actions(self):
    # List collector-side actions, optionally filtered by service and
    # time range. Returns the collector's data payload.
    opts = {}
    if self.svcname is not None:
        opts['svcname'] = self.svcname
    if self.options.begin is not None:
        opts['begin'] = self.options.begin
    if self.options.end is not None:
        opts['end'] = self.options.end
    if self.options.duration is not None:
        opts['duration'] = self.options.duration
    d = self.collector.call('collector_list_actions', opts)
    if d is None:
        raise ex.excError("xmlrpc unknown failure")
    if d['ret'] != 0:
        raise ex.excError(d['msg'])
    return d['data']

def collector_ack_action(self):
    # Acknowledge a failed action identified by --id on the collector.
    opts = {}
    if self.svcname is not None:
        opts['svcname'] = self.svcname
    if self.options.author is not None:
        opts['author'] = self.options.author
    if self.options.comment is not None:
        opts['comment'] = self.options.comment
    if self.options.id == 0:
        raise ex.excError("--id is not set")
    else:
        opts['id'] = self.options.id
    d = self.collector.call('collector_ack_action', opts)
    if d is None:
        raise ex.excError("xmlrpc unknown failure")
    if d['ret'] != 0:
        raise ex.excError(d['msg'])
def collector_networks(self):
opts = {}
if self.svcname is not None:
opts['svcname'] = self.svcname
d = self.collector.call('collector_networks', opts)
if d is None:
raise ex.excError("xmlrpc unknown failure")
if d['ret'] != 0:
raise ex.excError(d['msg'])
return d['data']
def collector_asset(self):
opts = {}
if self.svcname is not None:
opts['svcname'] = self.svcname
d = self.collector.call('collector_asset', opts)
if d is None:
raise ex.excError("xmlrpc unknown failure")
if d['ret'] != 0:
raise ex.excError(d['msg'])
return d['data']
def collector_checks(self):
    """
    Fetch from the collector the checks it knows for this
    node/service. Raises ex.excError on rpc failure.
    """
    params = {}
    if self.svcname is not None:
        params["svcname"] = self.svcname
    reply = self.collector.call('collector_checks', params)
    if reply is None:
        raise ex.excError("xmlrpc unknown failure")
    if reply['ret'] != 0:
        raise ex.excError(reply['msg'])
    return reply['data']
def collector_disks(self):
    """
    Fetch from the collector the disks it knows for this
    node/service. Raises ex.excError on rpc failure.
    """
    params = {}
    if self.svcname is not None:
        params["svcname"] = self.svcname
    reply = self.collector.call('collector_disks', params)
    if reply is None:
        raise ex.excError("xmlrpc unknown failure")
    if reply['ret'] != 0:
        raise ex.excError(reply['msg'])
    return reply['data']
def collector_alerts(self):
    """
    Fetch from the collector the alerts raised for this
    node/service. Raises ex.excError on rpc failure.
    """
    params = {}
    if self.svcname is not None:
        params["svcname"] = self.svcname
    reply = self.collector.call('collector_alerts', params)
    if reply is None:
        raise ex.excError("xmlrpc unknown failure")
    if reply['ret'] != 0:
        raise ex.excError(reply['msg'])
    return reply['data']
def collector_events(self):
    """
    Fetch from the collector the node events in the --begin/--end
    window. Raises ex.excError on rpc failure.
    """
    params = {}
    if self.svcname is not None:
        params["svcname"] = self.svcname
    # forward the optional time-window filters only when set
    for key in ("begin", "end"):
        val = getattr(self.options, key)
        if val is not None:
            params[key] = val
    reply = self.collector.call('collector_events', params)
    if reply is None:
        raise ex.excError("xmlrpc unknown failure")
    if reply['ret'] != 0:
        raise ex.excError(reply['msg'])
    return reply['data']
def collector_show_actions(self):
    """
    Fetch detailed action logs from the collector: a single action
    when --id is set, else the --begin/--end range.
    Raises ex.excError on rpc failure.
    """
    params = {}
    if self.svcname is not None:
        params["svcname"] = self.svcname
    if self.options.id != 0:
        # 0 is the optparse default: no specific action requested
        params["id"] = self.options.id
    for key in ("begin", "end"):
        val = getattr(self.options, key)
        if val is not None:
            params[key] = val
    reply = self.collector.call('collector_show_actions', params)
    if reply is None:
        raise ex.excError("xmlrpc unknown failure")
    if reply['ret'] != 0:
        raise ex.excError(reply['msg'])
    return reply['data']
def collector_untag(self):
    """
    Remove the tag named by --tag from this node (or service) on
    the collector. Raises ex.excError on rpc failure.
    """
    params = {"tag_name": self.options.tag}
    if self.svcname:
        params["svcname"] = self.svcname
    reply = self.collector.call('collector_untag', params)
    if reply is None:
        raise ex.excError("xmlrpc unknown failure")
    if reply['ret'] != 0:
        raise ex.excError(reply['msg'])
def collector_tag(self):
    """
    Attach the tag named by --tag to this node (or service) on
    the collector. Raises ex.excError on rpc failure.
    """
    params = {"tag_name": self.options.tag}
    if self.svcname:
        params["svcname"] = self.svcname
    reply = self.collector.call('collector_tag', params)
    if reply is None:
        raise ex.excError("xmlrpc unknown failure")
    if reply['ret'] != 0:
        raise ex.excError(reply['msg'])
def collector_create_tag(self):
    """
    Create on the collector a new tag named by --tag.

    Raises ex.excError when --tag is not set or on rpc failure.
    Note: the missing-parameter case previously printed to stderr and
    returned 1, unlike every sibling collector_* action which raises
    ex.excError; it now raises for consistent error handling.
    """
    params = {"tag_name": self.options.tag}
    if params["tag_name"] is None:
        raise ex.excError("missing parameter: --tag")
    if self.svcname:
        params["svcname"] = self.svcname
    reply = self.collector.call('collector_create_tag', params)
    if reply is None:
        raise ex.excError("xmlrpc unknown failure")
    if reply['ret'] != 0:
        raise ex.excError(reply['msg'])
def collector_list_tags(self):
    """Print, one per line, the tag names matching --like."""
    for tag_name in self._collector_list_tags():
        print(tag_name)
def _collector_list_tags(self):
opts = {'pattern': self.options.like}
if self.svcname:
opts['svcname'] = self.svcname
d = self.collector.call('collector_list_tags', opts)
if d is None:
raise ex.excError("xmlrpc unknown failure")
if d['ret'] != 0:
raise ex.excError(d['msg'])
return d['data']
def collector_show_tags(self):
    """
    Return the tags attached to this node (or service) on the
    collector. Raises ex.excError on rpc failure.
    """
    params = {}
    if self.svcname:
        params["svcname"] = self.svcname
    reply = self.collector.call('collector_show_tags', params)
    if reply is None:
        raise ex.excError("xmlrpc unknown failure")
    if reply['ret'] != 0:
        raise ex.excError(reply['msg'])
    return reply['data']
def collector_list_nodes(self):
    """
    Return the nodes matching the --filterset filter.
    Raises ex.excError on rpc failure.
    """
    reply = self.collector.call('collector_list_nodes',
                                {'fset': self.options.filterset})
    if reply is None:
        raise ex.excError("xmlrpc unknown failure")
    if reply['ret'] != 0:
        raise ex.excError(reply['msg'])
    return reply['data']
def collector_list_services(self):
    """
    Return the services matching the --filterset filter.
    Raises ex.excError on rpc failure.
    """
    reply = self.collector.call('collector_list_services',
                                {'fset': self.options.filterset})
    if reply is None:
        raise ex.excError("xmlrpc unknown failure")
    if reply['ret'] != 0:
        raise ex.excError(reply['msg'])
    return reply['data']
def collector_list_filtersets(self):
    """
    Return the filtersets known to the collector, optionally
    narrowed by --filterset. Raises ex.excError on rpc failure.
    """
    reply = self.collector.call('collector_list_filtersets',
                                {'fset': self.options.filterset})
    if reply is None:
        raise ex.excError("xmlrpc unknown failure")
    if reply['ret'] != 0:
        raise ex.excError(reply['msg'])
    return reply['data']
def collector_search(self):
    """
    Query the collector search rest API with the --like pattern.

    A pattern formatted as "<type>:<substring>" limits the search to
    the object type acronym <type>; a bare pattern searches all types.
    Prints the matching object ids and names, grouped by type.

    Fixes: drop the unused local ``data`` list and reuse the per-type
    dict instead of re-looking it up through d["data"][t]["data"].
    """
    path = "/search?"
    if self.options.like.count(":") == 1:
        otype, substring = self.options.like.split(":")
        path += "substring=%s&in=%s" % (substring.strip(), otype.strip())
    else:
        path += "substring=%s" % self.options.like
    result = self.node.collector_rest_get(path)
    for otype, entries in result["data"].items():
        if entries["total"] == 0:
            continue
        print("%s (%d/%d)" % (otype, len(entries["data"]), entries["total"]))
        for entry in entries["data"]:
            # the collector supplies per-type printf formats for id/name
            entry_name = entries["fmt"]["name"] % entry
            entry_id = entries["fmt"]["id"] % entry
            print(" %s: %s" % (entry_id, entry_name))
def collector_log(self):
    """
    Log the --message text in the collector's node/service log.
    Raises ex.excError when the rest API reports an error.
    """
    payload = {
        "log_fmt": self.options.message,
    }
    reply = self.node.collector_rest_post("/logs", payload, svcname=self.svcname)
    if "error" in reply:
        raise ex.excError(reply["error"])
    print("logged")
opensvc-1.8~20170412/lib/rcStats.py 0000644 0001750 0001750 00000006214 13073467726 017031 0 ustar jkelbert jkelbert import os
import datetime
class StatsProvider(object):
    """
    Base class for OS performance metrics providers (sar-style).

    Splits the [stats_start, stats_end] time interval into per-day
    (start, end) datetime ranges, so each range can be extracted from
    the matching daily sysstat file. Platform subclasses override the
    metric extractor stubs (cpu, mem_u, ...).

    Fixes: removed dead locals from __init__ (a recomputed ``interval``
    only referenced by a commented-out debug print, and an unused
    ``i = 0``) and replaced the os.uname() 5-tuple unpack with indexing.
    """
    def __init__(self, interval=2880, stats_dir=None, stats_start=None, stats_end=None):
        """
        interval: lookback in minutes, used when stats_start is not given.
        stats_dir: override the directory holding the sysstat files.
        stats_start, stats_end: 'YYYY-MM-DD HH:MM' strings;
        stats_end defaults to now.
        """
        self.stats_dir = stats_dir
        self.interval = interval
        if stats_end is None:
            self.stats_end = datetime.datetime.now()
        else:
            self.stats_end = datetime.datetime.strptime(stats_end, "%Y-%m-%d %H:%M")
        if stats_start is None:
            self.stats_start = self.stats_end - datetime.timedelta(minutes=interval)
        else:
            self.stats_start = datetime.datetime.strptime(stats_start, "%Y-%m-%d %H:%M")
        self.nodename = os.uname()[1]
        # minutes elapsed in stats_end's day, inclusive of the current minute
        self.minutes_first_day = 60*self.stats_end.hour + self.stats_end.minute + 1
        one_minute = datetime.timedelta(minutes=1)
        one_day = datetime.timedelta(days=1)
        # walk backward from stats_end, producing one (start, end) range
        # per calendar day, clamped to stats_start
        self.ranges = []
        end = self.stats_end
        while end > self.stats_start:
            start = end - one_day
            if start < self.stats_start:
                start = self.stats_start
            if start.day != end.day:
                # crossed midnight: clamp the range start to 00:00 of end's day
                start = end - datetime.timedelta(hours=end.hour, minutes=end.minute)
            if start != end:
                self.ranges.append((start, end))
            end = start - one_minute
    def get(self, fname):
        """
        Concatenate the metric named fname over all ranges.
        Returns (cols, lines); both empty when the metric is not
        implemented by the platform subclass or yields no data.
        """
        lines = []
        cols = []
        if not hasattr(self, fname):
            print(fname, 'is not implemented')
            return cols, lines
        for start, end in self.ranges:
            date = start.strftime("%Y-%m-%d")
            day = start.strftime("%d")
            start = start.strftime("%H:%M:%S")
            end = end.strftime("%H:%M:%S")
            _cols, _lines = getattr(self, fname)(date, day, start, end)
            if len(_cols) == 0 or len(_lines) == 0:
                continue
            cols = _cols
            lines += _lines
        return cols, lines
    def sarfile(self, day):
        """Return the path of the daily 'sa<day>' sysstat file, or None."""
        if self.stats_dir is None:
            stats_dir = os.path.join(os.sep, 'var', 'log', 'sysstat')
            if not os.path.exists(stats_dir):
                # rhel-family layout
                stats_dir = os.path.join(os.sep, 'var', 'log', 'sa')
        else:
            stats_dir = self.stats_dir
        f = os.path.join(stats_dir, 'sa'+day)
        if os.path.exists(f):
            return f
        return None
    # metric extractor stubs, overridden by platform subclasses.
    # each returns (cols, lines) for the given date/day time window.
    def cpu(self, d, day, start, end):
        return [], []
    def mem_u(self, d, day, start, end):
        return [], []
    def proc(self, d, day, start, end):
        return [], []
    def swap(self, d, day, start, end):
        return [], []
    def block(self, d, day, start, end):
        return [], []
    def blockdev(self, d, day, start, end):
        return [], []
    def netdev(self, d, day, start, end):
        return [], []
    def netdev_err(self, d, day, start, end):
        return [], []
if __name__ == "__main__":
    # basic self-test: extract cpu and swap metrics over the last 20 minutes
    sp = StatsProvider(interval=20)
    print(sp.get('cpu'))
    print(sp.get('swap'))
opensvc-1.8~20170412/lib/checkFmOpenManage.py 0000644 0001750 0001750 00000003150 13073467726 020675 0 ustar jkelbert jkelbert import checks
import os
from rcUtilities import justcall, which
from rcGlobalEnv import rcEnv
class check(checks.check):
    """
    Hardware health checker relying on Dell OpenManage's omreport.

    Runs "omreport system" and "omreport chassis" and converts each
    reported component status line into a check instance dict:
    chk_value "0" when the component state is "ok", "1" otherwise.
    """
    # default install path of the omreport binary
    omreport = "/opt/dell/srvadmin/bin/omreport"
    chk_type = "om"
    chk_name = "OpenManage"
    def find_omreport(self):
        # return the omreport path if the binary is in PATH/executable, else None
        if which(self.omreport):
            return self.omreport
        return
    def do_check(self):
        # aggregate system-level and chassis-level component states
        r = self.do_check_system()
        r += self.do_check_chassis()
        return r
    def do_check_chassis(self):
        return self.do_check_gen("chassis")
    def do_check_system(self):
        return self.do_check_gen("system")
    def do_check_gen(self, command):
        # Run "omreport <command>" and parse its "<state> : <component>"
        # lines into a list of check dicts. Returns self.undef (sentinel
        # presumably inherited from checks.check — TODO confirm) when
        # omreport is absent or fails.
        omreport = self.find_omreport()
        if omreport is None:
            return self.undef
        cmd = [omreport, command]
        out, err, ret = justcall(cmd)
        if ret != 0:
            return self.undef
        lines = out.split('\n')
        if len(lines) == 0:
            return self.undef
        r = []
        for line in lines:
            l = line.split(" : ")
            if len(l) != 2:
                # not a "<state> : <component>" line
                continue
            inst = l[1].strip().lower()
            state = l[0].strip().lower()
            if state == "severity":
                # table header line, not a component
                continue
            elif state == "ok":
                state = 0
            else:
                state = 1
            r.append({
                'chk_instance': inst,
                'chk_value': str(state),
                'chk_svcname': '',
            })
        return r
if __name__ == "__main__":
    # basic self-test: run the checker and pretty-print the results
    from rcUtilities import printplus
    o = check()
    tab = o.do_check()
    printplus(tab)
opensvc-1.8~20170412/lib/rcConfigParser.py 0000644 0001750 0001750 00000003344 13073467726 020316 0 ustar jkelbert jkelbert from __future__ import print_function
import sys
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
if sys.version_info[0] < 3:
    # Python 2: override write() so unicode and byte-string option
    # values are both serialized correctly (the stock implementation
    # fails on non-ascii byte strings).
    class RawConfigParser(ConfigParser.RawConfigParser):
        def __init__(self, *args, **kwargs):
            ConfigParser.RawConfigParser.__init__(self, *args, **kwargs)
        def write(self, fp):
            """
            Write an .ini formatted representation of the configuration.
            """
            # decode byte strings with the tty encoding, UTF-8 fallback
            encoding = sys.stdin.encoding if sys.stdin.encoding else 'UTF-8'
            if self._defaults:
                fp.write("[%s]\n" % ConfigParser.DEFAULTSECT)
                for (key, value) in self._defaults.items():
                    if type(value) != unicode:
                        value = value.decode(encoding)
                    # continuation lines are tab-indented per ini convention
                    fp.write("%s = %s\n" % (key, value.replace('\n', '\n\t')))
                fp.write("\n")
            for section in self._sections:
                fp.write("[%s]\n" % section)
                for (key, value) in self._sections[section].items():
                    if key == "__name__":
                        # internal ConfigParser bookkeeping key, not a real option
                        continue
                    if type(value) != unicode:
                        value = value.decode(encoding)
                    if (value is not None) or (self._optcre == self.OPTCRE):
                        key = " = ".join((key, value.replace('\n', '\n\t')))
                    # encode to utf-8 bytes for the output stream
                    fp.write("%s\n" % (key.encode("utf-8")))
                fp.write("\n")
else:
    # Python 3: only relax strictness, allowing duplicate sections/options
    class RawConfigParser(ConfigParser.RawConfigParser):
        def __init__(self, *args, **kwargs):
            if sys.version_info[0] >= 3:
                kwargs["strict"] = False
            ConfigParser.RawConfigParser.__init__(self, *args, **kwargs)
opensvc-1.8~20170412/lib/nodemgr_parser.py 0000644 0001750 0001750 00000063471 13073467726 020425 0 ustar jkelbert jkelbert """
nodemgr command line actions and options
"""
from rcGlobalEnv import Storage
from rcOptParser import OptParser
from optparse import Option
PROG = "nodemgr"
OPT = Storage({
"api": Option(
"--api", default=None, action="store", dest="api",
help="specify a collector api url different from the "
"one set in node.conf. Honored by the 'collector "
"cli' action."),
"app": Option(
"--app", default=None, action="store", dest="app",
help="Optional with the register command, register the "
"node in the specified app. If not specified, the "
"node is registered in the first registering "
"user's app found."),
"attach": Option(
"--attach", default=False,
action="store_true", dest="attach",
help="attach the modulesets specified during a "
"compliance check/fix/fixable command"),
"author": Option(
"--author", default=None,
action="store", dest="author",
help="the acker name to log when used with the "
"'collector ack action' action"),
"begin": Option(
"--begin", default=None,
action="store", dest="begin",
help="a begin date expressed as 'YYYY-MM-DD hh:mm'. "
"used with the 'collector ack action' and pushstats "
"action"),
"broadcast": Option(
"--broadcast", default=None,
action="store", dest="broadcast",
help="list of broadcast addresses, comma separated, "
"used by the 'wol' action"),
"color": Option(
"--color", default="auto",
action="store", dest="color",
help="colorize output. possible values are : auto=guess "
"based on tty presence, always|yes=always colorize,"
" never|no=never colorize"),
"comment": Option(
"--comment", default=None,
action="store", dest="comment",
help="a comment to log when used with the 'collector ack "
"action' action"),
"config": Option(
"--config", default=None, action="store", dest="config",
help="specify a user-specific collector api connection "
"configuration file. defaults to '~/.opensvc-cli'. "
"Honored by the 'collector cli' action."),
"cron": Option(
"--cron", default=False,
action="store_true", dest="cron",
help="cron mode"),
"debug": Option(
"--debug", default=False,
action="store_true", dest="debug",
help="debug mode"),
"duration": Option(
"--duration", default=None,
action="store", dest="duration", type="int",
help="a duration expressed in minutes. used with the "
"'collector ack action' action"),
"end": Option(
"--end", default=None,
action="store", dest="end",
help="a end date expressed as 'YYYY-MM-DD hh:mm'. used "
"with the 'collector ack action' and pushstats "
"action"),
"filterset": Option(
"--filterset", default="",
action="store", dest="filterset",
help="set a filterset to limit collector extractions"),
"force": Option(
"--force", default=False,
action="store_true", dest="force",
help="force action"),
"format": Option(
"--format", default=None, action="store", dest="format",
help="specify a data formatter for output of the print* "
"and collector* commands. possible values are json, csv "
"or table."),
"hba": Option(
"--hba", default=None, action="store", dest="hba",
help="specify a hba to scan for new block devices. Example: "
"5001438002432430 or iqn.1993-08.org.debian:01:659b4bbd68bd"),
"help": Option(
"-h", "--help", default=None,
action="store_true", dest="parm_help",
help="show this help message and exit"),
"id": Option(
"--id", default=0,
action="store", dest="id", type="int",
help="specify an id to act on"),
"insecure": Option(
"--insecure", default=False,
action="store_true", dest="insecure",
help="allow communications with a collector presenting "
"unverified SSL certificates."),
"like": Option(
"--like", default="%",
action="store", dest="like",
help="a sql like filtering expression. leading and "
"trailing wildcards are automatically set."),
"lun": Option(
"--lun", default=None, action="store", dest="lun",
help="specify a logical unit number to scan for new block devices. "
"Example: 1"),
"mac": Option(
"--mac", default=None,
action="store", dest="mac",
help="list of mac addresses, comma separated, used by "
"the 'wol' action"),
"message": Option(
"--message", default="",
action="store", dest="message",
help="the message to send to the collector for logging"),
"module": Option(
"--module", default="",
action="store", dest="module",
help="compliance, set module list"),
"moduleset": Option(
"--moduleset", default="",
action="store", dest="moduleset",
help="compliance, set moduleset list. The 'all' value "
"can be used in conjonction with detach."),
"opt_object": Option(
"--object", default=[], action="append", dest="objects",
help="an object to limit a push* action to. multiple "
"--object parameters can be set on a "
"single command line"),
"param": Option(
"--param", default=None,
action="store", dest="param",
help="point a node configuration parameter for the 'get'"
" and 'set' actions"),
"password": Option(
"--password", default=None,
action="store", dest="password",
help="authenticate with the collector using the "
"specified user credentials instead of the node "
"credentials. Prompted if necessary but not "
"specified."),
"refresh_api": Option(
"--refresh-api", default=False,
action="store_true", dest="refresh_api",
help="The OpenSVC collector api url"),
"ruleset": Option(
"--ruleset", default="",
action="store", dest="ruleset",
help="compliance, set ruleset list. The 'all' value can "
"be used in conjonction with detach."),
"ruleset_date": Option(
"--ruleset-date", default="",
action="store", dest="ruleset_date",
help="compliance, use rulesets valid on specified date"),
"stats_dir": Option(
"--stats-dir", default=None,
action="store", dest="stats_dir",
help="points the directory where the metrics files are "
"stored for pushstats"),
"symcli_db_file": Option(
"--symcli-db-file", default=None,
action="store", dest="symcli_db_file",
help="[pushsym option] use symcli offline mode with the "
"specified file. aclx files are expected to be "
"found in the same directory and named either "
".aclx or .aclx"),
"sync": Option(
"--sync", default=False,
action="store_true", dest="syncrpc",
help="use synchronous collector rpc. to use when chaining "
"a compliance run, to make sure the node ruleset is "
"up-to-date."),
"tag": Option(
"--tag", default=None,
action="store", dest="tag",
help="a tag specifier used by 'collector create tag', "
"'collector add tag', 'collector del tag'"),
"target": Option(
"--target", default=None, action="store", dest="target",
help="specify a target to scan for new block devices. Example: "
"5000097358185088 or iqn.clementine.tgt1"),
"user": Option(
"--user", default=None, action="store", dest="user",
help="authenticate with the collector using the "
"specified user credentials instead of the node "
"credentials. Required for the 'register' action "
"when the collector is configured to refuse "
"anonymous register."),
"value": Option(
"--value", default=None,
action="store", dest="value",
help="set a node configuration parameter value for the "
"'set --param' action"),
"verbose": Option(
"--verbose", default=False,
action="store_true", dest="verbose",
help="add more information to some print commands: +next "
"in 'print schedule'"),
})
# options accepted by every nodemgr action
GLOBAL_OPTS = [
    OPT.color,
    OPT.cron,
    OPT.debug,
    OPT.format,
    OPT.help,
]
ACTIONS = {
'Node actions': {
'logs': {
'msg': 'fancy display of the node logs',
},
'shutdown': {
'msg': 'shutdown the node to powered off state',
},
'reboot': {
'msg': 'reboot the node',
},
'scheduler': {
'msg': 'run the node task scheduler',
},
'schedulers': {
'msg': 'execute a run of the node and services schedulers. this '
'action is installed in the system scheduler',
},
'schedule_reboot_status': {
'msg': 'tell if the node is scheduled for reboot',
},
'schedule_reboot': {
'msg': 'mark the node for reboot at the next allowed period. the '
'allowed period is defined by a "reboot" section in '
'node.conf.',
},
'unschedule_reboot': {
'msg': 'unmark the node for reboot at the next allowed period.',
},
'array': {
'msg': 'pass a command to a supported array whose access method '
'and credentials are defined in auth.conf',
},
'updatepkg': {
'msg': 'upgrade the opensvc agent version. the packages must be '
'available behind the node.repo/packages url.',
},
'updatecomp': {
'msg': 'upgrade the opensvc compliance modules. the modules must '
'be available as a tarball behind the node.repo/compliance '
'url.',
},
'scanscsi': {
'msg': 'scan the scsi hosts in search of new disks',
'options': [
OPT.hba,
OPT.target,
OPT.lun,
],
},
'dequeue_actions': {
'msg': "dequeue and execute actions from the collector's action "
"queue for this node and its services.",
},
'rotate_root_pw': {
'msg': "set a new root password and store it in the collector",
},
'print_schedule': {
'msg': 'print the node tasks schedule',
'options': [
OPT.verbose,
],
},
'wol': {
'msg': 'forge and send udp wake on lan packet to mac address '
'specified by --mac and --broadcast arguments',
'options': [
OPT.broadcast,
OPT.mac,
],
},
'collect_stats': {
'msg': "write in local files metrics not found in the standard "
"metrics collector. these files will be fed to the "
"collector by the 'pushstat' action.",
},
},
'Service actions': {
'discover': {
'msg': 'discover vservices accessible from this host, cloud nodes for example',
},
},
'Node configuration': {
'print_config': {
'msg': 'open the node.conf configuration file with the preferred editor',
},
'print_authconfig': {
'msg': 'open the auth.conf configuration file with the preferred editor',
},
'edit_config': {
'msg': 'open the node.conf configuration file with the preferred editor',
},
'edit_authconfig': {
'msg': 'open the auth.conf configuration file with the preferred editor',
},
'register': {
'msg': 'obtain a registration number from the collector, used to authenticate the node',
'options': [
OPT.app,
OPT.password,
OPT.user,
],
},
'get': {
'msg': 'get the value of the node configuration parameter pointed by --param',
'options': [
OPT.param,
],
},
'set': {
'msg': 'set a node configuration parameter (pointed by --param) value (pointed by --value)',
'options': [
OPT.param,
OPT.value,
],
},
'unset': {
'msg': 'unset a node configuration parameter (pointed by --param)',
'options': [
OPT.param,
],
},
},
'Push data to the collector': {
'pushasset': {
'msg': 'push asset information to collector',
'options': [
OPT.sync,
],
},
'pushstats': {
'msg': 'push performance metrics to collector. By default pushed '
'stats interval begins yesterday at the beginning of the '
'allowed interval and ends now. This interval can be '
'changed using --begin/--end parameters. The location '
'where stats files are looked up can be changed using '
'--stats-dir.',
'options': [
OPT.begin,
OPT.end,
OPT.stats_dir,
],
},
'pushdisks': {
'msg': 'push disks usage information to collector',
},
'pushpkg': {
'msg': 'push package/version list to collector',
},
'pushpatch': {
'msg': 'push patch/version list to collector',
},
'pushsym': {
'msg': 'push symmetrix configuration to collector',
'options': [
OPT.opt_object,
OPT.symcli_db_file,
],
},
'pushemcvnx': {
'msg': 'push EMC CX/VNX configuration to collector',
'options': [
OPT.opt_object,
],
},
'pushcentera': {
'msg': 'push EMC Centera configuration to collector',
'options': [
OPT.opt_object,
],
},
'pushnetapp': {
'msg': 'push Netapp configuration to collector',
'options': [
OPT.opt_object,
],
},
'pusheva': {
'msg': 'push HP EVA configuration to collector',
'options': [
OPT.opt_object,
],
},
'pushnecism': {
'msg': 'push NEC ISM configuration to collector',
'options': [
OPT.opt_object,
],
},
'pushhds': {
'msg': 'push HDS configuration to collector',
'options': [
OPT.opt_object,
],
},
'pushdcs': {
'msg': 'push Datacore configuration to collector',
'options': [
OPT.opt_object,
],
},
'pushfreenas': {
'msg': 'push FreeNAS configuration to collector',
'options': [
OPT.opt_object,
],
},
'pushxtremio': {
'msg': 'push XtremIO configuration to collector',
'options': [
OPT.opt_object,
],
},
'pushibmsvc': {
'msg': 'push IBM SVC configuration to collector',
'options': [
OPT.opt_object,
],
},
'pushhp3par': {
'msg': 'push HP 3par configuration to collector',
'options': [
OPT.opt_object,
],
},
'pushibmds': {
'msg': 'push IBM DS configuration to collector',
'options': [
OPT.opt_object,
],
},
'pushvioserver': {
'msg': 'push IBM VIO server configuration to collector',
'options': [
OPT.opt_object,
],
},
'pushgcedisks': {
'msg': 'push Google Compute Engine disks configuration to '
'collector',
'options': [
OPT.opt_object,
],
},
'pushbrocade': {
'msg': 'push Brocade switch configuration to collector',
'options': [
OPT.opt_object,
],
},
'pushnsr': {
'msg': 'push EMC Networker index to collector',
'options': [
OPT.opt_object,
],
},
'sysreport': {
'msg': 'push system report to the collector for archiving and '
'diff analysis',
},
'checks': {
'msg': 'run node sanity checks, push results to collector',
},
},
'Misc': {
'prkey': {
'msg': 'show persistent reservation key of this node',
},
},
'Compliance': {
'compliance_auto': {
'msg': 'run compliance checks or fix, according to the autofix '
'property of each module.',
},
'compliance_env': {
'msg': 'show the compliance modules environment variables.',
'options': [
OPT.module,
OPT.moduleset,
],
},
'compliance_check': {
'msg': 'run compliance checks.',
'options': [
OPT.attach,
OPT.force,
OPT.module,
OPT.moduleset,
OPT.ruleset_date,
],
},
'compliance_fix': {
'msg': 'run compliance fixes.',
'options': [
OPT.attach,
OPT.force,
OPT.module,
OPT.moduleset,
OPT.ruleset_date,
],
},
'compliance_fixable': {
'msg': 'verify compliance fixes prerequisites.',
'options': [
OPT.attach,
OPT.force,
OPT.module,
OPT.moduleset,
OPT.ruleset_date,
],
},
'compliance_list_module': {
'msg': 'list compliance modules available on this node',
},
'compliance_show_moduleset': {
'msg': 'show compliance rules applying to this node',
},
'compliance_list_moduleset': {
'msg': 'list available compliance modulesets. --moduleset f% '
'limit the scope to modulesets matching the f% pattern.',
},
'compliance_attach_moduleset': {
'msg': 'attach moduleset specified by --moduleset for this node',
'options': [
OPT.moduleset,
],
},
'compliance_detach_moduleset': {
'msg': 'detach moduleset specified by --moduleset for this node',
'options': [
OPT.moduleset,
],
},
'compliance_list_ruleset': {
'msg': 'list available compliance rulesets. --ruleset f% limit '
'the scope to rulesets matching the f% pattern.',
},
'compliance_show_ruleset': {
'msg': 'show compliance rules applying to this node',
},
'compliance_show_status': {
'msg': 'show compliance modules status',
},
'compliance_attach': {
'msg': 'attach ruleset specified by --ruleset and/or moduleset '
'specified by --moduleset for this node',
'options': [
OPT.moduleset,
OPT.ruleset,
],
},
'compliance_detach': {
'msg': 'detach ruleset specified by --ruleset and/or moduleset '
'specified by --moduleset for this node',
'options': [
OPT.moduleset,
OPT.ruleset,
],
},
'compliance_attach_ruleset': {
'msg': 'attach ruleset specified by --ruleset for this node',
'options': [
OPT.ruleset,
],
},
'compliance_detach_ruleset': {
'msg': 'detach ruleset specified by --ruleset for this node',
'options': [
OPT.ruleset,
],
},
},
'Collector management': {
'collector_cli': {
'msg': 'open a Command Line Interface to the collector rest API. '
'The CLI offers autocompletion of paths and arguments, '
'piping JSON data from files. This command accepts the '
'--user, --password, --api, --insecure and --config '
'parameters. If executed as root, the collector is '
'logged in with the node credentials.',
'options': [
OPT.user,
OPT.password,
OPT.api,
OPT.insecure,
OPT.config,
OPT.refresh_api,
],
},
'collector_events': {
'msg': 'display node events during the period specified by '
'--begin/--end. --end defaults to now. --begin defaults to '
'7 days ago.',
'options': [
OPT.begin,
OPT.end,
],
},
'collector_alerts': {
'msg': 'display node alerts',
},
'collector_checks': {
'msg': 'display node checks',
},
'collector_disks': {
'msg': 'display node disks',
},
'collector_list_actions': {
'msg': 'list actions on the node, whatever the service, during '
'the period specified by --begin/--end. --end defaults to '
'now. --begin defaults to 7 days ago',
'options': [
OPT.begin,
OPT.end,
],
},
'collector_ack_action': {
'msg': 'acknowledge an action error on the node. an acknowlegment '
'can be completed by --author (defaults to root@nodename) '
'and --comment',
'options': [
OPT.author,
OPT.comment,
],
},
'collector_show_actions': {
'msg': 'show actions detailed log. a single action is specified '
'by --id. a range is specified by --begin/--end dates. '
'--end defaults to now. --begin defaults to 7 days ago',
'options': [
OPT.begin,
OPT.id,
OPT.end,
],
},
'collector_list_nodes': {
'msg': 'show the list of nodes matching the filterset pointed by '
'--filterset',
},
'collector_list_services': {
'msg': 'show the list of services matching the filterset pointed '
'by --filterset',
},
'collector_list_filtersets': {
'msg': 'show the list of filtersets available on the collector. '
'if specified, --filterset limits the result set '
'to filtersets matching the given pattern.',
},
'collector_log': {
'msg': 'log a message in the collector\'s node log',
'options': [
OPT.message,
],
},
'collector_asset': {
'msg': 'display asset information known to the collector',
},
'collector_networks': {
'msg': 'display network information known to the collector for '
'each node ip',
},
'collector_tag': {
'msg': 'set a node tag (pointed by --tag)',
'options': [
OPT.tag,
],
},
'collector_untag': {
'msg': 'unset a node tag (pointed by --tag)',
},
'collector_show_tags': {
'msg': 'list all node tags',
},
'collector_list_tags': {
'msg': 'list all available tags. use --like to filter the output.',
'options': [
OPT.like,
],
},
'collector_create_tag': {
'msg': 'create a new tag with name specified by --tag',
'options': [
OPT.tag,
],
},
'collector_search': {
'msg': 'report the collector objects matching --like '
'[:], where is the object type '
'acronym as shown in the collector search widget.',
'options': [
OPT.like,
],
},
},
}
# actions kept only for backward compatibility; superseded by the
# corresponding collector_*/print_* actions combined with --format json
DEPRECATED_ACTIONS = [
    "collector_json_asset",
    "collector_json_networks",
    "collector_json_list_unavailability_ack",
    "collector_json_list_actions",
    "collector_json_show_actions",
    "collector_json_status",
    "collector_json_checks",
    "collector_json_disks",
    "collector_json_alerts",
    "collector_json_events",
    "collector_json_list_nodes",
    "collector_json_list_services",
    "collector_json_list_filtersets",
    "json_schedule",
]
class NodemgrOptParser(OptParser):
    """
    The nodemgr-specific options parser class
    """
    def __init__(self, args=None, colorize=True, width=None, formatter=None,
                 indent=6):
        # wire the nodemgr-specific actions/options tables (OPT, ACTIONS,
        # DEPRECATED_ACTIONS, GLOBAL_OPTS module globals) into the
        # generic OptParser
        OptParser.__init__(self, args=args, prog=PROG, options=OPT,
                           actions=ACTIONS,
                           deprecated_actions=DEPRECATED_ACTIONS,
                           global_options=GLOBAL_OPTS,
                           colorize=colorize, width=width,
                           formatter=formatter, indent=indent)
opensvc-1.8~20170412/lib/rcCloud.py 0000644 0001750 0001750 00000000674 13073467726 017005 0 ustar jkelbert jkelbert import socket
import rcExceptions as ex
class Cloud(object):
    """
    Base class for cloud access drivers.

    s: the cloud section identifier, stored as self.cid.
    auth: the credentials, stored as self.auth.
    Concrete drivers are expected to provide self.driver.
    """
    def __init__(self, s, auth):
        self.cid = s
        self.auth = auth

    def list_svcnames(self):
        """Not implemented in the base driver: print a marker, return []."""
        print("todo")
        return []

    def list_nodes(self):
        """Return the driver's node list, wrapping network errors."""
        try:
            return self.driver.list_nodes()
        except socket.error as err:
            raise ex.excError("error connecting to %s cloud url (%s)" % (self.cid, str(err)))
opensvc-1.8~20170412/lib/resDiskVgSgHP-UX.py 0000644 0001750 0001750 00000000210 13073467726 020351 0 ustar jkelbert jkelbert Res = __import__("resDiskVgHP-UX")
class Disk(Res.Disk):
    # HP-UX Serviceguard variant of the VG disk driver: start/stop are
    # no-ops — presumably because the cluster framework owns VG
    # activation; TODO confirm against resDiskVgHP-UX
    def start(self):
        return 0
    def stop(self):
        return 0
opensvc-1.8~20170412/lib/rcDiskInfoWindows.py 0000644 0001750 0001750 00000007016 13073467726 021015 0 ustar jkelbert jkelbert import rcDiskInfo
import wmi
from rcUtilities import justcall, which
class diskInfo(rcDiskInfo.diskInfo):
    """
    Windows disk information provider: inventories disks through WMI
    Win32_DiskDrive and resolves FC lun wwids through the fcinfo tool.
    """
    def __init__(self):
        # DeviceID -> disk properties dict, filled lazily by scan()
        self.h = {}
        # (host, bus, target, lun) -> {'wwid': ...}, filled by scan_mapping()
        self.fcluns = {}
        self.wmi = wmi.WMI()
    def scan_mapping(self):
        # Build the scsi-address -> wwid map from "fcinfo /mapping"
        # output. No-op if already populated or fcinfo is not installed.
        if len(self.fcluns) > 0:
            return
        if not which('fcinfo'):
            return
        for index, portwwn, host in self._get_fc_hbas():
            cmd = ['fcinfo', '/mapping', '/ai:'+index]
            out, err, ret = justcall(cmd)
            if ret != 0:
                continue
            lines = out.split('\n')
            for i, line in enumerate(lines):
                if line.startswith('( '):
                    # "( bus, target, lun)" line: remember the scsi address
                    l = line.split()
                    if len(l) < 3:
                        continue
                    bus = int(l[-3].strip(','))
                    target = int(l[-2].strip(','))
                    lun = int(l[-1].strip(')'))
                    _index = (host, bus, target, lun)
                elif line.startswith('(cs:'):
                    # "(cs: ...)" line: bind the wwid to the last seen address
                    l = line.split()
                    if len(l) < 2:
                        continue
                    wwid = l[-1].strip(')')
                    self.fcluns[_index] = dict(wwid=wwid)
    def scan(self):
        # Inventory all WMI disk drives into self.h, keyed by DeviceID.
        self.scan_mapping()
        vid = 'unknown'
        pid = 'unknown'
        wwid = 'unknown'
        size = 'unknown'
        for drive in self.wmi.WIN32_DiskDrive():
            id = drive.DeviceID
            vid = str(drive.Manufacturer)
            pid = str(drive.Caption)
            try:
                serial = str(drive.SerialNumber)
            except:
                # some drives do not expose a serial number
                serial = "unknown"
            # drive.Size is in bytes; store MiB
            size = int(drive.Size) // 1024 // 1024
            host = drive.SCSIPort
            bus = drive.SCSIBus
            target = drive.SCSITargetId
            lun = drive.SCSILogicalUnit
            d = dict(id=id,
                     vid=vid,
                     pid=pid,
                     wwid=wwid,
                     serial=serial,
                     host=host,
                     bus=bus,
                     target=target,
                     lun=lun,
                     size=size)
            # prefer the fc lun wwid; fall back to the drive serial
            d['wwid'] = self.get_wwid(d)
            if d['wwid'] is None:
                d['wwid'] = d['serial']
            self.h[id] = d
    def get_wwid(self, d):
        # look up the fc lun wwid by scsi address; None if not a fc lun
        index = (d['host'], d['bus'], d['target'], d['lun'])
        if index not in self.fcluns:
            return None
        return self.fcluns[index]['wwid']
    def get(self, id, prop):
        # lazy-scanning accessor; None when the device is unknown
        if len(self.h) == 0:
            self.scan()
        if id not in self.h:
            return None
        return self.h[id][prop]
    def disk_id(self, dev):
        return self.get(dev, 'wwid')
    def disk_vendor(self, dev):
        return self.get(dev, 'vid')
    def disk_model(self, dev):
        return self.get(dev, 'pid')
    def disk_size(self, dev):
        return self.get(dev, 'size')
    def _get_fc_hbas(self):
        # Return [(adapter index, portwwn, scsi host number)] parsed
        # from "fcinfo" output; empty list if fcinfo is absent or fails.
        hbas = []
        if not which('fcinfo'):
            return []
        cmd = ['fcinfo']
        out, err, ret = justcall(cmd)
        if ret != 0:
            return []
        for line in out.split('\n'):
            if 'PortWWN' not in line:
                continue
            l = line.split()
            i = l.index('PortWWN:')
            if len(l) < i+2:
                continue
            index = l[0].split('-')[-1].strip(':')
            portwwn = l[i+1].replace(':', '')
            host = int(l[-1].split('Scsi')[-1].strip(':'))
            hbas.append((index, portwwn, host))
        return hbas
opensvc-1.8~20170412/lib/resIpVcsLinux.py 0000644 0001750 0001750 00000000177 13073467726 020166 0 ustar jkelbert jkelbert Res = __import__("resIpLinux")
class Ip(Res.Ip):
    """
    Ip resource variant where start and stop are no-ops returning
    success (presumably the VCS cluster stack plumbs the address
    itself -- confirm against the VCS integration).
    """
    def start(self):
        # no-op: report success
        return 0
    def stop(self):
        # no-op: report success
        return 0
opensvc-1.8~20170412/lib/checkMpathPowerpathHP-UX.py 0000777 0001750 0001750 00000000000 13073467726 026436 2checkMpathPowerpath.py ustar jkelbert jkelbert opensvc-1.8~20170412/lib/resDisk.py 0000644 0001750 0001750 00000003722 13073467726 017013 0 ustar jkelbert jkelbert """
Base disk resource driver module.
"""
import os
import resources as Res
import rcStatus
import rcExceptions as exc
from rcGlobalEnv import rcEnv
class Disk(Res.Resource):
    """
    Base disk resource driver, derived for LVM, Veritas, ZFS, ...

    Derived drivers override has_it/is_up/do_start/do_stop and fill
    self.disks and self.devs.
    """
    def __init__(self, rid=None, name=None, **kwargs):
        Res.Resource.__init__(self, rid, **kwargs)
        self.name = name
        self.disks = set()
        self.devs = set()

    def __str__(self):
        return "%s name=%s" % (Res.Resource.__str__(self), self.name)

    def disklist(self):
        # filled by the derived drivers
        return self.disks

    def has_it(self):
        return False

    def is_up(self):
        return False

    def do_start(self):
        return False

    def do_stop(self):
        return False

    def stop(self):
        self.do_stop()

    def start(self):
        self.do_start()

    def _status(self, verbose=False):
        # report standby states when the local node is flagged always_on
        up = self.is_up()
        if rcEnv.nodename in self.always_on:
            return rcStatus.STDBY_UP if up else rcStatus.STDBY_DOWN
        return rcStatus.UP if up else rcStatus.DOWN

    def create_static_name(self, dev, suffix="0"):
        """
        Maintain a stable symlink <var>/<svcname>/dev/<rid>.<suffix>
        pointing to <dev>.
        """
        dirpath = self.create_dev_dir()
        link_name = self.rid.replace("#", ".") + "." + suffix
        link = os.path.join(dirpath, link_name)
        if os.path.exists(link) and os.path.realpath(link) == dev:
            # already pointing at the right device
            return
        self.log.info("create static device name %s -> %s" % (link, dev))
        try:
            os.unlink(link)
        except:
            # best effort: the link may not exist yet
            pass
        os.symlink(dev, link)

    def create_dev_dir(self):
        # create (if needed) and return the per-service dev directory
        dirpath = os.path.join(rcEnv.pathvar, self.svc.svcname, "dev")
        if not os.path.exists(dirpath):
            os.makedirs(dirpath)
        return dirpath
if __name__ == "__main__":
    # smoke-test the base driver
    help(Disk)
    print("""d=Disk("aGenericDisk")""")
    d = Disk("aGenericDisk")
    print("show d", d)
    for action in ("start", "stop"):
        print('d.do_action("%s")' % action)
        d.do_action(action)
opensvc-1.8~20170412/lib/resIpSgLinux.py 0000644 0001750 0001750 00000000177 13073467726 020004 0 ustar jkelbert jkelbert Res = __import__("resIpLinux")
class Ip(Res.Ip):
    """
    Ip resource variant where start and stop are no-ops returning
    success (presumably the ServiceGuard cluster stack plumbs the
    address itself -- confirm against the SG integration).
    """
    def start(self):
        # no-op: report success
        return 0
    def stop(self):
        # no-op: report success
        return 0
opensvc-1.8~20170412/lib/provDiskRadosLinux.py 0000644 0001750 0001750 00000002040 13073467726 021211 0 ustar jkelbert jkelbert from provisioning import Provisioning
import rcExceptions as ex
class ProvisioningDisk(Provisioning):
    """
    Rados disk provisioner: create every image declared by the
    resource, then start it.
    """
    def __init__(self, r):
        Provisioning.__init__(self, r)

    def provisioner(self):
        for image in self.r.images:
            self.provisioner_one(image)
        self.r.log.info("provisioned")
        self.r.start()
        return True

    def _conf_size(self):
        # 'size' is mandatory for image creation
        try:
            return self.r.svc.config.get(self.r.rid, 'size')
        except:
            raise ex.excError("'size' provisioning parameter not set")

    def _conf_image_format(self):
        # 'image_format' is optional
        try:
            return self.r.svc.config.get(self.r.rid, 'image_format')
        except:
            return None

    def provisioner_one(self, image):
        if self.r.exists(image):
            self.r.log.info("%s already provisioned"%image)
            return
        cmd = self.r.rbd_rcmd() + ['create', '--size', str(self._conf_size()), image]
        fmt = self._conf_image_format()
        if fmt:
            cmd += ["--image-format", str(fmt)]
        ret, out, err = self.r.vcall(cmd)
        if ret != 0:
            raise ex.excError
opensvc-1.8~20170412/lib/resSyncNetapp.py 0000644 0001750 0001750 00000023633 13073467726 020210 0 ustar jkelbert jkelbert import os
import logging
from rcGlobalEnv import rcEnv
from rcUtilities import which, justcall
import rcExceptions as ex
import rcStatus
import time
import resSync
import datetime
class syncNetapp(resSync.Sync):
    """
    NetApp snapmirror replication resource driver.

    <filers> maps nodename -> filer name. Commands are run on the
    filers over rcEnv.rsh as <user>. The replicated volume is <path>
    (and <path_short>, the same path with the /vol/ prefix stripped,
    used in snapmirror addressing).
    """
    def master(self):
        # filer currently hosting the snapmirror source
        s = self.local_snapmirror_status()
        return s['master']

    def slave(self):
        # filer currently hosting the snapmirror destination
        s = self.local_snapmirror_status()
        return s['slave']

    def local(self):
        # the filer paired with the local node, None when unmapped
        if rcEnv.nodename in self.filers:
            return self.filers[rcEnv.nodename]
        return None

    def _cmd(self, cmd, target, info=False):
        """
        Run <cmd> on a filer. <target> is one of "local", "master",
        "slave" or a filer name from self.filers.
        Return (ret, out, err).
        """
        if target == "local":
            filer = self.local()
        elif target == "master":
            filer = self.master()
        elif target == "slave":
            filer = self.slave()
        elif target in self.filers.values():
            filer = target
        else:
            raise ex.excError("unable to find the %s filer"%target)
        _cmd = rcEnv.rsh.split() + [self.user+'@'+filer] + cmd
        if info:
            self.log.info(' '.join(_cmd))
        out, err, ret = justcall(rcEnv.rsh.split() + [self.user+'@'+filer] + cmd)
        if info:
            if len(out) > 0:
                self.log.info(out)
            if len(err) > 0:
                self.log.error(err)
        return ret, out, err

    def cmd_master(self, cmd, info=False):
        return self._cmd(cmd, "master", info=info)

    def cmd_slave(self, cmd, info=False):
        return self._cmd(cmd, "slave", info=info)

    def cmd_local(self, cmd, info=False):
        return self._cmd(cmd, "local", info=info)

    def lag_to_ts(self, lag):
        """
        Convert a snapmirror 'hh:mm:ss' lag to the datetime of the
        last successful transfer.
        """
        now = datetime.datetime.now()
        l = lag.split(":")
        if len(l) != 3:
            raise ex.excError("unexpected lag format")
        delta = datetime.timedelta(hours=int(l[0]),
                                   minutes=int(l[1]),
                                   seconds=int(l[2]))
        return now - delta

    def can_sync(self, target=None, s=None):
        # defer to the scheduler: skip if the last transfer is recent
        if s is None:
            s = self.snapmirror_status(self.slave())
        ts = self.lag_to_ts(s['lag'])
        if self.skip_sync(ts):
            return False
        return True

    def lagged(self, lag, max=None):
        """
        Return True when the 'hh:mm:ss' lag exceeds <max> minutes
        (defaults to sync_max_delay).
        """
        if max is None:
            max = self.sync_max_delay
        l = lag.split(":")
        if len(l) != 3:
            raise ex.excError("unexpected lag format")
        if int(l[0]) * 60 + int(l[1]) > max:
            return True
        return False

    def sync_resync(self):
        (ret, buff, err) = self.cmd_slave(['snapmirror', 'resync', '-f', self.slave()+':'+self.path_short], info=True)
        if ret != 0:
            raise ex.excError

    def syncswap(self):
        """
        Reverse the replication direction after a break: resync from
        the former slave, release the former relation, then delete the
        leftover base snapshot on the new source.
        """
        master = self.master()
        slave = self.slave()
        s = self.snapmirror_status(self.local())
        if s['state'] != "Broken-off":
            self.log.error("can not swap: snapmirror is not in state Broken-off")
            raise ex.excError
        src = slave+':'+self.path_short
        dst = master+':'+self.path_short
        (ret, buff, err) = self._cmd(['snapmirror', 'resync', '-f', '-S', src, dst], master, info=True)
        if ret != 0:
            raise ex.excError(err)
        (ret, buff, err) = self._cmd(['snapmirror', 'release', self.path_short, src], master, info=True)
        if ret != 0:
            raise ex.excError(err)
        (ret, buff, err) = self._cmd(['snapmirror', 'status', '-l', dst], slave, info=False)
        if ret != 0:
            raise ex.excError(err)
        snap = ""
        state = ""
        for line in buff.split('\n'):
            l = line.split()
            if len(l) < 2:
                continue
            if l[0] == "State:":
                state = l[1]
            if state != "Broken-off":
                continue
            if l[0] == "Base" and l[1] == "Snapshot:":
                snap = l[-1]
                break
        if len(snap) == 0:
            self.log.error("can not determine base snapshot name to remove on %s"%slave)
            raise ex.excError
        # leave the filer time to settle before deleting the snapshot
        # (time is imported at module level; the redundant inner import
        # was removed)
        time.sleep(5)
        (ret, buff, err) = self._cmd(['snap', 'delete', self.path_short, snap], slave, info=True)
        if ret != 0:
            raise ex.excError(err)

    def sync_update(self):
        s = self.snapmirror_status(self.slave())
        if not self.can_sync(s=s):
            return
        if s['state'] == "Quiesced":
            self.log.error("update not applicable: quiesced")
            return
        if s['state'] == "Snapmirrored" and s['status'] == "Transferring":
            self.log.info("update not applicable: transfer in progress")
            return
        if s['state'] != "Snapmirrored" or s['status'] != "Idle":
            self.log.error("update not applicable: not in snapmirror idle status")
            return
        (ret, buff, err) = self.cmd_slave(['snapmirror', 'update', self.slave()+':'+self.path_short], info=True)
        if ret != 0:
            raise ex.excError

    def sync_resume(self):
        s = self.snapmirror_status(self.slave())
        if s['state'] != "Quiesced":
            self.log.info("resume not applicable: not quiesced")
            return
        (ret, buff, err) = self.cmd_slave(['snapmirror', 'resume', self.slave()+':'+self.path_short], info=True)
        if ret != 0:
            raise ex.excError

    def sync_quiesce(self):
        s = self.snapmirror_status(self.slave())
        if s['state'] == "Quiesced":
            self.log.info("already quiesced")
            return
        elif s['state'] != "Snapmirrored":
            self.log.error("Can not quiesce: volume not in Snapmirrored state")
            raise ex.excError
        if s['status'] == "Pending":
            self.log.error("Can not quiesce: volume in snapmirror Pending status")
            raise ex.excError
        (ret, buff, err) = self.cmd_slave(['snapmirror', 'quiesce', self.slave()+':'+self.path_short], info=True)
        if ret != 0:
            raise ex.excError
        self.wait_quiesce()

    def sync_break(self):
        (ret, buff, err) = self.cmd_slave(['snapmirror', 'break', self.slave()+':'+self.path_short], info=True)
        if ret != 0:
            raise ex.excError
        self.wait_break()

    def wait_quiesce(self):
        # poll every 5s, up to 60 polls
        timeout = 60
        self.log.info("start waiting quiesce to finish (max %s seconds)"%(timeout*5))
        for i in range(timeout):
            s = self.snapmirror_status(self.slave())
            if s['state'] == "Quiesced" and s['status'] == "Idle":
                return
            time.sleep(5)
        self.log.error("timed out waiting for quiesce to finish")
        raise ex.excError

    def wait_break(self):
        # poll every 5s, up to 20 polls
        timeout = 20
        for i in range(timeout):
            s = self.snapmirror_status(self.slave())
            if s['state'] == "Broken-off" and s['status'] == "Idle":
                return
            time.sleep(5)
        self.log.error("timed out waiting for break to finish")
        raise ex.excError

    def snapmirror_status(self, filer):
        """
        Return the snapmirror relation of self.path_short seen from
        <filer> as a dict: master, slave, state, lag, status.
        Raise excError if the relation is missing or ambiguous.
        """
        (ret, buff, err) = self._cmd(['snapmirror', 'status'], filer, info=False)
        if ret != 0:
            # fixed message: it previously read "can get ..."
            raise ex.excError("can not get snapmirror status from %s: %s"%(filer, err))
        key = ':'.join([filer, self.path_short])
        # 'matches' replaces the former 'list' name which shadowed the
        # builtin
        matches = []
        for line in buff.split('\n'):
            l = line.split()
            if len(l) < 5:
                continue
            if l[2] == "Uninitialized":
                continue
            if l[0] == key or l[1] == key:
                matches.append(l)
        if len(matches) == 0:
            raise ex.excError("%s not found in snapmirror status"%self.path_short)
        elif len(matches) == 1:
            l = matches[0]
            master = l[0].split(':')[0]
            slave = l[1].split(':')[0]
            return dict(master=master, slave=slave, state=l[2], lag=l[3], status=l[4])
        else:
            raise ex.excError("%s is in an unsupported state. Please repair manually."%filer)

    def local_snapmirror_status(self):
        return self.snapmirror_status(self.local())

    def start(self):
        """
        Make the local filer the replication master: quiesce (unless
        forced past a failure), break, and swap direction on PRD nodes.
        """
        if self.local() == self.master():
            self.log.info("%s is already replication master"%self.local())
            return
        s = self.snapmirror_status(self.slave())
        if s['state'] != "Broken-off":
            try:
                self.sync_quiesce()
            except:
                if self.svc.options.force:
                    self.log.warning("force mode is on. bypass failed quiesce.")
                    pass
                else:
                    self.log.error("set force mode to bypass")
                    raise ex.excError
            self.sync_break()
        if rcEnv.node_env == "PRD":
            self.syncswap()

    def stop(self):
        pass

    def _status(self, verbose=False):
        try:
            s = self.snapmirror_status(self.slave())
        except ex.excError as e:
            self.status_log(str(e))
            return rcStatus.UNDEF
        if s['state'] == "Snapmirrored":
            if "Transferring" in s['status']:
                self.log.debug("snapmirror transfer in progress")
                return rcStatus.WARN
            elif self.lagged(s['lag']):
                self.log.debug("snapmirror lag beyond sync_max_delay")
                return rcStatus.WARN
            else:
                return rcStatus.UP
        return rcStatus.DOWN

    def __init__(self,
                 rid=None,
                 filers=None,
                 path=None,
                 user=None,
                 **kwargs):
        # avoid a shared mutable default argument
        if filers is None:
            filers = {}
        resSync.Sync.__init__(self,
                              rid=rid,
                              type="sync.netapp",
                              **kwargs)
        self.label = "netapp %s on %s"%(path, ', '.join(filers.values()))
        self.filers = filers
        self.path = path
        self.user = user
        # snapmirror addresses volumes without the /vol/ prefix
        self.path_short = self.path.replace('/vol/','')

    def __str__(self):
        return "%s filers=%s user=%s path=%s" % (resSync.Sync.__str__(self),\
               self.filers, self.user, self.path)
opensvc-1.8~20170412/lib/checkLagLinux.py 0000644 0001750 0001750 00000007731 13073467726 020134 0 ustar jkelbert jkelbert import checks
import os
import json
import glob
import datetime
from rcGlobalEnv import rcEnv
from rcIfconfigLinux import ifconfig
"""
Ethernet Channel Bonding Driver: v3.4.0 (October 7, 2008)
Bonding Mode: fault-tolerance (active-backup)
Primary Slave: None
Currently Active Slave: eth0
MII Status: up
MII Polling Interval (ms): 100
Up Delay (ms): 0
Down Delay (ms): 0
Slave Interface: eth0
MII Status: up
Link Failure Count: 0
Permanent HW addr: 00:23:7d:a0:20:fa
Slave Interface: eth1
MII Status: up
Link Failure Count: 0
Permanent HW addr: 00:23:7d:a0:20:f6
"""
class check(checks.check):
    """
    Health checker for linux bonding aggregates: per-slave mii status,
    link failure rate per hour (computed against a cached previous
    sample), and slave count per aggregate.
    """
    chk_type = "lag"
    chk_name = "Linux network link aggregate"
    bonding_p = '/proc/net/bonding'

    def do_check(self):
        l = glob.glob(self.bonding_p+'/*')
        if len(l) == 0:
            return self.undef
        ifg = ifconfig()
        r = []
        for bond in l:
            ifname = os.path.basename(bond)
            intf = ifg.interface(ifname)
            if intf is None:
                continue
            # only monitor aggregates carrying an address
            if len(intf.ipaddr) + len(intf.ip6addr) == 0:
                continue
            r += self.do_check_bond(bond)
        return r

    def get_cache(self, bond, slave, uptime):
        """
        Return (uptime, link failure count) recorded by a previous run,
        or (0, 0) if the cache is missing, unreadable, or the host
        rebooted since (cached uptime >= current uptime).
        """
        cache_p = self.cache_path(bond, slave)
        try:
            with open(cache_p, 'r') as f:
                buff = f.read()
            data = json.loads(buff)
            prev_uptime, prev_val = data
        except:
            prev_uptime, prev_val = 0, 0
        if prev_uptime >= uptime:
            # reboot
            prev_uptime, prev_val = 0, 0
        return prev_uptime, prev_val

    def uptime(self):
        # seconds since boot
        with open('/proc/uptime', 'r') as f:
            uptime_seconds = float(f.readline().split()[0])
        return uptime_seconds

    def cache_path(self, bond, slave):
        cache_p = os.path.join(rcEnv.pathtmp, "checkLagLinux.cache."+os.path.basename(bond)+"."+slave)
        return cache_p

    def write_cache(self, bond, slave, val, uptime):
        """
        Persist (uptime, link failure count) for the slave, best
        effort. The fix removed a duplicate unguarded write that
        preceded (and defeated) the try/except below.
        """
        cache_p = self.cache_path(bond, slave)
        try:
            with open(cache_p, 'w') as f:
                f.write(json.dumps([uptime, val]))
        except:
            pass

    def do_check_bond(self, bond):
        """Produce the check entries for one /proc/net/bonding/<lag> file."""
        r = []
        try:
            f = open(bond, 'r')
            buff = f.read()
            f.close()
        except:
            return r
        n_slave = 0
        lag = os.path.basename(bond)
        inst = lag
        for line in buff.split('\n'):
            if line.startswith('Slave Interface:'):
                n_slave += 1
                slave = line.split()[-1]
                inst = '.'.join((lag, slave))
            elif line.startswith('MII Status:'):
                # before the first slave section this reports the
                # aggregate-level mii status under <lag>.mii_status
                val = line.split()[-1]
                if val == "up":
                    val = "0"
                else:
                    val = "1"
                r.append({
                    'chk_instance': inst+'.mii_status',
                    'chk_value': val,
                    'chk_svcname': '',
                })
            elif line.startswith('Link Failure Count:'):
                # this line only appears inside slave sections, so
                # 'slave' is set by then
                val = int(line.split()[-1])
                uptime = self.uptime()
                prev_uptime, prev_val = self.get_cache(bond, slave, uptime)
                if uptime - prev_uptime > 3600:
                    # don't mask alerts by refreshing the cache too soon
                    self.write_cache(bond, slave, val, uptime)
                # Link Failure Count per hour
                val = 3600. * (val - prev_val) / (uptime - prev_uptime)
                r.append({
                    'chk_instance': inst+'.link_failure_per_hour',
                    'chk_value': "%.2f"%val,
                    'chk_svcname': '',
                })
        r.append({
            'chk_instance': lag+'.paths',
            'chk_value': str(n_slave),
            'chk_svcname': '',
        })
        return r
opensvc-1.8~20170412/lib/provFsVxfs.py 0000644 0001750 0001750 00000000226 13073467726 017531 0 ustar jkelbert jkelbert import provFs
class ProvisioningFs(provFs.ProvisioningFs):
    # newfs invocation for vxfs: largefiles enabled, 8k block size
    mkfs = ['newfs', '-F', 'vxfs', '-o', 'largefiles', '-b', '8192']
    # command used to identify the filesystem type
    info = ['fstyp']
opensvc-1.8~20170412/lib/rcIbmSvc.py 0000644 0001750 0001750 00000005555 13073467726 017125 0 ustar jkelbert jkelbert from rcUtilities import justcall, which
import rcExceptions as ex
import os
import ConfigParser
from rcGlobalEnv import rcEnv
# make sure the opensvc bin directory is reachable by the commands
# spawned from this module
if rcEnv.pathbin not in os.environ['PATH']:
    os.environ['PATH'] += ":"+rcEnv.pathbin
def rcmd(cmd, manager, username, key):
    """
    Execute <cmd> on the <manager> array over ssh, authenticating as
    <username> with the <key> file. Return (out, err) on success,
    raise excError after dumping the command and its output otherwise.
    """
    ssh_cmd = ['ssh', '-i', key, '@'.join((username, manager)), cmd]
    out, err, ret = justcall(ssh_cmd)
    if ret != 0:
        print(ssh_cmd)
        print(out)
        raise ex.excError("ssh command execution error")
    return out, err
class IbmSvcs(object):
    """
    Enumerate the ibmsvc arrays declared in the auth config file,
    optionally restricted to the names listed in <objects>.
    Iterating over an instance yields IbmSvc objects.
    """
    def __init__(self, objects=None):
        # a mutable default argument ([]) is shared across calls:
        # default to None and normalize instead
        if objects is None:
            objects = []
        self.objects = objects
        if len(objects) > 0:
            self.filtering = True
        else:
            self.filtering = False
        self.arrays = []
        cf = rcEnv.authconf
        if not os.path.exists(cf):
            return
        conf = ConfigParser.RawConfigParser()
        conf.read(cf)
        m = {}
        for s in conf.sections():
            # only consider sections declaring type = ibmsvc
            if not conf.has_option(s, "type") or \
               conf.get(s, "type") != "ibmsvc":
                continue
            if self.filtering and not s in self.objects:
                continue
            try:
                username = conf.get(s, 'username')
                key = conf.get(s, 'key')
                m[s] = [username, key]
            except:
                # incomplete section: report and skip
                print("error parsing section", s)
                pass
        del(conf)
        for name, creds in m.items():
            username, key = creds
            self.arrays.append(IbmSvc(name, username, key))

    def __iter__(self):
        for array in self.arrays:
            yield(array)
class IbmSvc(object):
    """
    One ibmsvc array endpoint: runs svcinfo/ls* inventory commands
    over ssh and returns their stdout.
    """
    def __init__(self, name, username, key):
        self.name = name
        self.username = username
        self.key = key
        #self.keys = ['lsvdisk']
        self.keys = ['lsvdisk', 'lsmdiskgrp', 'lsnode', 'lscluster', 'svc_product_id', 'lsfabric']

    def rcmd(self, cmd):
        return rcmd(cmd, self.name, self.username, self.key)

    def _run(self, cmd):
        # log the command, run it, keep only stdout
        print("%s: %s"%(self.name, cmd))
        return self.rcmd(cmd)[0]

    def get_lsvdisk(self):
        return self._run('lsvdisk -delim :')

    def get_lsmdiskgrp(self):
        return self._run('lsmdiskgrp -delim :')

    def get_lsnode(self):
        return self._run('svcinfo lsnode -delim !')

    def get_lscluster(self):
        return self._run('svcinfo lscluster -delim :')

    def get_lsfabric(self):
        return self._run('lsfabric -delim :')

    def get_svc_product_id(self):
        return self._run('echo $SVC_PRODUCT_ID')
if __name__ == "__main__":
    o = IbmSvcs()
    for ibmsvc in o:
        # IbmSvc exposes get_* accessors; the former call to
        # ibmsvc.lsmdiskgrp() raised AttributeError
        print(ibmsvc.get_lsmdiskgrp())
opensvc-1.8~20170412/lib/checkRaidSmartArrayWindows.py 0000644 0001750 0001750 00000001010 13073467726 022631 0 ustar jkelbert jkelbert import checkRaidSmartArray
import os
# make the HP Array CLI reachable whatever the install bitness: append
# both the 32 and 64 bit hpacucli locations to PATH when present
sep = ';'
path_list = os.environ['PATH'].split(sep)
for pf_var in ('PROGRAMFILES', 'PROGRAMFILES(X86)'):
    if pf_var in os.environ:
        path_list.append(os.path.join(os.environ.get(pf_var),
                                      'compaq', 'hpacucli', 'bin'))
os.environ['PATH'] = sep.join(path_list)
class check(checkRaidSmartArray.check):
    # the Windows variant only needs the PATH setup performed at import
    # time in this module; the probing logic is inherited unchanged
    pass
opensvc-1.8~20170412/lib/lock.py 0000644 0001750 0001750 00000012167 13073467726 016342 0 ustar jkelbert jkelbert from __future__ import print_function
import os
import time
import json
import rcExceptions as ex
from rcGlobalEnv import rcEnv
class lockTimeout(Exception):
    """Acquiring the lock exceeded the allotted time."""

class lockNoLockFile(Exception):
    """No lockfile path was specified."""

class lockCreateError(Exception):
    """The lockfile could not be created."""

class lockAcquire(Exception):
    """The lock on the lockfile could not be acquired."""

# all lock-related exceptions, for callers catching them as a family
LOCK_EXCEPTIONS = (
    lockTimeout,
    lockNoLockFile,
    lockCreateError,
    lockAcquire,
)
def monlock(timeout=0, delay=0, fname='svcmon.lock'):
    """
    Acquire the svcmon lock file under rcEnv.pathlock and return the
    open lock fd. Print a diagnostic and raise excError on any locking
    failure.
    """
    lockfile = os.path.join(rcEnv.pathlock, fname)
    try:
        lockfd = lock(timeout=timeout, delay=delay, lockfile=lockfile)
    except lockTimeout:
        print("timed out waiting for lock (%s)"%lockfile)
        raise ex.excError
    except lockNoLockFile:
        print("lock_nowait: set the 'lockfile' param")
        raise ex.excError
    except lockCreateError:
        print("can not create lock file (%s)"%lockfile)
        raise ex.excError
    except lockAcquire as e:
        # lockAcquire is raised with a preformatted "holder pid ..."
        # message and has no .pid attribute: formatting e.pid here
        # raised AttributeError
        print("another svcmon is currently running (%s)"%str(e))
        raise ex.excError
    except:
        print("unexpected locking error (%s)"%lockfile)
        raise ex.excError
    return lockfd
def monunlock(lockfd):
    # release a lock previously acquired with monlock()
    unlock(lockfd)
def lock(timeout=30, delay=1, lockfile=None, intent=None):
    """
    Try lock_nowait() every <delay> seconds for at most <timeout>
    seconds and return the lock fd. Raise lockTimeout carrying the
    last holder information when the lock could not be acquired in
    time. Other locking exceptions propagate immediately.
    """
    if timeout == 0 or delay == 0:
        tries = 1
    else:
        # at least one attempt, even for tiny timeouts
        tries = max(1, int(timeout/delay))
    last_err = ""
    for attempt in range(tries):
        if attempt > 0:
            time.sleep(delay)
        try:
            return lock_nowait(lockfile, intent)
        except lockAcquire as exc:
            last_err = str(exc)
    raise lockTimeout(last_err)
def lock_nowait(lockfile=None, intent=None):
    """
    Try to acquire an exclusive lock on <lockfile> without waiting.

    Returns the open lock fd on success, or None when the current
    process already holds the lock (the lockfile records its pid).
    Raises lockNoLockFile, lockCreateError or lockAcquire (the latter
    with the holder's pid and intent in its message).
    """
    if lockfile is None:
        raise lockNoLockFile
    # payload we will record in the lockfile once locked
    data = {"pid": os.getpid(), "intent": intent}
    dir = os.path.dirname(lockfile)
    if not os.path.exists(dir):
        os.makedirs(dir)
    # read the previous holder's pid/intent, tolerating a missing or
    # corrupted lockfile
    try:
        with open(lockfile, 'r') as fd:
            buff = fd.read()
            prev_data = json.loads(buff)
            fd.close()
        #print("lock data from file", lockfile, prev_data)
        if type(prev_data) != dict or "pid" not in prev_data or "intent" not in prev_data:
            prev_data = {"pid": 0, "intent": ""}
            #print("lock data corrupted", lockfile, prev_data)
    except Exception as e:
        prev_data = {"pid": 0, "intent": ""}
        #print("error reading lockfile", lockfile, prev_data, str(e))
    """ test if we already own the lock
    """
    if prev_data["pid"] == os.getpid():
        return
    if os.path.isdir(lockfile):
        raise lockCreateError("lockfile points to a directory")
    try:
        flags = os.O_RDWR|os.O_CREAT
        if rcEnv.sysname != 'Windows':
            flags |= os.O_SYNC
        lockfd = os.open(lockfile, flags, 0o644)
    except Exception as e:
        raise lockCreateError(str(e))
    try:
        """ FD_CLOEXEC makes sure the lock is the held by processes
            we fork from this process
        """
        if os.name == 'posix':
            import fcntl
            # non-blocking exclusive lock: raises IOError when held
            fcntl.flock(lockfd, fcntl.LOCK_EX|fcntl.LOCK_NB)
            flags = fcntl.fcntl(lockfd, fcntl.F_GETFD)
            flags |= fcntl.FD_CLOEXEC
            """ acquire lock
            """
            fcntl.fcntl(lockfd, fcntl.F_SETFD, flags)
        elif os.name == 'nt':
            import msvcrt
            size = os.path.getsize(lockfile)
            msvcrt.locking(lockfd, msvcrt.LK_RLCK, size)
        """ drop our pid and intent in the lockfile, best effort
        """
        fd = lockfd
        try:
            os.ftruncate(lockfd, 0)
            # NOTE(review): os.write of a str is python2 behavior;
            # python3 would require bytes -- confirm target runtime
            os.write(lockfd, json.dumps(data))
            os.fsync(lockfd)
        except:
            pass
        return fd
    except IOError:
        raise lockAcquire("holder pid %(pid)d, holder intent '%(intent)s'" % prev_data)
    except:
        raise
def unlock(lockfd):
    """
    Release a lock by closing its fd. Tolerates None and descriptors
    already closed elsewhere.
    """
    if lockfd is None:
        return
    try:
        os.close(lockfd)
    except:
        # already released by a parent process ?
        pass
if __name__ == "__main__":
    # manual test tool: hold the lock for a while, from the shell
    import optparse
    import time
    import sys
    parser = optparse.OptionParser()
    option_specs = (
        (("-f", "--file"), dict(default="/tmp/test.lock", action="store", dest="file",
                                help="The file to lock")),
        (("-i", "--intent"), dict(default="test", action="store", dest="intent",
                                  help="The lock intent")),
        (("-t", "--time"), dict(default=60, action="store", type="int", dest="time",
                                help="The time we will hold the lock")),
        (("--timeout",), dict(default=1, action="store", type="int", dest="timeout",
                              help="The time before failing to acquire the lock")),
    )
    for flags, kwargs in option_specs:
        parser.add_option(*flags, **kwargs)
    (options, args) = parser.parse_args()
    try:
        lockfd = lock(timeout=options.timeout, delay=1, lockfile=options.file, intent=options.intent)
    except Exception as e:
        print(e, file=sys.stderr)
        sys.exit(1)
    print("lock acquired")
    try:
        time.sleep(options.time)
    except KeyboardInterrupt:
        pass
opensvc-1.8~20170412/lib/resSyncDcsSnap.py 0000644 0001750 0001750 00000014136 13073467726 020312 0 ustar jkelbert jkelbert import os
import rcExceptions as ex
import rcStatus
import datetime
import resSyncDcs
from rcGlobalEnv import rcEnv
from rcUtilities import justcall
class syncDcsSnap(resSyncDcs.SyncDcs):
    """
    DataCore snapshot replication resource driver: refresh the
    configured dcs snapshots and report their health.
    """
    def can_sync(self, target=None):
        ts = None
        """ get oldest snap
        """
        for snap in self.snapname:
            info = self.get_snap(snap)
            if info is None:
                self.log.debug("snap %s missing"%snap)
                return True
            _ts = info['TimeStamp']
            if ts is None or _ts < ts:
                ts = _ts
        return not self.skip_sync(ts)

    def update_snap(self):
        """
        Refresh every configured snapshot with a single
        update-dcssnapshot invocation.
        """
        cmd = ""
        # 'refs' replaces the former 'vars' name which shadowed the
        # builtin
        refs = ""
        for i, snap in enumerate(self.snapname):
            cmd += '$v%d=get-dcssnapshot -snapshot %s -connection %s;'%(i, snap, self.conn)
            refs += '$v%d '%i
        cmd += "echo %s|update-dcssnapshot -Y -connection %s"%(refs, self.conn)
        self.dcscmd(cmd, verbose=True)

    def get_snaps(self):
        """
        Query the dcs manager about every configured snapshot and
        cache the parsed properties in self._info, keyed by Caption.
        """
        cmd = ""
        for i, snap in enumerate(self.snapname):
            cmd += 'get-dcssnapshot -snapshot %s -connection %s;'%(snap, self.conn)
        try:
            ret, out, err = self.dcscmd(cmd)
        except:
            return
        """
        SourceLogicalDiskId : ef989edf-4a6d-4af6-8b9d-bc6d2070a36c
        DestinationLogicalDiskId : 24a6a20f-59f3-4c4d-b4de-9a4f45afff44
        Type : Full
        TimeStamp : 17/01/2013 14:16:49
        ActiveOperation : NoOperation
        State : Migrated
        Failure : NoFailure
        SequenceNumber : 4677814244
        Id : V.{06C86883-CF53-11E1-9203-441EA14CCCC6}-00000177--V
        .{06C86883-CF53-11E1-9203-441EA14CCCC6}-000001D7
        Caption : S64lmwbic6f-22-clone-02
        ExtendedCaption : S64lmwbic6f-22-clone-02 on SDSLMW03
        Internal : False
        SourceLogicalDiskId : f0450ff7-076f-4dbc-bee0-25f6a586f5b2
        DestinationLogicalDiskId : 17e98dbf-1457-41c9-aaad-5b6d7d8cc81c
        Type : Full
        TimeStamp : 17/01/2013 14:16:49
        ActiveOperation : NoOperation
        State : Migrated
        Failure : NoFailure
        SequenceNumber : 4677814247
        Id : V.{06C86883-CF53-11E1-9203-441EA14CCCC6}-0000017A--V
        .{06C86883-CF53-11E1-9203-441EA14CCCC6}-000001DA
        Caption : S64lmwbic6f-25-clone-02
        ExtendedCaption : S64lmwbic6f-25-clone-02 on SDSLMW03
        Internal : False
        """
        info = {}
        lines = out.split('\n')
        for line in lines:
            l = line.split(': ')
            if len(l) != 2:
                continue
            var = l[0].strip()
            val = l[1].strip()
            # 'Internal' is the last property of a record: flush
            if var == 'Internal' and len(info) > 0:
                self._info[info['Caption']] = info
                info = {}
            if var == 'TimeStamp':
                info['TimeStamp'] = datetime.datetime.strptime(val, "%d/%m/%Y %H:%M:%S")
            elif var in ['Type', 'State', 'ActiveOperation', 'Failure', 'Caption']:
                info[var] = val

    def get_snap(self, snap):
        # return the cached snapshot properties, or None when unknown
        if len(self._info) == 0:
            self.get_snaps()
        if snap not in self._info:
            return None
        return self._info[snap]

    def no_status(self):
        # status is only meaningful on the primary side, with the
        # service globally up
        try:
            self.pre_sync_check_flex_primary()
        except ex.excAbortAction:
            return True
        s = self.svc.group_status(excluded_groups=set(["sync", "hb", "app"]))
        if s['overall'].status not in [rcStatus.UP, rcStatus.NA]:
            return True
        return False

    def _status(self, verbose=False, skip_prereq=False):
        if self.no_status():
            self.status_log("skip on secondary node")
            return rcStatus.NA
        err = False
        errlog = []
        log = []
        try:
            self.get_auth()
        except ex.excError as e:
            self.status_log(str(e))
            return rcStatus.WARN
        for snap in self.snapname:
            info = self.get_snap(snap)
            if info is None:
                errlog.append("snapshot %s does not exists"%snap)
                err |= True
                continue
            if info['State'] not in ['Healthy','Migrated']:
                errlog.append("snapshot %s state is %s"%(snap, info['State']))
                err |= True
            if info['Failure'] not in ['NoFailure']:
                errlog.append("snapshot %s failure state is %s"%(snap, info['Failure']))
                err |= True
            if info['TimeStamp'] < datetime.datetime.now() - datetime.timedelta(minutes=self.sync_max_delay):
                errlog.append("snapshot %s too old"%snap)
                err |= True
            log.append("last update on %s"%info['TimeStamp'])
        if err:
            self.status_log('\n'.join(errlog))
            return rcStatus.WARN
        self.status_log('\n'.join(log))
        return rcStatus.UP

    def sync_resync(self):
        self.update_snap()

    def sync_update(self):
        self.update_snap()

    def refresh_svcstatus(self):
        self.svcstatus = self.svc.group_status(excluded_groups=set(["sync", 'hb', 'app']))

    def get_svcstatus(self):
        if len(self.svcstatus) == 0:
            self.refresh_svcstatus()

    def __init__(self,
                 rid=None,
                 snapname=None,
                 manager=None,
                 dcs=None,
                 **kwargs):
        # avoid shared mutable default arguments
        if snapname is None:
            snapname = set([])
        if manager is None:
            manager = set([])
        if dcs is None:
            dcs = set([])
        resSyncDcs.SyncDcs.__init__(self,
                                    rid=rid,
                                    type="sync.dcssnap",
                                    manager=manager,
                                    dcs=dcs,
                                    **kwargs)
        self.label = "DCS snapshot %s"%', '.join(snapname)
        self.snapname = snapname
        self._info = {}
        self.default_schedule = "@0"

    def __str__(self):
        # fixed: the module imports resSyncDcs, not resSync, so the
        # former resSync.Sync.__str__ reference raised NameError
        return "%s dcs=%s manager=%s snapname=%s" % (
            resSyncDcs.SyncDcs.__str__(self),
            ' '.join(self.dcs),
            ' '.join(self.manager),
            ' '.join(self.snapname))
opensvc-1.8~20170412/lib/rcAmazon.py 0000644 0001750 0001750 00000002700 13073467726 017154 0 ustar jkelbert jkelbert import json
from rcUtilities import which, justcall
import rcExceptions as ex
class Amazon(object):
    """
    Mixin providing awscli access and ec2 instance metadata caching.
    Expects self.svc and self.log to be provided by the host class.
    """
    # lazily-populated caches
    instance_id = None
    instance_data = None

    def aws(self, cmd, verbose=True):
        """
        Run an awscli command with json output and return the decoded
        data. Raise excError when the command fails.
        """
        if hasattr(self.svc, "aws") and which(self.svc.aws) is not None:
            awscli = [self.svc.aws]
        else:
            awscli = ["aws"]
        awscli += ["--output=json"]
        if hasattr(self.svc, "aws_profile"):
            awscli += ["--profile", self.svc.aws_profile]
        awscli += cmd
        if verbose:
            self.log.info(" ".join(awscli))
        out, err, ret = justcall(awscli)
        if ret != 0:
            raise ex.excError(err)
        return json.loads(out)

    def get_instance_id(self):
        """
        Return the ec2 instance id, fetched once from the metadata
        service and cached.
        """
        if self.instance_id is None:
            # NOTE(review): httplib is the python2 module name; python3
            # would need http.client -- confirm target runtime
            import httplib
            conn = httplib.HTTPConnection("instance-data")
            conn.request("GET", "/latest/meta-data/instance-id")
            self.instance_id = conn.getresponse().read()
        return self.instance_id

    def get_instance_data(self, refresh=False):
        """
        Return the cached describe-instances record for this instance,
        re-fetching it when <refresh> is set. None when the lookup
        fails.
        """
        if self.instance_data is not None and not refresh:
            return self.instance_data
        data = self.aws(["ec2", "describe-instances", "--instance-ids", self.get_instance_id()], verbose=False)
        try:
            self.instance_data = data["Reservations"][0]["Instances"][0]
        except Exception:
            self.instance_data = None
        return self.instance_data
opensvc-1.8~20170412/lib/resDiskDisk.py 0000644 0001750 0001750 00000013713 13073467726 017627 0 ustar jkelbert jkelbert from __future__ import print_function
import json
import time
import resources as Res
from rcGlobalEnv import rcEnv
from svcBuilder import conf_get_string_scope, conf_get_int_scope
from rcUtilities import lazy
import rcExceptions as ex
class Disk(Res.Resource):
    """ SAN Disk resource

    The disk is not managed locally: provisioning and unprovisioning
    are delegated to the collector through form-based requests, and
    the resulting disk_id is persisted in the service configuration.
    """
    def __init__(self, rid=None, disk_id=None, **kwargs):
        Res.Resource.__init__(self, rid, "disk.disk", **kwargs)
        # normalize "" to None: None means "not provisioned yet"
        self.disk_id = disk_id if disk_id != "" else None
        self.set_label()

    def set_label(self):
        # refresh the resource label from the current disk_id
        if self.disk_id is None:
            self.label = "unprovisionned disk"
        else:
            self.label = "disk "+str(self.disk_id)

    def info(self):
        return self.fmt_info([
            ["disk_id", self.disk_id],
        ])

    def __str__(self):
        return "%s disk disk_id=%s" % (
            Res.Resource.__str__(self),
            str(self.disk_id),
        )

    @lazy
    def array_name(self):
        # 'array' provisioning parameter, mandatory
        try:
            return conf_get_string_scope(self.svc, self.svc.config, self.rid, "array")
        except:
            raise ex.excError("disk %s: missing the 'array' provisioning parameter" % self.rid)

    @lazy
    def diskgroup(self):
        # 'diskgroup' provisioning parameter, mandatory
        try:
            return conf_get_string_scope(self.svc, self.svc.config, self.rid, "diskgroup")
        except:
            raise ex.excError("disk %s: missing the 'diskgroup' provisioning parameter" % self.rid)

    @lazy
    def array_id(self):
        # resolve the collector id of the array named <array_name>;
        # the name must match exactly one array
        data = self.svc.collector_rest_get("/arrays", {
            "filters": "array_name "+self.array_name,
            "props": "id",
        })
        if "error" in data:
            raise ex.excError(data["error"])
        if data["meta"]["total"] != 1:
            raise ex.excError("array %s has %d matching candidates" % (self.array_name, data["meta"]["total"]))
        return data["data"][0]["id"]

    def get_form_id(self, form_name):
        # resolve the collector id of the form named <form_name>;
        # the name must match exactly one form
        data = self.svc.collector_rest_get("/forms", {
            "filters": "form_name "+form_name,
            "props": "id",
        })
        if "error" in data:
            raise ex.excError(data["error"])
        if data["meta"]["total"] != 1:
            raise ex.excError("form %s has %d matching candidates" % (form_name, data["meta"]["total"]))
        return data["data"][0]["id"]

    def wait_results(self, results):
        """
        Ask the collector for a submitted form-based action results
        until the action is completed, relaying new log lines to the
        resource logger as they appear (polling every second).
        The results format is:
        {
         'status': 'QUEUED',
         'request_data': {},
         'returncode': 0,
         'log': {
          'output-0': [
          ],
         },
         'outputs': {
          'output-0': {
          },
         },
         'results_id': 300,
         'outputs_order': ['output-0']
        }
        """
        # per-output count of log lines already relayed
        logs = {}
        while True:
            data = self.get_results(results)
            if "error" in data:
                raise ex.excError(data["error"])
            for output, log in data["log"].items():
                if output not in logs:
                    logs[output] = 0
                if len(log) > logs[output]:
                    # relay only the lines not yet seen
                    for lvl, fmt, d in log[logs[output]:]:
                        if len(fmt) == 0:
                            continue
                        try:
                            msg = fmt % d
                        except:
                            msg = "corrupted collector request log line"
                        if lvl == 0:
                            self.log.info(msg)
                        else:
                            self.log.error(msg)
                    logs[output] = len(log)
            if data["status"] == "COMPLETED":
                if data["returncode"] != 0:
                    raise ex.excError("collector request completed with errors")
                return data
            time.sleep(1)

    def get_results(self, results):
        # fetch the current state of a submitted request
        data = self.svc.collector_rest_get("/form_output_results/%d" % results["results_id"])
        return data

    def provision(self):
        """
        Submit a disk provisioning request to the collector, wait for
        completion, then persist the returned disk_id in the service
        configuration.
        """
        if self.disk_id is not None:
            self.log.info("skip provision: 'disk_id' is already set")
            return
        try:
            size = conf_get_string_scope(self.svc, self.svc.config, self.rid, "size")
        except:
            raise ex.excError("disk %s: missing the 'size' provisioning parameter" % self.rid)
        try:
            slo = conf_get_string_scope(self.svc, self.svc.config, self.rid, "slo")
        except:
            # slo is optional
            slo = None
        handler = "/services/self/disks"
        data = {
            "action": "provision",
            "size": size,
            "array_name": self.array_name,
            "diskgroup": self.diskgroup,
        }
        if slo is not None:
            data["slo"] = slo
        results = self.svc.collector_rest_put(handler, data)
        if "error" in results:
            raise ex.excError(results["error"])
        self.log.info("disk provision request sent to the collector (id %d). "
                      "waiting for completion." % results["results_id"])
        results = self.wait_results(results)
        self.disk_id = results["outputs"]["add disk"][0]["disk_id"]
        self.set_label()
        self.svc.config.set(self.rid, "disk_id", self.disk_id)
        self.svc.write_config()
        self.log.info("disk %s provisionned" % self.disk_id)

    def unprovision(self):
        """
        Submit a disk unprovisioning request to the collector, wait
        for completion, then clear disk_id in the service
        configuration.
        """
        handler = "/services/self/disks"
        data = {
            "action": "unprovision",
            "disk_id": self.disk_id,
        }
        results = self.svc.collector_rest_put(handler, data)
        if "error" in results:
            raise ex.excError(results["error"])
        self.log.info("disk unprovision request sent to the collector (id %d). "
                      "waiting for completion." % results["results_id"])
        results = self.wait_results(results)
        self.svc.config.set(self.rid, "disk_id", "")
        self.svc.write_config()
        self.log.info("unprovisionned")
opensvc-1.8~20170412/lib/resSyncDds.py 0000644 0001750 0001750 00000032174 13073467726 017473 0 ustar jkelbert jkelbert import os
import logging
from rcGlobalEnv import rcEnv
from rcUtilities import which
from rcUtilitiesLinux import lv_info
from subprocess import *
import rcExceptions as ex
import rcStatus
import datetime
import resSync
class syncDds(resSync.Sync):
    def pre_action(self, action):
        """
        Prepare the dds resources of the resource set before a sync action:
        verify every destination node is reachable and consistent with our
        snap1 uuid, and create the lvm snapshots the action needs.
        """
        # only consider resources that will actually run the action
        resources = [ r for r in self.rset.resources if not r.skip and not r.is_disabled() ]
        if len(resources) == 0:
            return
        self.pre_sync_check_prd_svc_on_non_prd_node()
        for i, r in enumerate(resources):
            if not r.svc_syncable():
                return
            r.get_info()
            if action == 'sync_full':
                r.create_snap1()
            elif action in ['sync_update', 'sync_resync', 'sync_drp', 'sync_nodes']:
                if action == 'sync_nodes' and self.target != ['nodes']:
                    return
                if action == 'sync_drp' and self.target != ['drpnodes']:
                    return
                r.get_info()
                r.get_snap1_uuid()
                nb = 0
                tgts = r.targets.copy()
                for n in tgts:
                    try:
                        r.check_remote(n)
                        nb += 1
                    except:
                        # NOTE(review): this mutates self.targets while the
                        # loop iterates r.targets -- looks like it should be
                        # r.targets. Confirm before changing: the len(tgts)
                        # check below aborts anyway when any check failed.
                        self.targets -= set([n])
                if nb != len(tgts):
                    self.log.error('all destination nodes must be present for dds-based synchronization to proceed')
                    raise ex.excError
                r.create_snap2()
def snap_exists(self, dev):
if not os.path.exists(dev):
self.log.debug('dev path does not exist')
return False
cmd = ['lvs', '--noheadings', '-o', 'snap_percent', dev]
(ret, out, err) = self.call(cmd, errlog=False)
if ret != 0:
return False
if len(out.strip()) == 0:
self.log.debug('dev is not a snapshot')
return False
return True
    def create_snap(self, dev, lv):
        """Create lvm snapshot <lv> of the source lv, sized self.snap_size MB."""
        if self.snap_exists(dev):
            self.log.error('%s should not exist'%dev)
            raise ex.excError
        cmd = ['lvcreate', '-s', '-n', lv,
               '-L', str(self.snap_size)+'M',
               os.path.join(os.sep, 'dev', self.src_vg, self.src_lv)
              ]
        (ret, out, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError
    def set_statefile(self):
        """Compute the path of the per-resource dds state file."""
        self.statefile = os.path.join(rcEnv.pathvar,
                                      self.svc.svcname+'_'+self.rid+'_dds_state')
    def create_snap1(self):
        """Create the reference snapshot (snap1) and record its uuid."""
        if self.snap_exists(self.snap2):
            # a leftover snap2 means a previous sync did not complete
            self.log.error('%s should not exist'%self.snap2)
            raise ex.excError
        self.create_snap(self.snap1, self.snap1_lv)
        self.write_statefile()
    def create_snap2(self):
        """Create the delta snapshot (snap2)."""
        self.create_snap(self.snap2, self.snap2_lv)
    def snap_name(self, snap):
        """Return the snapshot lv name for role <snap> ('snap1' or 'snap2')."""
        return os.path.basename(self.src_lv).replace('-', '_')+'_osvc_'+snap
    def get_src_info(self):
        """Resolve source lv information and derive snapshot device paths."""
        (self.src_vg, self.src_lv, self.src_size) = lv_info(self, self.src)
        if self.src_lv is None:
            self.log.error("unable to fetch source logical volume information")
            raise ex.excError
        if self.snap_size == 0:
            # default snapshot size: 10% of the source lv size
            self.snap_size = self.src_size//10
        self.snap1_lv = self.snap_name('snap1')
        self.snap2_lv = self.snap_name('snap2')
        self.snap1 = os.path.join(os.sep, 'dev', self.src_vg, self.snap1_lv)
        self.snap2 = os.path.join(os.sep, 'dev', self.src_vg, self.snap2_lv)
        # device-mapper cow device backing snap1; dm doubles '-' in vg names
        self.snap1_cow = os.path.join(os.sep, 'dev', 'mapper',
                                      '-'.join([self.src_vg.replace('-', '--'),
                                                self.snap1_lv,
                                                'cow'])
                                     )
def get_peersenders(self):
self.peersenders = set([])
if 'nodes' not in self.target:
self.peersenders |= self.svc.nodes
self.peersenders -= set([rcEnv.nodename])
def get_targets(self):
self.targets = set()
if 'nodes' in self.target:
self.targets |= self.svc.nodes
if 'drpnodes' in self.target:
self.targets |= self.svc.drpnodes
self.targets -= set([rcEnv.nodename])
    def get_info(self):
        """Refresh replication targets and source lv information."""
        self.get_targets()
        self.get_src_info()
    def svc_syncable(self):
        """Return False when pre-sync checks veto the sync, True otherwise."""
        try:
            self.pre_sync_check_svc_not_up()
            self.pre_sync_check_flex_primary()
        except ex.excAbortAction:
            return False
        return True
    def sync_full(self):
        """Send a full copy of the snap1 device to every target node."""
        if not self.svc_syncable():
            return
        for n in self.targets:
            self.do_fullsync(n)
def do_fullsync(self, node):
dst = self.dsts[node]
cmd1 = ['dd', 'if='+self.snap1, 'bs=1M']
cmd2 = rcEnv.rsh.split() + [node, 'dd', 'bs=1M', 'of='+dst]
self.log.info(' '.join(cmd1 + ["|"] + cmd2))
p1 = Popen(cmd1, stdout=PIPE)
p2 = Popen(cmd2, stdin=p1.stdout, stdout=PIPE)
p2.communicate()[0]
if p2.returncode != 0:
self.log.error("full sync failed")
raise ex.excError
self.push_statefile(node)
    def get_snap1_uuid(self):
        """Read the lvm uuid of snap1 into self.snap1_uuid."""
        cmd = ['lvs', '--noheadings', '-o', 'uuid', self.snap1]
        (ret, out, err) = self.call(cmd)
        if ret != 0:
            raise ex.excError
        self.snap1_uuid = out.strip()
    def write_statefile(self):
        """Write '<date>;<snap1 uuid>' to the local state file."""
        self.set_statefile()
        self.get_snap1_uuid()
        self.log.info("update state file with snap uuid %s"%self.snap1_uuid)
        with open(self.statefile, 'w') as f:
            f.write(str(datetime.datetime.now())+';'+self.snap1_uuid+'\n')
    def _push_statefile(self, node):
        """Copy the state file to <node> with the configured rcp command."""
        cmd = rcEnv.rcp.split() + [self.statefile, node+':'+self.statefile]
        (ret, out, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError
    def push_statefile(self, node):
        """Push the state file to <node> and to all peer sender nodes."""
        self.set_statefile()
        self._push_statefile(node)
        self.get_peersenders()
        for s in self.peersenders:
            self._push_statefile(s)
def apply_delta(self, node):
if not which('dds'):
raise ex.excError("dds executable not found")
dst = self.dsts[node]
extract_cmd = ['dds', '--extract', '--cow', self.snap1_cow, '--source',
self.snap2]
merge_cmd = ['dds', '--merge', '--dest', dst, '-v']
merge_cmd = rcEnv.rsh.split() + [node] + merge_cmd
self.log.info(' '.join(extract_cmd + ["|"] + merge_cmd))
p1 = Popen(extract_cmd, stdout=PIPE)
p2 = Popen(merge_cmd, stdin=p1.stdout, stdout=PIPE)
buff = p2.communicate()
if p2.returncode != 0:
if buff[1] is not None and len(buff[1]) > 0:
self.log.error(buff[1])
self.log.error("sync update failed")
raise ex.excError
if buff[0] is not None and len(buff[0]) > 0:
self.log.info(buff[0])
def do_update(self, node):
self.apply_delta(node)
    def remove_snap1(self):
        """Remove the snap1 lv if it exists."""
        if not self.snap_exists(self.snap1):
            return
        cmd = ['lvremove', '-f', self.snap1]
        (ret, out, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError
    def rename_snap2(self):
        """Rename snap2 to snap1, making it the new reference snapshot."""
        if not self.snap_exists(self.snap2):
            self.log.error("%s should exist"%self.snap2)
            raise ex.excError
        if self.snap_exists(self.snap1):
            self.log.error("%s should not exist"%self.snap1)
            raise ex.excError
        cmd = ['lvrename', self.src_vg, self.snap2_lv, self.snap1_lv]
        (ret, out, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError
    def rotate_snaps(self):
        """Promote snap2 to snap1 after a successful delta send."""
        self.remove_snap1()
        self.rename_snap2()
    def check_remote(self, node):
        """Verify the last delta applied on <node> matches our snap1 uuid."""
        rs = self.get_remote_state(node)
        if self.snap1_uuid != rs['uuid']:
            self.log.error("%s last update uuid doesn't match snap1 uuid"%(node))
            raise ex.excError
    def get_remote_state(self, node):
        """Fetch and parse the dds state file from <node>."""
        self.set_statefile()
        cmd1 = ['env', 'LANG=C', 'cat', self.statefile]
        cmd = rcEnv.rsh.split() + [node] + cmd1
        (ret, out, err) = self.call(cmd)
        if ret != 0:
            self.log.error("could not fetch %s last update uuid"%node)
            raise ex.excError
        return self.parse_statefile(out, node=node)
    def get_local_state(self):
        """Read and parse the local dds state file."""
        self.set_statefile()
        with open(self.statefile, 'r') as f:
            out = f.read()
        return self.parse_statefile(out)
def parse_statefile(self, out, node=None):
self.set_statefile()
if node is None:
node = rcEnv.nodename
lines = out.strip().split('\n')
if len(lines) != 1:
self.log.error("%s:%s is corrupted"%(node, self.statefile))
raise ex.excError
fields = lines[0].split(';')
if len(fields) != 2:
self.log.error("%s:%s is corrupted"%(node, self.statefile))
raise ex.excError
return dict(date=fields[0], uuid=fields[1])
    def sync_nodes(self):
        """Run sync_update when this resource targets production nodes."""
        if self.target != ['nodes']:
            return
        self.sync_update()
    def sync_drp(self):
        """Run sync_update when this resource targets drp nodes."""
        if self.target != ['drpnodes']:
            return
        self.sync_update()
    def sync_update(self):
        """Send deltas to all targets, rotate snapshots, refresh state files."""
        if not self.svc_syncable():
            return
        for n in self.targets:
            self.do_update(n)
        self.rotate_snaps()
        self.write_statefile()
        for n in self.targets:
            self.push_statefile(n)
    def checksum(self, node, bdev, q=None):
        """
        md5sum the block device <bdev> on <node> (locally when node is us).
        Post the digest to queue <q> when set, else store in self.checksums.
        """
        cmd = ['md5sum', bdev]
        if node != rcEnv.nodename:
            cmd = rcEnv.rsh.split() + [node] + cmd
        (ret, out, err) = self.call(cmd)
        if ret != 0:
            return ""
        o = out.split()
        if q is not None:
            q.put(o[0])
        else:
            self.checksums[node] = o[0]
    def sync_verify(self):
        """Verify replica consistency: md5 of snap1 vs every target device."""
        if not self.svc_syncable():
            return
        self.get_info()
        from multiprocessing import Process, Queue
        self.checksums = {}
        queues = {}
        ps = []
        self.log.info("start checksum threads. please be patient.")
        # one subprocess per target, results come back through queues
        for n in self.targets:
            dst = self.dsts[n]
            queues[n] = Queue()
            p = Process(target=self.checksum, args=(n, dst, queues[n]))
            p.start()
            ps.append(p)
        # checksum the local snapshot while the remotes run
        self.checksum(rcEnv.nodename, self.snap1)
        self.log.info("md5 %s: %s"%(rcEnv.nodename, self.checksums[rcEnv.nodename]))
        for p in ps:
            p.join()
        for n in self.targets:
            self.checksums[n] = queues[n].get()
            self.log.info("md5 %s: %s"%(n, self.checksums[n]))
        if len(self.checksums) < 2:
            self.log.error("not enough checksums collected")
            raise ex.excError
        err = False
        for n in self.targets:
            if self.checksums[rcEnv.nodename] != self.checksums[n]:
                self.log.error("src/dst checksums differ for %s/%s"%(rcEnv.nodename, n))
                err = True
        if not err:
            self.log.info("src/dst checksums verified")
    def start(self):
        # nothing to start for a dds sync resource
        pass
    def stop(self):
        # nothing to stop for a dds sync resource
        pass
    def can_sync(self, target=None):
        """
        Return True when the last sync is old enough to sync again.
        <target> is unused, kept for api compatibility with other drivers.
        """
        try:
            ls = self.get_local_state()
            last = datetime.datetime.strptime(ls['date'], "%Y-%m-%d %H:%M:%S.%f")
        except IOError:
            # no state file yet: always allow the first sync
            return True
        return not self.skip_sync(last)
    def _status(self, verbose=False):
        """Return UP when the last sync is fresh enough, else WARN."""
        try:
            ls = self.get_local_state()
            now = datetime.datetime.now()
            last = datetime.datetime.strptime(ls['date'], "%Y-%m-%d %H:%M:%S.%f")
            delay = datetime.timedelta(minutes=self.sync_max_delay)
        except ex.excError:
            self.status_log("failed to get status")
            return rcStatus.WARN
        except IOError:
            self.status_log("dds state file not found")
            return rcStatus.WARN
        except:
            # unexpected error: dump the traceback and degrade to WARN
            import sys
            import traceback
            e = sys.exc_info()
            print(e[0], e[1], traceback.print_tb(e[2]))
            return rcStatus.WARN
        if last < now - delay:
            self.status_log("Last sync on %s older than %i minutes"%(last, self.sync_max_delay))
            return rcStatus.WARN
        return rcStatus.UP
def __init__(self,
rid=None,
target=None,
src=None,
dsts={},
delta_store=None,
sender=None,
snap_size=0,
**kwargs):
resSync.Sync.__init__(self,
rid=rid,
type="sync.dds",
**kwargs)
self.label = "dds of %s to %s"%(src, ", ".join(target))
self.target = target
self.src = src
self.dsts = dsts
self.snap_size = snap_size
if delta_store is None:
self.delta_store = rcEnv.pathvar
else:
self.delta_store = delta_store
def __str__(self):
return "%s target=%s src=%s" % (resSync.Sync.__str__(self),\
self.target, self.src)
opensvc-1.8~20170412/lib/resIp.py 0000644 0001750 0001750 00000045472 13073467726 016501 0 ustar jkelbert jkelbert """
Generic ip resource driver.
"""
from __future__ import unicode_literals
import os
import resources as Res
from rcGlobalEnv import rcEnv
from rcUtilities import qcall, which, getaddr
import rcStatus
import rcExceptions as ex
IFCONFIG_MOD = __import__('rcIfconfig'+rcEnv.sysname)
class Ip(Res.Resource):
"""
Base ip resource driver.
"""
    def __init__(self,
                 rid=None,
                 ipdev=None,
                 ipname=None,
                 mask=None,
                 gateway=None,
                 type="ip",
                 **kwargs):
        """
        :param ipdev: network interface to plumb the address on
        :param ipname: ip address or resolvable dns name
        :param mask: netmask
        :param gateway: optional gateway address
        """
        Res.Resource.__init__(self, rid, type=type, **kwargs)
        self.ipdev = ipdev
        self.ipname = ipname
        self.mask = mask
        self.gateway = gateway
        self.set_label()
        self.lockfd = None        # startip lock file descriptor
        self.stacked_dev = None   # resolved at start/stop time
        self.addr = None          # resolved address, cached by getaddr()
    def set_label(self):
        """
        Set the resource label property.
        """
        self.label = str(self.ipname) + '@' + self.ipdev
    def info(self):
        """
        Contribute resource key/val pairs to the service's resinfo.
        """
        if self.ipname is None:
            return
        try:
            # best effort: report the address when resolvable
            self.getaddr()
        except ex.excError:
            pass
        from rcUtilities import to_cidr
        data = [
          ["ipaddr", self.addr],
          ["ipname", self.ipname],
          ["ipdev", self.ipdev],
          ["mask", str(to_cidr(self.mask))],
          ["gateway", str(self.gateway)],
        ]
        return self.fmt_info(data)
    def getaddr(self, cache_fallback=False):
        """
        Try resolving the ipname into an ip address. If the resolving fails
        and <cache_fallback> is True, use the last successful resolution
        result.
        """
        if self.ipname is None:
            raise ex.excError("ip address is not allocated yet")
        if self.addr is not None:
            # already resolved for this object lifetime
            return
        try:
            self.log.debug("resolving %s", self.ipname)
            self.addr = getaddr(self.ipname, cache_fallback=cache_fallback, log=self.log)
        except Exception as exc:
            if not self.disabled:
                raise ex.excError("could not resolve name %s: %s" % (self.ipname, str(exc)))
    def __str__(self):
        """Debug representation including ipdev and ipname."""
        return "%s ipdev=%s ipname=%s" % (Res.Resource.__str__(self),\
                self.ipdev, self.ipname)
    def setup_environ(self):
        """
        Set the main resource properties as environment variables, so they
        are available to triggers.
        """
        os.environ['OPENSVC_IPDEV'] = str(self.ipdev)
        os.environ['OPENSVC_IPNAME'] = str(self.ipname)
        os.environ['OPENSVC_MASK'] = str(self.mask)
        try:
            self.getaddr()
            os.environ['OPENSVC_IPADDR'] = str(self.addr)
        except:
            # resolution failure: OPENSVC_IPADDR is simply not exported
            pass
        # OPENSVC_IP<n> aggregates the properties, indexed by the rid suffix
        elements = self.rid.split('#')
        if len(elements) == 2:
            index = elements[1]
        else:
            index = ''
        var = 'OPENSVC_IP'+index
        vals = []
        for prop in ['ipname', 'ipdev', 'addr', 'mask']:
            if getattr(self, prop) is not None:
                vals.append(str(getattr(self, prop)))
            else:
                vals.append('unknown')
        val = ' '.join(vals)
        os.environ[var] = val
    def _status(self, verbose=False):
        """
        Evaluate the ip resource status: UP/DOWN from is_up(), UNDEF when
        the address cannot be resolved or the check is not supported.
        """
        try:
            self.getaddr()
        except Exception as exc:
            self.status_log(str(exc))
            return rcStatus.UNDEF
        ifconfig = IFCONFIG_MOD.ifconfig()
        intf = ifconfig.interface(self.ipdev)
        if intf is None and "dedicated" not in self.tags:
            self.status_log("interface %s not found" % self.ipdev)
            return rcStatus.DOWN
        try:
            if self.is_up():
                return self.status_stdby(rcStatus.UP)
            else:
                return self.status_stdby(rcStatus.DOWN)
        except ex.excNotSupported:
            self.status_log("not supported")
            return rcStatus.UNDEF
        except ex.excError as exc:
            self.status_log(str(exc))
            return rcStatus.UNDEF
    def arp_announce(self):
        """
        Announce to neighbors the ip address is plumbed on ipdev through an
        arping broadcast of unsolicited packets.
        """
        if ':' in self.addr:
            # ipv6: no arp
            return
        if not which("arping"):
            self.log.warning("arp announce skipped. install 'arping'")
            return
        cmd = ["arping", "-U", "-c", "1", "-I", self.ipdev, "-s", self.addr, self.addr]
        self.log.info(' '.join(cmd))
        qcall(cmd)
    def abort_start(self):
        """
        Return True if the service start should be aborted because of a routed
        ip conflict.
        """
        if 'nonrouted' in self.tags or 'noaction' in self.tags:
            return False
        if self.addr is None:
            return False
        if not self.is_up() and self.check_ping():
            # the address pings but is not plumbed here: conflict
            return True
        return False
    def start_link(self):
        """
        Start the ipdev link. Implemented by os-specific subclasses.
        """
        raise ex.MissImpl('start_link')
    def check_ping(self, count=1, timeout=5):
        """
        Test if the ip is seen as active on the networks.
        Implemented by os-specific subclasses.
        """
        raise ex.MissImpl('check_ping')
    def startip_cmd(self):
        """
        The os/driver specific start implementation.
        """
        raise ex.MissImpl('startip_cmd')
    def stopip_cmd(self):
        """
        The os/driver specific stop implementation.
        """
        raise ex.MissImpl('stopip_cmd')
    def is_up(self):
        """
        Return True if the ip is plumbed.
        """
        ifconfig = self.get_ifconfig()
        if ifconfig.has_param("ipaddr", self.addr) is not None or \
           ifconfig.has_param("ip6addr", self.addr) is not None:
            self.log.debug("%s@%s is up", self.addr, self.ipdev)
            return True
        self.log.debug("%s@%s is down", self.addr, self.ipdev)
        return False
    def allow_start(self):
        """
        Do sanity checks before allowing the start: address not already
        plumbed, parent interface present and up, no duplicate on the net.
        """
        if self.is_up() is True:
            self.log.info("%s is already up on %s", self.addr, self.ipdev)
            raise ex.IpAlreadyUp(self.addr)
        ifconfig = IFCONFIG_MOD.ifconfig()
        intf = ifconfig.interface(self.ipdev)
        if intf is None:
            self.log.error("interface %s not found. Cannot stack over it.", self.ipdev)
            raise ex.IpDevDown(self.ipdev)
        if not intf.flag_up:
            # try switching to an up interface of the same group first
            if hasattr(intf, 'groupname') and intf.groupname != "":
                l = [_intf for _intf in ifconfig.get_matching_interfaces('groupname', intf.groupname) if _intf.flag_up]
                if len(l) == 1:
                    self.log.info("switch %s to valid alternate path %s", self.ipdev, l[0].name)
                    intf = l[0]
                    self.ipdev = l[0].name
            try:
                self.start_link()
            except ex.MissImpl:
                self.log.error("interface %s is not up. Cannot stack over it.", self.ipdev)
                raise ex.IpDevDown(self.ipdev)
        if not self.svc.abort_start_done and self.check_ping():
            self.log.error("%s is already up on another host", self.addr)
            raise ex.IpConflict(self.addr)
        return
    def lock(self):
        """
        Acquire the startip lock, protecting against allocation of the same
        ipdev stacked device to multiple resources or multiple services.
        """
        import lock
        if self.svc.options.waitlock >= 0:
            timeout = self.svc.options.waitlock
        else:
            timeout = 120
        delay = 1
        lockfd = None
        action = "startip"
        lockfile = os.path.join(rcEnv.pathlock, action)
        details = "(timeout %d, delay %d, action %s, lockfile %s)" % \
                  (timeout, delay, action, lockfile)
        self.log.debug("acquire startip lock %s", details)
        try:
            lockfd = lock.lock(timeout=timeout, delay=delay, lockfile=lockfile, intent="startip")
        except lock.lockTimeout as exc:
            raise ex.excError("timed out waiting for lock %s: %s" % (details, str(exc)))
        except lock.lockNoLockFile:
            raise ex.excError("lock_nowait: set the 'lockfile' param %s" % details)
        except lock.lockCreateError:
            raise ex.excError("can not create lock file %s" % details)
        except lock.lockAcquire as exc:
            raise ex.excError("another action is currently running %s: %s" % (details, str(exc)))
        except ex.excSignal:
            raise ex.excError("interrupted by signal %s" % details)
        except Exception as exc:
            self.save_exc()
            raise ex.excError("unexpected locking error %s: %s" % (details, str(exc)))
        if lockfd is not None:
            self.lockfd = lockfd
    def unlock(self):
        """
        Release the startip lock.
        """
        import lock
        lock.unlock(self.lockfd)
    @staticmethod
    def get_ifconfig():
        """
        Wrapper around the os specific rcIfconfig module's ifconfig function.
        Return a parsed ifconfig dataset.
        """
        return IFCONFIG_MOD.ifconfig()
    def start(self):
        """
        Start the resource: sanity checks, plumb the address under the
        startip lock, arp announce, then best-effort dns update.
        """
        if self.ipname is None:
            self.log.warning("skip start: no ipname set")
            return
        self.getaddr()
        try:
            self.allow_start()
        except (ex.IpConflict, ex.IpDevDown):
            raise ex.excError
        except (ex.IpAlreadyUp, ex.IpNoActions):
            # nothing to do: consider the start successful
            return
        self.log.debug('pre-checks passed')
        self.lock()
        try:
            arp_announce = self.start_locked()
        finally:
            self.unlock()
        if arp_announce:
            self.arp_announce()
        try:
            self.dns_update()
        except ex.excError as exc:
            self.log.error(str(exc))
    def start_locked(self):
        """
        The start codepath fragment protected by the startip lock.
        Return True when an arp announce should follow.
        """
        ifconfig = self.get_ifconfig()
        if self.mask is None:
            # inherit the netmask from the parent interface
            intf = ifconfig.interface(self.ipdev)
            if intf is None:
                raise ex.excError("netmask parameter is mandatory with 'noalias' tag")
            self.mask = intf.mask
        if self.mask == '':
            raise ex.excError("No netmask set on parent interface %s" % self.ipdev)
        elif isinstance(self.mask, list):
            if len(self.mask) > 0:
                self.mask = self.mask[0]
            else:
                raise ex.excError("No netmask set on parent interface %s" % self.ipdev)
        if 'noalias' in self.tags:
            self.stacked_dev = self.ipdev
        else:
            self.stacked_dev = ifconfig.get_stacked_dev(self.ipdev,\
                                                        self.addr,\
                                                        self.log)
        if self.stacked_dev is None:
            raise ex.excError("could not determine a stacked dev for parent "
                              "interface %s" % self.ipdev)
        arp_announce = True
        try:
            ret = self.startip_cmd()[0]
            self.can_rollback = True
        except ex.excNotSupported:
            self.log.info("start ip not supported")
            ret = 0
            arp_announce = False
        if ret != 0:
            raise ex.excError("failed")
        return arp_announce
    def dns_update(self):
        """
        Post a dns update request to the collector, if the dns_update
        keyword is set and the resource is up.
        """
        from svcBuilder import conf_get_string_scope, conf_get_boolean_scope
        if self.ipname is None:
            self.log.debug("skip dns update: ipname is not set")
            return
        try:
            conf_get_boolean_scope(self.svc, self.svc.config, self.rid,
                                   "dns_update")
        except ex.OptNotFound:
            self.log.debug("skip dns update: dns_update is not set")
            return
        if not self.is_up():
            self.log.debug("skip dns update: resource is not up")
            return
        try:
            dns_name_suffix = conf_get_string_scope(self.svc, self.svc.config,
                                                    self.rid, "dns_name_suffix")
        except ex.OptNotFound:
            dns_name_suffix = None
            self.log.debug("dns update: dns_name_suffix is not set")
        try:
            self.getaddr()
        except ex.excError as exc:
            self.log.error(str(exc))
            return
        post_data = {
            "content": self.addr,
        }
        if dns_name_suffix:
            post_data["name"] = dns_name_suffix
        try:
            data = self.svc.node.collector_rest_post(
                "/dns/services/records",
                post_data,
                svcname=self.svc.svcname,
            )
        except Exception as exc:
            raise ex.excError("dns update failed: "+str(exc))
        if "error" in data:
            raise ex.excError(data["error"])
        self.log.info("dns updated")
def stop(self):
"""
Stop the resource.
"""
if self.ipname is None:
self.log.warning("skip stop: no ipname set")
return
self.getaddr(cache_fallback=True)
if self.is_up() is False:
self.log.info("%s is already down on %s", self.addr, self.ipdev)
return
ifconfig = self.get_ifconfig()
if 'noalias' in self.tags:
self.stacked_dev = self.ipdev
else:
self.stacked_dev = ifconfig.get_stacked_dev(self.ipdev,\
self.addr,\
self.log)
if self.stacked_dev is None:
raise ex.excError
try:
ret = self.stopip_cmd()[0]
except ex.excNotSupported:
self.log.info("stop ip not supported")
return
if ret != 0:
self.log.error("failed")
raise ex.excError
import time
tmo = 15
idx = 0
for idx in range(tmo):
if not self.check_ping(count=1, timeout=1):
break
time.sleep(1)
if idx == tmo-1:
self.log.error("%s refuse to go down", self.addr)
raise ex.excError
    def allocate(self):
        """
        Request an ip in the ipdev network from the collector.
        """
        from svcBuilder import conf_get_string_scope
        import ipaddress
        try:
            # idempotency: an ip is already defined in the configuration
            conf_get_string_scope(self.svc, self.svc.config, self.rid, "ipname")
            self.log.info("skip allocate: an ip is already defined")
            return
        except ex.OptNotFound:
            pass
        if self.ipdev is None:
            self.log.info("skip allocate: ipdev is not set")
            return
        try:
            # explicit network setting
            network = conf_get_string_scope(self.svc, self.svc.config, self.rid, "network")
        except ex.OptNotFound:
            network = None
        if network is None:
            # implicit network: the network of the first ipdev ip
            ifconfig = IFCONFIG_MOD.ifconfig()
            intf = ifconfig.interface(self.ipdev)
            try:
                if isinstance(intf.ipaddr, list):
                    baseaddr = intf.ipaddr[0]
                else:
                    baseaddr = intf.ipaddr
                network = str(ipaddress.IPv4Interface(baseaddr).network.network_address)
            except ValueError:
                self.log.info("skip allocate: ipdev has no configured address "
                              "and network is not set")
                return
        post_data = {
            "network": network,
        }
        try:
            post_data["name"] = conf_get_string_scope(self.svc, self.svc.config,
                                                      self.rid, "dns_name_suffix")
        except ex.OptNotFound:
            self.log.debug("allocate: dns_name_suffix is not set")
        try:
            data = self.svc.node.collector_rest_post(
                "/networks/%s/allocate" % network,
                post_data,
                svcname=self.svc.svcname,
            )
        except Exception as exc:
            raise ex.excError("ip allocation failed: "+str(exc))
        if "error" in data:
            raise ex.excError(data["error"])
        if "info" in data:
            self.log.info(data["info"])
        # persist the allocated address, gateway and netmask in the config
        self.ipname = data["data"]["ip"]
        self.addr = self.ipname
        self.set_label()
        self.svc._set(self.rid, "ipname", self.ipname)
        if self.gateway in (None, ""):
            gateway = data.get("data", {}).get("network", {}).get("gateway")
            if gateway:
                self.log.info("set gateway=%s", gateway)
                self.svc._set(self.rid, "gateway", gateway)
                self.gateway = gateway
        if self.mask in (None, ""):
            netmask = data.get("data", {}).get("network", {}).get("netmask")
            if netmask:
                self.log.info("set netmask=%s", netmask)
                self.svc._set(self.rid, "netmask", netmask)
                self.mask = str(netmask)
        self.log.info("ip %s allocated", self.ipname)
def release(self):
"""
Release an allocated ip a collector managed network.
"""
from svcBuilder import conf_get_string_scope
if self.ipname is None:
self.log.info("skip release: no ipname set")
return
try:
self.getaddr()
except ex.excError:
self.log.info("skip release: ipname does not resolve to an address")
return
post_data = {}
try:
post_data["name"] = conf_get_string_scope(self.svc, self.svc.config,
self.rid, "dns_name_suffix")
except ex.OptNotFound:
self.log.debug("allocate: dns_name_suffix is not set")
try:
data = self.svc.node.collector_rest_post(
"/networks/%s/release" % self.addr,
post_data,
svcname=self.svc.svcname,
)
except Exception as exc:
raise ex.excError("ip release failed: "+str(exc))
if "error" in data:
self.log.warning(data["error"])
return
if "info" in data:
self.log.info(data["info"])
self.svc._unset(self.rid, "ipname")
self.log.info("ip %s released", self.ipname)
    def provision(self):
        """
        Provision the ip resource: allocate an ip collector's side, then
        start it.
        """
        self.allocate()
        self.start()
    def unprovision(self):
        """
        Unprovision the ip resource: unplumb it, then release it collector's
        side.
        """
        self.stop()
        self.release()
opensvc-1.8~20170412/lib/provDiskVgHP-UX.py 0000644 0001750 0001750 00000003350 13073467726 020264 0 ustar jkelbert jkelbert from provisioning import Provisioning
import os
import json
import rcExceptions as ex
from stat import *
from rcUtilities import justcall
import glob
class ProvisioningDisk(Provisioning):
    """
    HP-UX vg provisioner: pvcreate the configured pv paths, then vgcreate
    the vg over them.
    """
    def __init__(self, r):
        Provisioning.__init__(self, r)
        self.pvs = r.svc.config.get(self.r.rid, 'pvs')
        self.pvs = self.pvs.split()
        try:
            self.options = r.svc.config.get(self.r.rid, 'options').split()
        except:
            self.options = []
        # expand glob patterns into actual pv paths
        l = []
        for pv in self.pvs:
            l += glob.glob(pv)
        self.pvs = l
    def provisioner(self):
        """Create the vg if not already present. Return True on success."""
        if self.r.has_it():
            self.r.log.info("already provisioned")
            return
        err = False
        for i, pv in enumerate(self.pvs):
            if not os.path.exists(pv):
                self.r.log.error("pv %s does not exist"%pv)
                err |= True
                # fix: don't stat a missing path below, it raised OSError
                continue
            mode = os.stat(pv)[ST_MODE]
            if S_ISBLK(mode):
                continue
            else:
                self.r.log.error("pv %s is not a block device nor a loop file"%pv)
                err |= True
        if err:
            raise ex.excError
        for pv in self.pvs:
            # pvcreate wants the raw device path
            pv = pv.replace('/disk/', '/rdisk/')
            cmd = ['pvcreate', '-f', pv]
            ret, out, err = self.r.vcall(cmd)
            if ret != 0:
                raise ex.excError
        # vgcreate wants the block device paths
        pvs = []
        for pv in self.pvs:
            pvs.append(pv.replace('/rdisk/', '/disk/'))
        cmd = ['vgcreate']
        if len(self.options) > 0:
            cmd += self.options
        cmd += [self.r.name] + pvs
        ret, out, err = self.r.vcall(cmd)
        if ret != 0:
            raise ex.excError
        self.r.log.info("provisioned")
        return True
opensvc-1.8~20170412/lib/checkNumaLinux.py 0000644 0001750 0001750 00000002613 13073467726 020323 0 ustar jkelbert jkelbert import checks
import os
import glob
import math
class check(checks.check):
    """
    Report per-numa-node memory leveling: for each node, the percent
    deviation of its memory from a perfectly cpu-proportional distribution.
    """
    chk_type = "numa"
    def do_check(self):
        """Return a list of {chk_instance, chk_value, chk_svcname} dicts."""
        nodeinfo = {}
        memtotal = 0
        n_nodes = 0
        n_cpu = 0
        for npath in glob.glob("/sys/devices/system/node/node*"):
            node_n_cpu = len(glob.glob(npath+"/cpu*"))
            node = os.path.basename(npath)
            with open(npath+"/meminfo", 'r') as f:
                lines = f.read().strip('\n').split('\n')
            for line in lines:
                if 'MemTotal' in line:
                    try:
                        # meminfo format: "Node 0 MemTotal: <kB> kB"
                        node_mem = int(line.split()[-2])
                    except:
                        continue
                    memtotal += node_mem
                    n_nodes += 1
                    n_cpu += node_n_cpu
                    nodeinfo[node] = {"mem": node_mem, "cpu": node_n_cpu}
                    break
        r = []
        if n_nodes < 2 or n_cpu == 0:
            # leveling is meaningless on a single node; also guard the
            # division below against a zero cpu count
            return r
        target_per_cpu = memtotal / n_cpu
        for node, info in nodeinfo.items():
            target = target_per_cpu * info['cpu']
            if target == 0:
                # fix: avoid ZeroDivisionError for cpu-less nodes
                continue
            deviation = math.fabs(100. * (info['mem'] - target) // target)
            r.append({
                'chk_instance': node+'.mem.leveling',
                'chk_value': str(deviation),
                'chk_svcname': '',
            })
        return r
opensvc-1.8~20170412/lib/snapAdvfsOSF1.py 0000644 0001750 0001750 00000006203 13073467726 017762 0 ustar jkelbert jkelbert import os
from rcUtilities import justcall, protected_mount
import rcExceptions as ex
import snap
import rcAdvfs
from rcMountsOSF1 import Mounts
from rcGlobalEnv import rcEnv
class Snap(snap.Snap):
    """Defines a snap object with ZFS
    """
    def snapcreate(self, m):
        """ create a snapshot for m
            add self.snaps[m] with
                dict(snapinfo key val)
            The device is an advfs 'domain#fileset' pair: clone the fileset
            and mount the clone under a private mount point.
        """
        dom, fset = m.device.split('#')
        o = rcAdvfs.Fdmns()
        try:
            d = o.get_fdmn(dom)
        except rcAdvfs.ExInit:
            raise ex.syncNotSnapable
        if fset not in d.fsets:
            raise ex.syncNotSnapable
        clonefset = fset +'@osvc_sync'
        mount_point = m.mount_point
        snap_mount_point = os.path.join(rcEnv.pathtmp, 'clonefset/%s/%s/osvc_sync'%(m.svc.svcname,mount_point))
        snap_mount_point = os.path.normpath(snap_mount_point)
        if not os.path.exists(snap_mount_point):
            try:
                os.makedirs(snap_mount_point)
                self.log.info('create directory %s'%snap_mount_point)
            except:
                self.log.error('failed to create directory %s'%snap_mount_point)
                raise ex.syncSnapCreateError
        clonedev = '#'.join((dom, clonefset))
        # a stale clone may still be mounted: kill users and unmount it
        if Mounts().has_mount(clonedev, snap_mount_point):
            cmd = ['fuser', '-kcv', snap_mount_point]
            (ret, out, err) = self.vcall(cmd, err_to_info=True)
            cmd = ['umount', snap_mount_point]
            (ret, out, err) = self.vcall(cmd)
            if ret != 0:
                raise ex.excError
        # drop a stale clone fileset before re-cloning
        if clonefset in d.fsets:
            (ret, buff, err) = self.vcall(['rmfset', '-f', dom, clonefset])
            if ret != 0:
                raise ex.syncSnapDestroyError
        (ret, buff, err) = self.vcall(['clonefset', dom, fset, clonefset])
        if ret != 0:
            raise ex.syncSnapCreateError
        (ret, buff, err) = self.vcall(['mount', '-t', 'advfs', clonedev, snap_mount_point])
        if ret != 0:
            raise ex.syncSnapCreateError
        self.snaps[mount_point]={'snap_mnt' : snap_mount_point, \
                                 'snapdev' : clonedev }
    def snapdestroykey(self, snap_key):
        """ destroy a snapshot for a mount_point
            unmount the clone then remove the clone fileset
        """
        clonedev = self.snaps[snap_key]['snapdev']
        dom, clonefset = clonedev.split('#')
        o = rcAdvfs.Fdmns()
        try:
            d = o.get_fdmn(dom)
        except rcAdvfs.ExInit:
            raise ex.syncSnapDestroyError
        if clonefset not in d.fsets:
            # already destroyed: nothing to do
            return
        # refuse to kill users of a protected mount point
        if protected_mount(self.snaps[snap_key]['snap_mnt']):
            self.log.error("the clone fset is no longer mounted in %s. panic."%self.snaps[snap_key]['snap_mnt'])
            raise ex.excError
        cmd = ['fuser', '-kcv', self.snaps[snap_key]['snap_mnt']]
        (ret, out, err) = self.vcall(cmd, err_to_info=True)
        cmd = ['umount', self.snaps[snap_key]['snap_mnt']]
        (ret, out, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError
        (ret, buff, err) = self.vcall(['rmfset', '-f', dom, clonefset])
        if ret != 0:
            raise ex.syncSnapDestroyError
opensvc-1.8~20170412/lib/hostidWindows.py 0000644 0001750 0001750 00000000102 13073467726 020241 0 ustar jkelbert jkelbert from uuid import getnode
def hostid():
    """Return this host's id: uuid.getnode() (MAC-derived), as a string."""
    return "%d" % getnode()
opensvc-1.8~20170412/lib/resDiskDiskLinux.py 0000644 0001750 0001750 00000002523 13073467726 020644 0 ustar jkelbert jkelbert from __future__ import print_function
import os
import time
import rcExceptions as ex
import resDiskDisk
from rcUtilities import lazy, which, justcall
import rcStatus
class Disk(resDiskDisk.Disk):
    """
    Linux disk resource: expose the array disk through its
    /dev/disk/by-id wwn path.
    """
    @lazy
    def devpath(self):
        # normalize the disk_id: lowercase, strip a leading "0x"
        return "/dev/disk/by-id/wwn-0x%s" % str(self.disk_id).lower().replace("0x", "")
    def _status(self, verbose=False):
        """NA when unprovisioned or present, DOWN when the device is missing."""
        if self.disk_id is None:
            return rcStatus.NA
        if not os.path.exists(self.devpath):
            self.status_log("%s does not exist" % self.devpath, "warn")
            return rcStatus.DOWN
        return rcStatus.NA
    def devlist(self):
        """Return the set of underlying device paths (symlink resolved)."""
        try:
            dev = os.path.realpath(self.devpath)
            return set([dev])
        except Exception as exc:
            print(exc)
            pass
        return set()
    def provision(self):
        """Provision the disk, wait for udev to create it, setup multipath."""
        resDiskDisk.Disk.provision(self)
        self.wait_udev()
        if which("multipath"):
            dev = os.path.realpath(self.devpath)
            cmd = ["multipath", "-v1", dev]
            ret, out, err = self.vcall(cmd)
    def wait_udev(self):
        """Wait up to 30s for udev to create the by-id device path."""
        for retry in range(30):
            if os.path.exists(self.devpath):
                self.log.info("%s now exists", self.devpath)
                return
            time.sleep(1)
        raise ex.excError("time out waiting for %s to appear" % self.devpath)
opensvc-1.8~20170412/lib/nodemgr.py 0000644 0001750 0001750 00000004476 13073467726 017051 0 ustar jkelbert jkelbert from __future__ import print_function
from __future__ import absolute_import
import sys
import os
import optparse
#
# add project lib to path
#
prog = "nodemgr"
import rcStatus
import rcColor
import rcExceptions as ex
from rcGlobalEnv import *
from rcUtilities import ximport
from nodemgr_parser import NodemgrOptParser
node_mod = ximport('node')
def get_extra_argv(argv=None):
    """
    Extract extra argv from nodemgr argv.

    nodemgr can act as a wrapper for other commands (storage drivers for
    example): everything after the 'array' keyword is passed through
    verbatim. Return an (argv, extra_argv) tuple.
    """
    if argv is None:
        argv = sys.argv[1:]
    if len(argv) < 2 or "array" not in argv:
        return argv, []
    pos = argv.index('array')
    return argv[:pos+1], argv[pos+1:]
def do_symcli_db_file(options):
    """
    When --symcli-db-file points to an existing file, export SYMCLI_DB_FILE
    and SYMCLI_OFFLINE=1 so symcli commands run offline against that db.
    """
    path = getattr(options, "symcli_db_file", None)
    if path is None:
        return
    if not os.path.exists(path):
        print("File does not exist: %s" % path)
        return
    os.environ['SYMCLI_DB_FILE'] = path
    os.environ['SYMCLI_OFFLINE'] = '1'
def _main(node, argv=None):
    """
    Parse nodemgr options, setup the node object accordingly and run the
    requested action. Return the action exit code.
    """
    argv, extra_argv = get_extra_argv(argv)
    optparser = NodemgrOptParser(argv)
    options, action = optparser.parse_args(argv)
    options.extra_argv = extra_argv
    rcColor.use_color = options.color
    node.options.update(options.__dict__)
    do_symcli_db_file(options)
    if action.startswith("collector_cli"):
        action = "collector_cli"
    node.check_privs(action)
    err = 0
    try:
        err = node.action(action)
    except KeyboardInterrupt:
        # fix: message typo ("Keybord")
        sys.stderr.write("Keyboard Interrupt\n")
        err = 1
    except ex.excError:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        es = str(exc_value)
        if len(es) > 0:
            sys.stderr.write(es+'\n')
        err = 1
    # note: the former bare "except: raise" clause was removed -- it only
    # re-raised, followed by an unreachable "err = 1"; behavior is identical
    return err
def main(argv=None):
    """
    nodemgr entrypoint: instanciate a Node, run _main and always close the
    node object on the way out. Return the exit code.
    """
    node = node_mod.Node()
    try:
        return _main(node, argv=argv)
    except ex.excError as exc:
        print(exc, file=sys.stderr)
        return 1
    finally:
        node.close()
if __name__ == "__main__":
    ret = main()
    sys.exit(ret)
opensvc-1.8~20170412/lib/provFsExt4.py 0000644 0001750 0001750 00000000176 13073467726 017433 0 ustar jkelbert jkelbert import provFs
class ProvisioningFs(provFs.ProvisioningFs):
    # ext4 provisioner: specialize the generic filesystem provisioner
    # with the ext4 command lines.
    mkfs = ['mkfs.ext4', '-F', '-q']  # -F force, -q quiet
    info = ['tune2fs', '-l']          # probe an existing ext4 filesystem
opensvc-1.8~20170412/lib/resHbovm.py 0000644 0001750 0001750 00000004246 13073467726 017176 0 ustar jkelbert jkelbert import resHb
from rcGlobalEnv import rcEnv
import os
import rcStatus
import rcExceptions as ex
from rcUtilities import justcall, which
import rcOvm
class Hb(resHb.Hb):
    """
    OVM heartbeat resource.

    Drives per-VM HA enablement through the OVM manager (rcOvm.Ovm) and
    reports the health of the local ovs-agent daemons.
    """
    def __init__(self,
                 rid=None,
                 name=None,
                 **kwargs):
        resHb.Hb.__init__(self,
                          rid,
                          "hb.ovm",
                          **kwargs)
        # ovs-agent init script, used to probe the agent daemons state
        self.ovsinit = os.path.join(os.sep, 'etc', 'init.d', 'ovs-agent')
    def process_running(self):
        # 'ovs-agent status' prints one line per daemon: every non-empty
        # line must start with 'ok!' for the agent to be considered up
        cmd = [self.ovsinit, 'status']
        (out, err, ret) = justcall(cmd)
        if ret != 0:
            return False
        for line in out.split('\n'):
            if len(line) == 0:
                continue
            if not line.startswith('ok!'):
                return False
        return True
    def stop(self):
        # stopping the heartbeat disables HA on every ovm container
        try:
            self.manager = rcOvm.Ovm(log=self.log)
            for r in self.svc.get_resources('container.ovm'):
                self.manager.vm_disable_ha(r.name)
        except ex.excError as e:
            self.log.error(str(e))
            raise
    def start(self):
        # starting the heartbeat enables HA on every ovm container
        try:
            self.manager = rcOvm.Ovm(log=self.log)
            for r in self.svc.get_resources('container.ovm'):
                self.manager.vm_enable_ha(r.name)
        except ex.excError as e:
            self.log.error(str(e))
            raise
        self.can_rollback = True
    def __status(self, verbose=False):
        # NOTE(review): the double leading underscore name-mangles this to
        # _Hb__status, so it cannot override a parent '_status' method
        # (other resources in this codebase define '_status') -- confirm
        # this is intentional.
        if not os.path.exists(self.ovsinit):
            self.status_log("OVM agent is not installed")
            return rcStatus.WARN
        if not self.process_running():
            self.status_log("OVM agent daemons are not running")
            return rcStatus.WARN
        try:
            self.manager = rcOvm.Ovm(log=self.log)
            for r in self.svc.get_resources('container.ovm'):
                ha_enabled = self.manager.vm_ha_enabled(r.name)
        except ex.excError as e:
            self.status_log(str(e))
            return rcStatus.WARN
        # NOTE(review): ha_enabled keeps only the last container's state,
        # and is unbound (NameError) when the service has no container.ovm
        # resource -- TODO confirm and aggregate over all containers.
        if not ha_enabled:
            self.status_log("HA not enabled for this VM")
            return rcStatus.WARN
        return rcStatus.UP
opensvc-1.8~20170412/lib/rcOvm.py 0000644 0001750 0001750 00000007012 13073467726 016471 0 ustar jkelbert jkelbert import os
import subprocess
from rcUtilities import which, call
import rcExceptions as ex
from rcGlobalEnv import rcEnv
class Ovm(object):
    """
    OVM manager CLI wrapper.

    Credentials are read from the [ovm] section of the opensvc auth.conf.
    Raises ex.excError when the section or its username/password options
    are missing.
    """
    def __init__(self, log=None):
        self.ovmcli = 'ovm'
        if log is not None:
            self.log = log
        else:
            import logging
            self.log = logging.getLogger('OVM')
        import ConfigParser
        cf = rcEnv.authconf
        self.conf = ConfigParser.RawConfigParser()
        self.conf.read(cf)
        if not self.conf.has_section("ovm"):
            raise ex.excError("no auth information for OVM manager")
        if not self.conf.has_option("ovm", "username"):
            raise ex.excError("no username information for OVM manager")
        if not self.conf.has_option("ovm", "password"):
            raise ex.excError("no password information for OVM manager")
        self.username = self.conf.get("ovm", "username")
        self.password = self.conf.get("ovm", "password")

    def test(self):
        """Return True when the ovm CLI and credentials are usable."""
        # BUG fixed: the condition was inverted, erroring out when the
        # CLI *was* installed
        if not which(self.ovmcli):
            self.log.error("ovm CLI is not installed")
            return False
        if self.username is None:
            self.log.error("manager username is not set")
            return False
        if self.password is None:
            self.log.error("manager password is not set")
            return False
        return True

    def ovm(self, args, check=True, verbose=True):
        """
        Run an ovm CLI command and return (ret, out, err).
        Raise ex.excError when check is True and the command fails.
        """
        # BUG fixed: 'self.test' was never called (a bound method is
        # always truthy), so the sanity checks never ran
        if not self.test():
            raise ex.excError
        cmd = [self.ovmcli, '-u', self.username, '-p', self.password, '-S";"'] + args
        if verbose:
            # mask the password in the logged command line
            _cmd = [self.ovmcli, '-u', self.username, '-p', 'XXXXXX'] + args
            self.log.info(subprocess.list2cmdline(_cmd))
            ret, out, err = call(cmd, log=self.log)
            # BUG fixed: "'Error:' in out > 0" chained into a str/int
            # comparison; a plain substring test was intended
            if 'Error:' in out:
                self.log.error(out)
            else:
                self.log.info(out)
        else:
            ret, out, err = call(cmd, log=self.log)
        if check and ret != 0:
            raise ex.excError("ovm command execution error")
        return ret, out, err

    def get_pool(self):
        """Return the server pool name this node belongs to."""
        cmd = ['svr', 'ls']
        ret, out, err = self.ovm(cmd, verbose=False)
        for line in out.split('\n'):
            l = line.split(';')
            if len(l) != 4:
                continue
            if l[1].strip('"') == rcEnv.nodename:
                return l[3].strip('"')
        raise ex.excError("can't find node's pool name")

    def vm_enable_ha(self, vm):
        """Enable HA for <vm> in its server pool."""
        pool = self.get_pool()
        cmd = ['vm', 'conf', '-n', vm, '-s', pool, '-e']
        self.ovm(cmd)

    def vm_disable_ha(self, vm):
        """Disable HA for <vm> in its server pool."""
        pool = self.get_pool()
        cmd = ['vm', 'conf', '-n', vm, '-s', pool, '-d']
        self.ovm(cmd)

    def vm_info(self, vm):
        """Return the 'vm info' output as a key/value dict."""
        pool = self.get_pool()
        cmd = ['vm', 'info', '-n', vm, '-s', pool]
        ret, out, err = self.ovm(cmd, verbose=False)
        if ret != 0:
            raise ex.excError("failed to fetch VM information from manager")
        h = {}
        for line in out.split('\n'):
            l = line.split(':')
            if len(l) != 2:
                continue
            h[l[0].strip()] = l[1].strip()
        return h

    def vm_ha_enabled(self, vm):
        """Return True when the manager reports HA enabled for <vm>."""
        info = self.vm_info(vm)
        # 'Hign Availability' presumably tolerates a key typo seen in
        # some manager versions -- keep both spellings, TODO confirm
        if 'Hign Availability' in info and info['Hign Availability'] == 'Enabled':
            return True
        if 'High Availability' in info and info['High Availability'] == 'Enabled':
            return True
        return False
if __name__ == "__main__":
o = Ovm()
#o.vm_disable_ha("ovmguest1")
#o.vm_enable_ha("ovmguest1")
opensvc-1.8~20170412/lib/rcHp3par.py 0000644 0001750 0001750 00000034646 13073467726 017102 0 ustar jkelbert jkelbert from __future__ import print_function
import os
import json
import rcExceptions as ex
from subprocess import *
import time
import urllib
import urllib2
from rcGlobalEnv import rcEnv
from rcUtilities import cache, clear_cache, justcall, which
import re
import datetime
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
if rcEnv.pathbin not in os.environ['PATH']:
os.environ['PATH'] += ":"+rcEnv.pathbin
def reformat(s):
    """
    Clean up raw CLI output: drop the interactive prompt prefix
    (everything through '% ') on each line, remove the ssh
    pseudo-terminal warning and strip surrounding whitespace.
    """
    cleaned = []
    for line in s.split('\n'):
        if '%' in line:
            # skip prompt
            cut = line.index("%") + 2
            if cut < len(line):
                line = line[cut:]
            elif cut == len(line):
                line = ""
        cleaned.append(line)
    joined = '\n'.join(cleaned)
    joined = joined.replace("Pseudo-terminal will not be allocated because stdin is not a terminal.", "")
    return joined.strip()
class Hp3pars(object):
    """
    Enumerate the hp3par arrays declared in auth.conf and build one
    Hp3par accessor per matching section. Iterable over the arrays.
    """
    # access methods supported by the Hp3par accessor
    allowed_methods = ("ssh", "proxy", "cli")
    def __init__(self, objects=[], log=None):
        # objects: optional list of array (section) names to restrict to
        self.objects = objects
        if len(objects) > 0:
            self.filtering = True
        else:
            self.filtering = False
        self.arrays = []
        if not os.path.exists(rcEnv.authconf):
            raise ex.excError("%s not found" % rcEnv.authconf)
        conf = ConfigParser.RawConfigParser()
        conf.read(rcEnv.authconf)
        m = {}
        for s in conf.sections():
            # only consider sections declared with type = hp3par
            if not conf.has_option(s, "type") or \
               conf.get(s, "type") != "hp3par":
                continue
            if self.filtering and not s in self.objects:
                continue
            username = None
            manager = None
            key = None
            pwf = None
            try:
                method = conf.get(s, 'method')
            except:
                # ssh is the default access method
                method = "ssh"
            if method not in self.allowed_methods:
                print("invalid method. allowed methods: %s" % ', '.join(self.allowed_methods))
                continue
            kwargs = {"log": log}
            try:
                manager = conf.get(s, 'manager')
                kwargs['manager'] = manager
            except Exception as e:
                # default the manager name to the section name
                kwargs['manager'] = s
            try:
                username = conf.get(s, 'username')
                key = conf.get(s, 'key')
                kwargs['username'] = username
                kwargs['key'] = key
            except Exception as e:
                # username/key are mandatory for the ssh method only
                if method in ("ssh"):
                    raise
            try:
                pwf = conf.get(s, 'pwf')
                kwargs['pwf'] = pwf
            except Exception as e:
                # pwf (password file) is mandatory for the cli method only
                if method in ("cli"):
                    raise
            try:
                cli = conf.get(s, 'cli')
                kwargs['cli'] = cli
            except Exception as e:
                # optional: path to the cli binary (defaults in Hp3par)
                pass
            self.arrays.append(Hp3par(s, method, **kwargs))
        del(conf)
    def __iter__(self):
        for array in self.arrays:
            yield(array)
class Hp3par(object):
def __init__(self, name, method, manager=None, username=None, key=None, pwf=None, cli="cli", svcname="", log=None):
self.name = name
self.manager = manager
self.method = method
self.username = username
self.pwf = pwf
self.cli = cli
self.svcname = svcname
self.key = key
self.keys = ['showvv', 'showsys', 'shownode', "showcpg", "showport", "showversion"]
self.uuid = None
self.remotecopy = None
self.virtualcopy = None
self.log = log
self.cache_sig_prefix = "hp3par."+self.manager+"."
def ssh_cmd(self, cmd, log=False):
_cmd = ['ssh', '-i', self.key, '@'.join((self.username, self.manager))]
cmd = 'setclienv csvtable 1 ; setclienv nohdtot 1 ; ' + cmd + ' ; exit'
return self._rcmd(_cmd, cmd, log=log)
def proxy_cmd(self, cmd, log=False):
url = 'https://%s/api/cmd/' % self.manager
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
header = { 'User-Agent' : user_agent }
values = {
'array' : self.name,
'cmd' : cmd,
'svcname' : self.svcname,
'uuid' : self.uuid,
}
data = urllib.urlencode(values)
req = urllib2.Request(url, data, header)
try:
f = urllib2.build_opener().open(req)
response = f.read()
#response = urllib2.urlopen(req)
except Exception as e:
return "", str(e)
try:
d = json.loads(response)
ret = d['ret']
out = d['out']
err = d['err']
except:
ret = 1
out = ""
err = "unexpected proxy response format (not json)"
if ret != 0:
raise ex.excError("proxy error: %s" % err)
return out, err
def _rcmd(self, _cmd, cmd, log=False, retry=10):
p = Popen(_cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)
p.stdin.write(cmd)
out, err = p.communicate()
out = reformat(out)
err = reformat(err)
if p.returncode != 0:
if ("Connection closed by remote host" in err or "Too many local CLI connections." in err) and retry > 0:
if log:
self.log.info("3par connection refused. try #%d" % retry)
time.sleep(1)
return self._rcmd(_cmd, cmd, log=log, retry=retry-1)
if log:
if len(out) > 0: self.log.info(out)
if len(err) > 0: self.log.error(err)
else:
print(cmd)
print(out)
raise ex.excError("3par command execution error")
return out, err
def cli_cmd(self, cmd, log=False, retry=10):
    """
    Run a 3par command through the locally installed cli binary.
    Retry up to <retry> times when the array refuses the connection.
    Return (out, err). Raise ex.excError on failure.

    The retry branch used to reference '_cmd' and 'retry', both
    undefined in this method (copy-paste from _rcmd), raising NameError
    instead of retrying. Fixed by retrying through this method with a
    backward-compatible 'retry' parameter.
    """
    if which(self.cli) is None:
        raise ex.excError("%s executable not found" % self.cli)
    os.environ["TPDPWFILE"] = self.pwf
    os.environ["TPDNOCERTPROMPT"] = "1"
    _cmd = [self.cli, '-sys', self.name, '-nohdtot', '-csvtable'] + cmd.split()
    if log:
        # mask any password argument in the logged command line
        s = " ".join(_cmd)
        s = re.sub(r'password \w+', 'password xxxxx', s)
        self.log.info(s)
    p = Popen(_cmd, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    out = reformat(out)
    err = reformat(err)
    if p.returncode != 0:
        if "The authenticity of the storage system cannot be established." in err:
            raise ex.excError("3par connection error. array ssl cert is not trusted. open interactive session to trust it.")
        if ("Connection closed by remote host" in err or "Too many local CLI connections." in err) and retry > 0:
            if log:
                self.log.info("3par connection refused. try #%d" % retry)
            time.sleep(1)
            return self.cli_cmd(cmd, log=log, retry=retry-1)
        if log:
            if len(out) > 0:
                self.log.info(out)
            if len(err) > 0:
                self.log.error(err)
        else:
            print(' '.join(_cmd))
            print(out)
        raise ex.excError("3par command execution error")
    return out, err
def get_uuid(self):
if self.uuid is not None:
return self.uuid
config = ConfigParser.RawConfigParser()
config.read(rcEnv.nodeconf)
try:
self.uuid = config.get("node", "uuid")
except:
pass
return self.uuid
def rcmd(self, cmd, log=False):
if self.method == "ssh":
return self.ssh_cmd(cmd, log=log)
elif self.method == "cli":
return self.cli_cmd(cmd, log=log)
elif self.method == "proxy":
self.get_uuid()
return self.proxy_cmd(cmd, log=log)
else:
raise ex.excError("unsupported method %s set in auth.conf for array %s" % (self.method, self.name))
def serialize(self, s, cols):
return json.dumps(self.csv_to_list_of_dict(s, cols))
def csv_to_list_of_dict(self, s, cols):
    """
    Parse csv text <s> into a list of dicts keyed by <cols>.
    Rows yielding fewer than two pairs (blank or truncated lines)
    are discarded.
    """
    rows = []
    for line in s.splitlines():
        fields = line.strip().split(',')
        row = dict(zip(cols, fields))
        if len(row) > 1:
            rows.append(row)
    return rows
@cache("has_virtualcopy")
def has_virtualcopy(self):
if self.virtualcopy is not None:
return self.virtualcopy
cmd = 'showlicense'
s = self.rcmd(cmd)[0].strip("\n")
self.virtualcopy = False
for line in s.split('\n'):
if "Virtual Copy" in line:
self.virtualcopy = True
return self.virtualcopy
@cache("has_remotecopy")
def has_remotecopy(self):
if self.remotecopy is not None:
return self.remotecopy
cmd = 'showlicense'
s = self.rcmd(cmd)[0].strip("\n")
self.remotecopy = False
for line in s.split('\n'):
if "Remote Copy" in line:
self.remotecopy = True
return self.remotecopy
def get_showvv(self):
if self.has_remotecopy():
cols = ["Name", "VV_WWN", "Prov", "CopyOf", "Tot_Rsvd_MB", "VSize_MB", "UsrCPG", "CreationTime", "RcopyGroup", "RcopyStatus"]
else:
cols = ["Name", "VV_WWN", "Prov", "CopyOf", "Tot_Rsvd_MB", "VSize_MB", "UsrCPG", "CreationTime"]
cmd = 'showvv -showcols ' + ','.join(cols)
print("%s: %s"%(self.name, cmd))
s = self.rcmd(cmd)[0]
return self.serialize(s, cols)
def updatevv(self, vvnames=None, log=False):
cmd = 'updatevv -f'
if vvnames is None or len(vvnames) == 0:
raise ex.excError("updatevv: no vv names specified")
if vvnames:
cmd += ' ' + ' '.join(vvnames)
s = self.rcmd(cmd, log=log)[0]
def showvv(self, vvnames=None, vvprov=None, cols=None):
fdata = []
data = self._showvv()
for d in data:
if vvnames and d["Name"] not in vvnames:
continue
if vvprov and d["Prov"] != vvprov:
continue
fdata.append(d)
return fdata
@cache("showvv")
def _showvv(self):
cols = ["Name", "CreationTime", "Prov"]
cmd = 'showvv -showcols ' + ','.join(cols)
out, err = self.rcmd(cmd)
return self.csv_to_list_of_dict(out, cols)
def showrcopy(self, rcg):
"""
Remote Copy System Information
Status: Started, Normal
Group Information
Name ,Target ,Status ,Role ,Mode ,Options
RCG.SVCTEST1,baie-pra,Started,Primary,Periodic,"Last-Sync 2014-03-05 10:19:42 CET , Period 5m, auto_recover,over_per_alert"
,LocalVV ,ID ,RemoteVV ,ID ,SyncStatus ,LastSyncTime
,LXC.SVCTEST1.DATA01,2706,LXC.SVCTEST1.DATA01,2718,Synced,2014-03-05 10:19:42 CET
,LXC.SVCTEST1.DATA02,2707,LXC.SVCTEST1.DATA02,2719,Synced,2014-03-05 10:19:42 CET
"""
out, err = self._showrcopy()
if len(out) == 0:
raise ex.excError("unable to fetch rcg status")
lines = []
cols_rcg = ["Name", "Target", "Status", "Role", "Mode"]
cols_vv = ["LocalVV", "ID", "RemoteVV", "ID", "SyncStatus", "LastSyncTime"]
# extract rcg block
in_block = False
for line in out.splitlines():
if not in_block:
if not line.startswith(rcg+","):
continue
lines.append(line)
in_block = True
else:
if not line.startswith(" "):
break
lines.append(line)
if len(lines) == 0:
raise ex.excError("rcg does not exist")
# RCG status
rcg_s = lines[0]
options_start = rcg_s.index('"')
rcg_options = rcg_s[options_start+1:-1].split(",")
rcg_options = map(lambda x: x.strip(), rcg_options)
rcg_v = rcg_s[:options_start].split(",")
rcg_data = {}
for a, b in zip(cols_rcg, rcg_v):
rcg_data[a] = b
rcg_data["Options"] = rcg_options
# VV status
vv_l = []
for line in lines[1:]:
v = line.strip().strip(",").split(",")
if len(v) != len(cols_vv):
continue
vv_data = {}
for a, b in zip(cols_vv, v):
vv_data[a] = b
vv_data['LastSyncTime'] = self.s_to_datetime(vv_data['LastSyncTime'])
vv_l.append(vv_data)
data = {'rcg': rcg_data, 'vv': vv_l}
return data
def s_to_datetime(self, s):
out, err, ret = justcall(["date", "--utc", "--date=%s" % s, '+%Y-%m-%d %H:%M:%S'])
d = datetime.datetime.strptime(out.strip(), "%Y-%m-%d %H:%M:%S")
return d
@cache("showrcopy_groups")
def _showrcopy(self):
cmd = 'showrcopy groups'
out, err = self.rcmd(cmd)
return out, err
def clear_showrcopy_cache(self):
clear_cache("showrcopy_groups", o=self)
def clear_caches(self):
clear_cache("showvv", o=self)
def get_showsys(self):
cols = ["ID", "Name", "Model", "Serial", "Nodes", "Master", "TotalCap", "AllocCap", "FreeCap", "FailedCap"]
cmd = 'showsys'
print("%s: %s"%(self.name, cmd))
s = self.rcmd(cmd)[0]
return self.serialize(s, cols)
def get_shownode(self):
cols = ["Available_Cache", "Control_Mem", "Data_Mem", "InCluster", "LED", "Master", "Name", "Node", "State"]
cmd = 'shownode -showcols ' + ','.join(cols)
print("%s: %s"%(self.name, cmd))
s = self.rcmd(cmd)[0]
return self.serialize(s, cols)
def get_showcpg(self):
cols = ["Id", "Name", "Warn%", "VVs", "TPVVs", "Usr", "Snp", "Total", "Used", "Total", "Used", "Total", "Used"]
cmd = 'showcpg'
print("%s: %s"%(self.name, cmd))
s = self.rcmd(cmd)[0]
return self.serialize(s, cols)
def get_showport(self):
cols = ["N:S:P", "Mode", "State", "Node_WWN", "Port_WWN", "Type", "Protocol", "Label", "Partner", "FailoverState"]
cmd = 'showport'
print("%s: %s"%(self.name, cmd))
s = self.rcmd(cmd)[0]
return self.serialize(s, cols)
def get_showversion(self):
cmd = 'showversion -s'
print("%s: %s"%(self.name, cmd))
s = self.rcmd(cmd)[0].strip("\n")
return json.dumps({"Version": s})
if __name__ == "__main__":
o = Hp3pars()
for hp3par in o:
print(hp3par.get_showvv())
print(hp3par.get_showsys())
print(hp3par.get_shownode())
print(hp3par.get_showcpg())
opensvc-1.8~20170412/lib/provDiskVgLinux.py 0000644 0001750 0001750 00000005045 13073467726 020525 0 ustar jkelbert jkelbert from provisioning import Provisioning
from svcBuilder import conf_get_string_scope
import os
import json
import rcExceptions as ex
from stat import *
from rcUtilities import justcall
import glob
class ProvisioningDisk(Provisioning):
    """
    Provision/unprovision a linux lvm volume group from the 'pvs'
    service configuration keyword.
    """
    def __init__(self, r):
        Provisioning.__init__(self, r)

    def unprovisioner(self):
        """Remove the volume group if it still exists."""
        if not self.r.has_it():
            self.r.log.info("already unprovisioned")
            return
        cmd = ['vgremove', '-ff', self.r.name]
        ret, out, err = self.r.vcall(cmd)
        if ret != 0:
            raise ex.excError

    def provisioner(self):
        """
        Create the vg over the pv paths listed in the 'pvs' keyword.
        Glob patterns are expanded; regular files must be backed by a
        loop device. Raise ex.excError on any invalid pv.
        """
        if self.r.has_it():
            self.r.log.info("already provisioned")
            return
        try:
            self.pvs = conf_get_string_scope(self.r.svc, self.r.svc.config, self.r.rid, "pvs")
        except ex.OptNotFound:
            raise ex.excError("the 'pvs' parameter is mandatory for provisioning")
        self.pvs = self.pvs.split()
        l = []
        for pv in self.pvs:
            _l = glob.glob(pv)
            self.r.log.info("expand %s to %s" % (pv, ', '.join(_l)))
            l += _l
        self.pvs = l
        if len(self.pvs) == 0:
            # fail before any pvcreate (this was tested after the loop)
            raise ex.excError("no pvs specified")
        # validate each pv; collect errors so all problems are reported
        errors = False
        for i, pv in enumerate(self.pvs):
            pv = os.path.realpath(pv)
            if not os.path.exists(pv):
                self.r.log.error("pv %s does not exist"%pv)
                errors |= True
                # BUG fixed: without this continue, os.stat() below
                # raised OSError on the missing path
                continue
            mode = os.stat(pv)[ST_MODE]
            if S_ISBLK(mode):
                continue
            elif S_ISREG(mode):
                # a regular file must already be mapped by a loop device
                cmd = ['losetup', '-j', pv]
                out, _err, ret = justcall(cmd)
                # BUG fixed: the justcall() unpack used to clobber the
                # boolean error flag (then 'err |= True' on a string)
                if ret != 0 or not out.startswith('/dev/loop'):
                    self.r.log.error("pv %s a regular file but not a loop"%pv)
                    errors |= True
                    continue
                # substitute the loop device path for the file path
                self.pvs[i] = out.split(':')[0]
            else:
                self.r.log.error("pv %s is not a block device nor a loop file"%pv)
                errors |= True
        if errors:
            raise ex.excError
        for pv in self.pvs:
            cmd = ['pvcreate', '-f', pv]
            ret, out, err = self.r.vcall(cmd)
            if ret != 0:
                raise ex.excError
        cmd = ['vgcreate', self.r.name] + self.pvs
        ret, out, err = self.r.vcall(cmd)
        if ret != 0:
            raise ex.excError
        # invalidate the lvm-related caches now that the vg changed
        self.r.clear_cache("vg.lvs")
        self.r.clear_cache("vg.lvs.attr")
        self.r.clear_cache("vg.tags")
        self.r.log.info("provisioned")
        return True
opensvc-1.8~20170412/lib/resScsiReserv.py 0000644 0001750 0001750 00000020162 13073467726 020206 0 ustar jkelbert jkelbert import resources as Res
import uuid
import re
import time
import rcStatus
import rcExceptions as ex
from rcUtilities import which
from subprocess import *
from rcGlobalEnv import rcEnv
hostId = __import__('hostid'+rcEnv.sysname)
class ScsiReserv(Res.Resource):
"""Define method to acquire and release scsi SPC-3 persistent reservations
on disks held by a service
"""
def __init__(self,
rid=None,
peer_resource=None,
no_preempt_abort=False,
prkey=None,
**kwargs):
self.no_preempt_abort = no_preempt_abort
self.disks = set([])
self.preempt_timeout = 10
self.prtype = '5'
self.hostid = None
self.peer_resource = peer_resource
self.prkey = prkey
Res.Resource.__init__(self,
rid=rid+"pr",
type="disk.scsireserv",
**kwargs)
def set_label(self):
self.get_disks()
if len(self.disks) == 0:
self.label = 'preserv 0 scsi disk'
elif len(', '.join(self.disks)) > 248:
self.label = 'preserv '+', '.join(self.disks)[0:248]
self.label += " ..."
else:
self.label = ', '.join(self.disks)
def get_hostid(self):
if self.hostid:
return
if self.prkey:
self.hostid = self.prkey
return
try:
self.hostid = self.svc.node.get_prkey()
except Exception as e:
raise ex.excError(str(e))
def info(self):
self.get_hostid()
data = [
[self.svc.svcname, self.svc.node.nodename, self.svc.clustertype, self.rid, "prkey", self.hostid],
]
return data
def scsireserv_supported(self):
return False
def ack_unit_attention(self, d):
raise ex.notImplemented
def disk_registered(self, disk):
raise ex.notImplemented
def disk_register(self, disk):
raise ex.notImplemented
def disk_unregister(self, disk):
raise ex.notImplemented
def get_reservation_key(self, disk):
raise ex.notImplemented
def disk_reserved(self, disk):
raise ex.notImplemented
def disk_release(self, disk):
raise ex.notImplemented
def disk_reserve(self, disk):
raise ex.notImplemented
def disk_preempt_reservation(self, disk, oldkey):
if not hasattr(self, '_disk_preempt_reservation'):
raise ex.notImplemented
if not self.svc.options.force and not self.svc.options.cluster:
self.log.error("%s is already reserved. use --force to override this safety net"%disk)
raise ex.excError
return self._disk_preempt_reservation(disk, oldkey)
def get_disks(self):
if len(self.disks) > 0:
return
self.disks = self.peer_resource.disklist()
def ack_all_unit_attention(self):
self.get_disks()
for d in self.disks:
try:
if self.ack_unit_attention(d) != 0:
return 1
except ex.excScsiPrNotsupported:
continue
return 0
def register(self):
self.log.debug("starting register. prkey %s"%self.hostid)
self.get_disks()
r = 0
for d in self.disks:
try:
r += self.ack_unit_attention(d)
r += self.disk_register(d)
except ex.excScsiPrNotsupported:
continue
return r
def unregister(self):
self.log.debug("starting unregister. prkey %s"%self.hostid)
self.get_disks()
r = 0
for d in self.disks:
try:
r += self.ack_unit_attention(d)
if not self.disk_registered(d):
continue
r += self.disk_unregister(d)
except ex.excScsiPrNotsupported:
continue
return r
def disk_wait_reservation(self, disk):
    """
    Poll up to 3 times, 1 second apart, for the scsi reservation to
    become visible on <disk>. Return 0 when acquired, 1 on timeout.
    """
    for attempt in range(3, 0, -1):
        if self.disk_reserved(disk):
            self.log.info("reservation acquired for disk %s" % disk)
            return 0
        # sleep only between polls: the original slept once more after
        # the last failed check, delaying the timeout report by 1s
        if attempt > 1:
            time.sleep(1)
    self.log.error("timed out waiting for reservation for disk %s" % disk)
    return 1
def reserve(self):
self.log.debug("starting reserve. prkey %s"%self.hostid)
self.get_disks()
r = 0
for d in self.disks:
try:
r += self.ack_unit_attention(d)
key = self.get_reservation_key(d)
if key is None:
r += self.disk_reserve(d)
elif key == self.hostid:
continue
else:
r += self.disk_preempt_reservation(d, key)
r += self.disk_wait_reservation(d)
except ex.excScsiPrNotsupported:
continue
return r
def release(self):
self.log.debug("starting release. prkey %s"%self.hostid)
self.get_disks()
r = 0
for d in self.disks:
try:
r += self.ack_unit_attention(d)
if not self.disk_reserved(d):
continue
r += self.disk_release(d)
except ex.excScsiPrNotsupported:
continue
return r
def clear(self):
self.log.debug("starting clear. prkey %s"%self.hostid)
self.get_disks()
r = 0
for d in self.disks:
try:
r += self.ack_unit_attention(d)
if not self.disk_reserved(d):
continue
r += self.disk_clear_reservation(d)
except ex.excScsiPrNotsupported:
continue
return r
def checkreserv(self):
self.log.debug("starting checkreserv. prkey %s"%self.hostid)
if self.ack_all_unit_attention() != 0:
return rcStatus.WARN
r = rcStatus.Status()
for d in self.disks:
try:
key = self.get_reservation_key(d)
if key is None:
self.log.debug("disk %s is not reserved" % d)
r += rcStatus.DOWN
elif key != self.hostid:
self.log.debug("disk %s is reserved by another host whose key is %s" % (d, key))
r += rcStatus.DOWN
else:
self.log.debug("disk %s is correctly reserved" % d)
r += rcStatus.UP
except ex.excScsiPrNotsupported:
continue
return r.status
def scsireserv(self):
self.get_hostid()
if not self.scsireserv_supported():
return
r = 0
r += self.register()
r += self.reserve()
return r
def scsirelease(self):
self.get_hostid()
if not self.scsireserv_supported():
return
r = 0
if hasattr(self, 'disk_clear_reservation'):
r += self.clear()
else:
r += self.release()
r += self.unregister()
return r
def scsicheckreserv(self):
self.get_hostid()
if not self.scsireserv_supported():
return
return self.checkreserv()
def _status(self, verbose=False):
self.set_label()
try:
self.get_hostid()
except Exception as e:
self.status_log(str(e))
return rcStatus.WARN
if not self.scsireserv_supported():
return rcStatus.NA
return self.checkreserv()
def start(self):
self.get_hostid()
if not self.scsireserv_supported():
return
if self._status() == rcStatus.UP:
self.log.info("already started")
return
self.can_rollback = True
if self.scsireserv() != 0:
raise ex.excError
def stop(self):
self.get_hostid()
if not self.scsireserv_supported():
return
if self.scsirelease() != 0:
raise ex.excError
def provision(self):
self.start()
def unprovision(self):
self.stop()
opensvc-1.8~20170412/lib/rcDiskInfoHP-UX.py 0000644 0001750 0001750 00000015051 13073467726 020222 0 ustar jkelbert jkelbert from rcUtilities import justcall, which
import rcDiskInfo
import os
class diskInfo(rcDiskInfo.diskInfo):
legacy_size_cache = {}
legacy_wwid_cache = {}
def __init__(self):
self.load_cache()
def load_cache(self):
self.load_aliases()
self.h = {}
cmd = ["scsimgr", "-p", "get_attr", "all_lun", "-a", "wwid", "-a", "device_file", "-a", "vid", "-a", "pid", "-a", "capacity"]
out, err, ret = justcall(cmd)
for e in out.split('\n'):
if len(e) == 0:
continue
(wwid, dev, vid, pid, size) = e.split(':')
wwid = wwid.replace('0x', '')
if len(size) != 0:
size = int(size)/2048
else:
size = 0
vid = vid.strip('" ')
pid = pid.strip('" ')
if dev in self.aliases:
aliases = self.aliases[dev]
else:
aliases = [dev]
for alias in aliases:
self.h[alias] = dict(wwid=wwid, vid=vid, pid=pid, size=size)
def load_ioscan(self, refresh=False):
if hasattr(self, "ioscan") and not refresh:
return self.ioscan
cmd = ['/usr/sbin/ioscan', '-FunNC', 'disk']
out, err, ret = justcall(cmd)
if ret != 0:
return
self.ioscan = []
"""
virtbus:wsio:T:T:F:1:13:10:disk:esdisk:64000/0xfa00/0xa:0 0 4 50 0 0 0 0 51 248 164 14 250 83 253 237 :18:root.ext_virtroot.esvroot.esdisk:esdisk:CLAIMED:DEVICE:EMC SYMMETRIX:-1:online
/dev/disk/disk17 /dev/disk/disk17_p3 /dev/rdisk/disk17_p1
/dev/disk/disk17_p1 /dev/pt/x64lmwbieb9_system /dev/rdisk/disk17_p2
/dev/disk/disk17_p2 /dev/rdisk/disk17 /dev/rdisk/disk17_p3
"""
for line in out.split('\n'):
if not line.startswith(' ') and not line.startswith('\t') and len(line) > 0:
l = line.split(":")
blk_major = l[5]
raw_major = l[6]
index = l[7]
vendor = l[17]
# mark ready for insertion as soon as we get a devname
devname = None
elif devname is None:
devname = line.split()[0]
self.ioscan.append({
'devname': devname,
'dev': ':'.join((blk_major, index)),
'rdev': ':'.join((raw_major, index)),
'vendor': vendor,
})
return self.ioscan
def load_aliases(self):
self.aliases = {}
cmd = ['/usr/sbin/ioscan', '-FunNC', 'disk']
out, err, ret = justcall(cmd)
if ret != 0:
return
l = []
for line in out.split('\n')+[':']:
if ':' in line:
if len(l) > 0:
for name in l:
self.aliases[name] = l
l = []
continue
for w in line.split():
l.append(w)
def dev2char(self, dev):
dev = dev.replace("/dev/disk/", "/dev/rdisk/")
dev = dev.replace("/dev/dsk/", "/dev/rdsk/")
return dev
def scan(self, dev):
cmd = ["scsimgr", "-p", "get_attr", "-D", self.dev2char(dev), "-a", "wwid", "-a", "device_file", "-a", "vid", "-a", "pid", "-a", "capacity"]
out, err, ret = justcall(cmd)
if ret != 0:
self.h[dev] = dict(wwid="", vid="", pid="", size=0)
return
(wwid, foo, vid, pid, size) = out.split(':')
wwid = wwid.replace('0x', '')
if len(size) != 0:
size = int(size)/2048
else:
size = 0
vid = vid.strip('" ')
pid = pid.strip('" ')
self.h[dev] = dict(wwid=wwid, vid=vid, pid=pid, size=size)
def get(self, dev, type):
if dev not in self.h:
self.scan(dev)
return self.h[dev][type]
def disk_id(self, dev):
id = self.get(dev, 'wwid')
if len(id) == 0:
id = self.get_legacy_wwid(dev)
return id
def disk_vendor(self, dev):
return self.get(dev, 'vid')
def disk_model(self, dev):
return self.get(dev, 'pid')
def disk_size(self, dev):
size = self.get(dev, 'size')
if size == 0:
size = self.get_legacy_size(dev)
if size is None or size == "":
# broken disk
size = 0
return size
def print_diskinfo(self, info):
info['size'] = self.disk_size(info['devname'])
info['hbtl'] = "#:#:#:#"
print(self.print_diskinfo_fmt%(
info['hbtl'],
os.path.basename(info['devname']),
info['size'],
info['dev'],
info['vendor'],
'',
))
def scanscsi(self, hba=None, target=None, lun=None):
ioscan_before = self.load_ioscan()
disks_before = map(lambda x: x['devname'], ioscan_before)
cmd = ['/usr/sbin/ioscan', '-fnC', 'disk']
out, err, ret = justcall(cmd)
if ret != 0:
return
ioscan_after = self.load_ioscan(refresh=True)
disks_after = map(lambda x: x['devname'], ioscan_after)
new_disks = set(disks_after) - set(disks_before)
self.print_diskinfo_header()
for info in ioscan_after:
if info['devname'] not in new_disks:
continue
self.print_diskinfo(info)
return 0
def get_legacy_wwid(self, devpath):
    """
    Return the wwid of a legacy device path, using the instance cache.
    Fall back to the autopath CLI when available, else return "".
    """
    if devpath in self.legacy_wwid_cache:
        # BUG fixed: the cached value was looked up but never returned,
        # so every call fell through to the autopath lookup
        return self.legacy_wwid_cache[devpath]
    if which("autopath"):
        wwid = self.get_autopath_wwid(devpath)
        self.legacy_wwid_cache[devpath] = wwid
        return wwid
    return ""
def get_autopath_wwid(self, devpath):
cmd = ["autopath", "display", devpath]
out, err, ret = justcall(cmd)
if ret != 0:
return ""
for line in out.split("\n"):
if "Lun WWN" in line:
return line.split(": ")[-1].replace("-","").lower()
return ""
def get_legacy_size(self, devpath):
""" return devpath size in megabytes
"""
if devpath in self.legacy_size_cache:
return self.legacy_size_cache[devpath]
if not which("diskinfo"):
return 0
cmd = ["diskinfo", "-b", devpath.replace("dsk", "rdsk").replace("disk", "rdisk")]
out, err, ret = justcall(cmd)
if ret != 0:
return 0
size = int(out.strip())/1024
self.legacy_size_cache[devpath] = size
return size
opensvc-1.8~20170412/lib/resContainerXen.py 0000644 0001750 0001750 00000006540 13073467726 020517 0 ustar jkelbert jkelbert import resources as Res
import os
import rcExceptions as ex
import resContainer
rcU = __import__("rcUtilities" + os.uname()[0])
class Xen(resContainer.Container):
startup_timeout = 180
shutdown_timeout = 120
def __init__(self,
rid,
name,
guestos=None,
osvc_root_path=None,
**kwargs):
resContainer.Container.__init__(self,
rid=rid,
name=name,
type="container.xen",
guestos=guestos,
osvc_root_path=osvc_root_path,
**kwargs)
def __str__(self):
return "%s name=%s" % (Res.Resource.__str__(self), self.name)
def list_conffiles(self):
cf = os.path.join(os.sep, 'opt', 'opensvc', 'var', self.name+'.xml')
if os.path.exists(cf):
return [cf]
return []
def files_to_sync(self):
return self.list_conffiles()
def check_capabilities(self):
cmd = ['virsh', 'capabilities']
(ret, out, err) = self.call(cmd, errlog=False)
if ret != 0:
self.status_log("can not fetch capabilities")
return False
return True
def ping(self):
return rcU.check_ping(self.addr, timeout=1, count=1)
def container_start(self):
cf = os.path.join(os.sep, 'opt', 'opensvc', 'var', self.name+'.xml')
if os.path.exists(cf):
cmd = ['virsh', 'define', cf]
(ret, buff, err) = self.vcall(cmd)
if ret != 0:
raise ex.excError
cmd = ['virsh', 'start', self.name]
(ret, buff, err) = self.vcall(cmd)
if ret != 0:
raise ex.excError
def container_stop(self):
cmd = ['virsh', 'shutdown', self.name]
(ret, buff, err) = self.vcall(cmd)
if ret != 0:
raise ex.excError
def container_forcestop(self):
cmd = ['virsh', 'destroy', self.name]
(ret, buff, err) = self.vcall(cmd)
if ret != 0:
raise ex.excError
def is_up_on(self, nodename):
return self.is_up(nodename)
def is_up(self, nodename=None):
    """
    Return True when 'virsh dominfo' reports the domain running or idle.
    When <nodename> is set, run the command remotely through rcEnv.rsh.
    """
    cmd = ['virsh', 'dominfo', self.name]
    if nodename is not None:
        # BUG fixed: rcEnv is not imported by this module, so the remote
        # branch raised NameError; import it where it is needed
        from rcGlobalEnv import rcEnv
        cmd = rcEnv.rsh.split() + [nodename] + cmd
    (ret, out, err) = self.call(cmd, errlog=False)
    if ret != 0:
        return False
    words = out.split()
    return "running" in words or "idle" in words
def get_container_info(self):
cmd = ['virsh', 'dominfo', self.name]
(ret, out, err) = self.call(cmd, errlog=False, cache=True)
self.info = {'vcpus': '0', 'vmem': '0'}
if ret != 0:
return self.info
for line in out.split('\n'):
if "CPU(s):" in line: self.info['vcpus'] = line.split(':')[1].strip()
if "Max memory" in line: self.info['vmem'] = line.split(':')[1].strip()
if "Autostart:" in line: self.info['autostart'] = line.split(':')[1].strip()
return self.info
def check_manual_boot(self):
    # Return True when the domain does not autostart with the hypervisor
    # (i.e. its boot is under opensvc control).
    # NOTE(review): get_container_info() does not set the 'autostart' key
    # when 'virsh dominfo' fails, so this raises KeyError in that case --
    # confirm whether callers rely on the exception.
    self.get_container_info()
    if self.info['autostart'] == 'disable' :
        return True
    else:
        return False
opensvc-1.8~20170412/lib/rcZfs.py 0000644 0001750 0001750 00000015611 13073467726 016476 0 ustar jkelbert jkelbert from rcUtilities import justcall, call, vcall
import logging
import sys
"""
"""
def dataset_exists(device, type):
    """return Dataset(device).exists(type)"""
    ds = Dataset(device)
    return ds.exists(type)
def zfs_getprop(dataset='undef_ds', propname='undef_prop'):
    """Return the value of zfs property <propname> for <dataset>, or "" on error."""
    out, _, ret = justcall(['zfs', 'get', '-Hp', '-o', 'value', propname, dataset])
    if ret != 0:
        return ""
    # -H prints the bare value on the first line
    return out.split("\n")[0]
def zfs_setprop(dataset='undef_ds', propname='undef_prop', propval='undef_val'):
    """Set zfs property <propname> to <propval> on <dataset>.

    Return True on success, or when the property already holds the
    requested value; False otherwise.
    """
    if zfs_getprop(dataset, propname) == propval:
        return True
    cmd = ['zfs', 'set', propname + '=' + propval, dataset]
    print(' '.join(cmd))
    retcode, stdout, stderr = vcall(cmd)
    return retcode == 0
def a2pool_dataset(s):
    """Return (pool, dataset) from a mount point or dataset name.

    example: a2pool_dataset('/') => ('rpool', 'rpool/ROOT/opensolaris-b134')
    same with a2pool_dataset('rpool/ROOT/opensolaris-b134')
    """
    if not s:
        return ("", "")
    name = s
    if s.startswith('/'):
        # mount point: resolve to the backing dataset name via zfs list
        ret, out, err = call(['zfs', 'list', '-H', '-o', 'name', s])
        if ret != 0:
            return ("", "")
        name = out.split('\n')[0]
    if '/' not in name:
        # the dataset is the pool root
        return (name, name)
    return (name.split('/')[0], name)
class Dataset(object):
    """Wrap a zfs dataset name and expose the usual zfs(1M)
    administration verbs: list, create, destroy, get/set properties,
    snapshot and clone.
    """

    # shared fallback logger, lazily initialized by the first instance
    # constructed without an explicit logger
    log = None

    def __init__(self, name, log=None):
        self.name = name
        if log is None:
            if Dataset.log is None:
                Dataset.log = logging.getLogger("DATASET".upper())
                Dataset.log.addHandler(logging.StreamHandler(sys.stdout))
                Dataset.log.setLevel(logging.INFO)
            self.log = Dataset.log
        else:
            self.log = log

    def __str__(self, option=None):
        """Return 'zfs list' output for this dataset, or an error message
        string when the listing fails."""
        if option is None:
            cmd = ['zfs', 'list', self.name]
        else:
            cmd = ['zfs', 'list'] + option + [self.name]
        (retcode, stdout, stderr) = call(cmd, log=self.log)
        if retcode == 0:
            return stdout
        else:
            return "Failed to list info for dataset: %s" % (self.name)

    def exists(self, type="all"):
        """Return True if the dataset exists else return False.
        If a type is provided ('filesystem', 'snapshot', 'volume'),
        also verify the dataset type."""
        (out, err, ret) = justcall('zfs get -H -o value type'.split()+[self.name])
        if ret == 0 and type == "all":
            return True
        elif ret == 0 and out.split('\n')[0] == type:
            return True
        else:
            return False

    def create(self, option=None):
        """Create the dataset, with optional 'zfs create' options.
        Return True on success, else False."""
        if option is None:
            cmd = ['zfs', 'create', self.name]
        else:
            cmd = ['zfs', 'create'] + option + [self.name]
        (retcode, stdout, stderr) = vcall(cmd, log=self.log)
        return retcode == 0

    def destroy(self, options=[]):
        """Destroy the dataset. Return True on success, else False."""
        cmd = ['zfs', 'destroy'] + options + [self.name]
        (retcode, stdout, stderr) = vcall(cmd, log=self.log)
        return retcode == 0

    def getprop(self, propname):
        """Return the dataset property value, or '' when it can not be
        fetched."""
        cmd = ['zfs', 'get', '-Hp', '-o', 'value', propname, self.name]
        (stdout, stderr, retcode) = justcall(cmd)
        if retcode == 0:
            return stdout.rstrip('\n')
        else:
            return ""

    def setprop(self, propname, propval, err_to_warn=False, err_to_info=False):
        """Set a dataset property value.
        Return True on success, else False."""
        cmd = ['zfs', 'set', propname + '=' + propval, self.name]
        (retcode, stdout, stderr) = vcall(cmd, log=self.log,
                                          err_to_warn=err_to_warn,
                                          err_to_info=err_to_info)
        return retcode == 0

    def verify_prop(self, nv_pairs={}, err_to_warn=False, err_to_info=False):
        """For each (name, value) in the nv_pairs dict, set the property
        on the dataset if its current value differs."""
        for name in nv_pairs.keys():
            if self.getprop(name) != nv_pairs[name]:
                self.setprop(propname=name, propval=nv_pairs[name],
                             err_to_warn=err_to_warn,
                             err_to_info=err_to_info)

    def snapshot(self, snapname=None, recursive=False):
        """Snapshot the dataset.
        Return the snapshot Dataset object, or False on failure.
        Raise ValueError if snapname is not specified.

        bugfix: the previous code raised rcExceptions.excBug here, but
        rcExceptions is not imported in this module, so the guard
        actually crashed with a NameError.
        """
        if snapname is None:
            raise ValueError("snapname should be defined")
        snapdataset = self.name + "@" + snapname
        cmd = ['zfs', 'snapshot']
        if recursive:
            cmd.append("-r")
        cmd.append(snapdataset)
        (retcode, stdout, stderr) = vcall(cmd, log=self.log)
        if retcode == 0:
            # propagate the logger instead of re-running the default
            # logger setup in the new object
            return Dataset(snapdataset, log=self.log)
        else:
            return False

    def clone(self, name, option=None):
        """Clone the dataset (which must be a snapshot) to <name>, with
        optional 'zfs clone' options.
        Return the clone Dataset object, or False on failure."""
        if option is None:
            cmd = ['zfs', 'clone', self.name, name]
        else:
            cmd = ['zfs', 'clone'] + option + [self.name, name]
        (retcode, stdout, stderr) = vcall(cmd, log=self.log)
        if retcode == 0:
            return Dataset(name, log=self.log)
        else:
            return False
if __name__ == "__main__":
    # exercise the Dataset class against a scratch dataset
    dsname = "rpool/toto"
    ds = Dataset(dsname)
    if ds.create(option=["-o", "mountpoint=none"]) is False:
        print("========== Failed")
    else:
        print(ds)
    ds.verify_prop({'mountpoint': '/tmp/mnt',
                    'refquota': (10*1024*1024).__str__(),
                    })
    print("show type,refquota,mountpoint")
    for p in ('type', 'refquota', 'mountpoint'):
        print('%s value: %s'%(p, ds.getprop(p)))
    print(ds)
    val = ds.setprop('opensvc:name', 'Example')
    print(ds.__str__(["-Ho", "opensvc:name"]))
    val = ds.getprop('opensvc:name')
    print("val Value=", val)
    for sname in ["foo", "bar"]:
        s = ds.snapshot(sname)
        if s is False:
            print("========== Failed")
            continue
        print(s)
        c = s.clone(dsname + "/clone_" + sname)
        if c is False:
            print("========== Failed")
        else:
            print(c)
            c.destroy()
        if s.destroy() is False:
            print("========== Failed")
    # bugfix: ds.exists is a method; the original tested its truthiness
    # without calling it, so this branch was always taken
    if ds.exists():
        print("Destroy dataset", ds.name)
        if ds.destroy() is False:
            # bugfix: the original printed a misleading
            # "Failed to create snapshot" message on destroy failure
            print("Failed to destroy dataset")
opensvc-1.8~20170412/lib/checkRaidSmartArray.py 0000644 0001750 0001750 00000006027 13073467726 021273 0 ustar jkelbert jkelbert import checks
from rcUtilities import justcall, which
import os
# append the HPQacucli install dir to PATH so 'which'/'justcall' can find
# the hpacucli binary
sep = ':'
path_list = os.environ['PATH'].split(sep) + ['/opt/HPQacucli/sbin']
os.environ['PATH'] = sep.join(path_list)
# NOTE(review): presumably lets hpacucli run on hosts without a SmartArray
# controller -- confirm against the HPQacucli documentation
os.environ['INFOMGR_BYPASS_NONSA'] = '1'
class check(checks.check):
    """HP SmartArray raid status checker, driven by the hpacucli CLI."""
    chk_type = "raid"
    chk_name = "HP SmartArray"

    def parse_errors(self, out):
        """Turn 'hpacucli ... show status' output into a list of
        (instance, status) tuples: status 0 for components reported OK,
        1 otherwise."""
        result = []
        for line in out.split('\n'):
            # component lines are indented and formatted '<name>: <state>'
            if not line.startswith(' '):
                continue
            fields = line.split(': ')
            if len(fields) < 2:
                continue
            if fields[-1].strip() == "OK":
                result.append((fields[0].strip().lower(), 0))
            else:
                # keep the whole line as instance so the error state shows
                result.append((line.strip().lower(), 1))
        return result

    def _show_status(self, slot, *selector):
        # run 'controller slot=N [<selector...>] show status' and parse it
        cmd = ['controller', 'slot='+slot] + list(selector) + ['show', 'status']
        out, err, ret = self.hpacucli(cmd)
        if ret != 0:
            return []
        return self.parse_errors(out)

    def check_logicaldrive(self, slot):
        return self._show_status(slot, 'logicaldrive', 'all')

    def check_physicaldrive(self, slot):
        return self._show_status(slot, 'physicaldrive', 'all')

    def check_array(self, slot):
        return self._show_status(slot, 'array', 'all')

    def check_controller(self, slot):
        return self._show_status(slot)

    def hpacucli(self, cmd):
        """Run hpacucli; retry through $SHELL if the direct exec fails."""
        cmd = ['hpacucli'] + cmd
        try:
            out, err, ret = justcall(cmd)
        except OSError:
            cmd = [os.environ['SHELL']] + cmd
            out, err, ret = justcall(cmd)
        return out, err, ret

    def do_check(self):
        """Discover controller slots, then check each controller and its
        arrays, logical drives and physical drives."""
        if not which('hpacucli'):
            return self.undef
        out, err, ret = self.hpacucli(['controller', 'all', 'show', 'status'])
        if ret != 0:
            return self.undef
        data = []
        for line in out.split('\n'):
            if ' Slot ' not in line:
                continue
            words = line.split()
            uslot = words[words.index('Slot')+1]
            slot = 'slot ' + uslot
            states = []
            states += self.check_controller(uslot)
            states += self.check_array(uslot)
            states += self.check_logicaldrive(uslot)
            states += self.check_physicaldrive(uslot)
            for inst, value in states:
                data.append({
                    'chk_instance': ".".join((slot, inst)),
                    'chk_value': str(value),
                    'chk_svcname': '',
                })
        return data
opensvc-1.8~20170412/lib/resHbLinuxHA.py 0000644 0001750 0001750 00000002066 13073467726 017703 0 ustar jkelbert jkelbert import resHb
from rcGlobalEnv import rcEnv
import os
import rcStatus
import rcExceptions as ex
from rcUtilities import justcall, which
class Hb(resHb.Hb):
    """Linux-HA (heartbeat) cluster heartbeat resource."""

    def __init__(self,
                 rid=None,
                 name=None,
                 **kwargs):
        resHb.Hb.__init__(self,
                          rid,
                          "hb.linuxha",
                          **kwargs)
        self.status_cmd = 'cl_status'
        self.name = name

    def process_running(self):
        """Return True when 'cl_status hbstatus' reports heartbeat up."""
        out, err, ret = justcall([self.status_cmd, 'hbstatus'])
        return ret == 0 and 'is running' in out

    def __status(self, verbose=False):
        """Degrade to WARN when heartbeat is missing or stopped, else n/a."""
        if not which(self.status_cmd):
            self.status_log("heartbeat is not installed")
            return rcStatus.WARN
        if not self.process_running():
            self.status_log("heartbeat daemons are not running")
            return rcStatus.WARN
        return rcStatus.NA
opensvc-1.8~20170412/lib/rcSysReportHP-UX.py 0000644 0001750 0001750 00000000230 13073467726 020457 0 ustar jkelbert jkelbert import rcSysReport
class SysReport(rcSysReport.SysReport):
    """HP-UX system report collector: no platform-specific overrides,
    the generic implementation covers this OS."""

    def __init__(self, node=None):
        rcSysReport.SysReport.__init__(self, node=node)
opensvc-1.8~20170412/lib/hostidAIX.py 0000644 0001750 0001750 00000000435 13073467726 017241 0 ustar jkelbert jkelbert from subprocess import *
def hostid():
    """Derive a hostid-like hex string from the AIX system id
    ('uname -u'). Fall back to '1' when the command fails."""
    proc = Popen(['uname', '-u'], stderr=None, stdout=PIPE, close_fds=True)
    out = proc.communicate()
    if proc.returncode != 0:
        return '1'
    serial = out[0].split()[0]
    # NOTE(review): str/bytes hash() is randomized per interpreter run on
    # python3 (PYTHONHASHSEED), so this value may differ between runs --
    # confirm whether callers depend on a stable host id
    return str(hex(abs(serial.__hash__()))).replace('0x', '')
opensvc-1.8~20170412/lib/checkVgUsageHP-UX.py 0000644 0001750 0001750 00000003354 13073467726 020531 0 ustar jkelbert jkelbert import checks
from rcUtilities import call
class check(checks.check):
    """HP-UX volume group usage checker based on 'vgdisplay -F' output."""
    chk_type = "vg_u"

    def find_svc(self, vgname):
        """Return the svcname of the service using the vg, or ''."""
        for svc in self.svcs:
            for resource in svc.get_resources('disk.vg'):
                if resource.name == vgname:
                    return svc.svcname
        return ''

    def do_check(self):
        """Parse the colon-separated 'vgdisplay -F' output and report the
        used-extent percentage per volume group.

        Sample line:
        vg_name=/dev/vg00:vg_write_access=read,write:...:total_pe=4347:
        alloc_pe=2712:free_pe=1635:...
        """
        ret, out, err = call(['vgdisplay', '-F'], errlog=False)
        if ret != 0:
            return self.undef
        lines = out.split('\n')
        if len(lines) < 1:
            return self.undef
        data = []
        for line in lines:
            fields = line.split(':')
            if len(fields) < 10:
                continue
            instance = None
            free = None
            size = None
            for field in fields:
                if 'vg_name' in field:
                    instance = field.split('=')[1].replace('/dev/', '')
                elif 'total_pe' in field:
                    size = int(field.split('=')[1])
                elif 'free_pe' in field:
                    free = int(field.split('=')[1])
            if instance is None or free is None or size is None:
                continue
            val = int(100*(size-free)/size)
            data.append({'chk_instance': instance,
                         'chk_value': str(val),
                         'chk_svcname': self.find_svc(instance),
                         })
        return data
opensvc-1.8~20170412/lib/rcWakeOnLan.py 0000644 0001750 0001750 00000002614 13073467726 017552 0 ustar jkelbert jkelbert import sys
import socket
import struct
import re
# mac address like 00:11:22:33:44:55 or 00-11-22-33-44-55 (lowercase hex)
regex_mac = re.compile(r'[0-9a-f]{2}([-:])[0-9a-f]{2}(\1[0-9a-f]{2}){4}$')
# dotted-quad ipv4 broadcast address
regex_broadcast = re.compile(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$')

class wolrequest(object):
    """Build and send a wake-on-lan 'magic packet' over UDP broadcast."""

    def __init__(self, macaddress, broadcast, udpport=7):
        self.mac = macaddress
        self.broadcast = broadcast
        self.udpport = udpport
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)

    def check_mac(self):
        """Validate the mac address and normalize it to 12 bare hex
        digits. Return True if valid.
        """
        # bugfix: the original "if not ':' and not '-' in self.mac" always
        # evaluated to False because of operator precedence; the intent is
        # to reject addresses carrying no separator at all
        if ':' not in self.mac and '-' not in self.mac:
            return False
        if regex_mac.match(self.mac.lower()) is None:
            return False
        if ':' in self.mac:
            self.mac = self.mac.replace(':', '')
        if '-' in self.mac:
            self.mac = self.mac.replace('-', '')
        if len(self.mac) != 12:
            return False
        return True

    def check_broadcast(self):
        """Return True if the broadcast address is a dotted-quad ipv4."""
        if regex_broadcast.match(self.broadcast) is None:
            return False
        return True

    def send(self):
        """Send the magic packet (6 x 0xff followed by repetitions of the
        mac). Return True on success, False on any send error."""
        buf = ''.join(['FFFFFFFFFFFF', self.mac * 20])
        # bugfix: join the packed bytes with a bytes separator so the
        # payload assembles correctly on python3 too (struct.pack returns
        # bytes there, and ''.join(bytes) raises TypeError)
        payload = b''.join(struct.pack('B', int(buf[i: i + 2], 16))
                           for i in range(0, len(buf), 2))
        try:
            self.sock.sendto(payload, (self.broadcast, self.udpport))
        except:
            return False
        return True
opensvc-1.8~20170412/lib/rcIfconfigLinux.py 0000644 0001750 0001750 00000022075 13073467726 020502 0 ustar jkelbert jkelbert from subprocess import *
from rcUtilities import which, cidr_to_dotted
import rcIfconfig
import copy
"""
ip addr:
1: lo: mtu 16436 qdisc noqueue
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
...
4: eth0: mtu 1500 qdisc pfifo_fast master bond0 qlen 1000
link/ether 00:23:7d:a1:6f:96 brd ff:ff:ff:ff:ff:ff
6: sit0: mtu 1480 qdisc noop
link/sit 0.0.0.0 brd 0.0.0.0
7: bond0: mtu 1500 qdisc noqueue
link/ether 00:23:7d:a1:6f:96 brd ff:ff:ff:ff:ff:ff
inet 10.151.32.29/22 brd 10.151.35.255 scope global bond0
inet 10.151.32.50/22 brd 10.151.35.255 scope global secondary bond0:1
inet6 fe80::223:7dff:fea1:6f96/64 scope link
valid_lft forever preferred_lft forever
"""
class ifconfig(rcIfconfig.ifconfig):
    """Linux network interface inventory.

    Populates self.intf with rcIfconfig.interface objects parsed from
    'ip addr' output (preferred) or legacy 'ifconfig -a' output, and
    optionally self.mcast_data from 'netstat -gn' or 'ip maddr'.
    """

    def parse_ip(self, out):
        # Parse 'ip addr' output. Non-indented lines start a new
        # interface; indented 'link...' and 'inet...' lines complete the
        # current one.
        for line in out.splitlines():
            if len(line) == 0:
                continue
            if line[0] != " ":
                """
                1: lo: mtu 65536 qdisc noqueue state UNKNOWN
                """
                _line = line.split()
                ifname = _line[1].strip(":")
                if "@if" in ifname:
                    # 'eth0@if12' style names: keep the kernel-side link
                    # name separately in ifkname
                    ifkname = ifname[ifname.index("@if"):]
                    ifname = ifname[:ifname.index("@if")]
                else:
                    ifkname = None
                i = rcIfconfig.interface(ifname)
                i.ifkname = ifkname
                # defaults
                i.link_encap = ''
                i.scope = []
                i.bcast = []
                i.mask = []
                i.mtu = ''
                i.ipaddr = []
                i.ip6addr = []
                i.ip6mask = []
                i.hwaddr = ''
                i.flag_up = False
                i.flag_broadcast = False
                i.flag_running = False
                i.flag_multicast = False
                i.flag_loopback = False
                self.intf.append(i)
                prev = ''
                for w in _line:
                    if 'mtu' == prev:
                        i.mtu = w
                    elif w.startswith('<'):
                        # '<UP,BROADCAST,...>' flags word
                        w = w.strip('<').strip('>')
                        flags = w.split(',')
                        for w in flags:
                            if 'UP' == w:
                                i.flag_up = True
                            if 'BROADCAST' == w:
                                i.flag_broadcast = True
                            if 'RUNNING' == w:
                                i.flag_running = True
                            if 'MULTICAST' == w:
                                i.flag_multicast = True
                            if 'LOOPBACK' == w:
                                i.flag_loopback = True
                    prev = w
            elif line.strip().startswith("link"):
                """
                link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
                """
                _line = line.split()
                prev = ''
                for w in _line:
                    if 'link/' in w:
                        i.link_encap = w.split('/')[1]
                    elif 'link/ether' == prev:
                        i.hwaddr = w
                    prev = w
            elif line.strip().startswith("inet"):
                """
                inet 127.0.0.1/8 scope host lo
                inet6 ::1/128 scope host
                valid_lft forever preferred_lft forever
                """
                _line = line.split()
                if "global" in line and ":" in _line[-1]:
                    # labeled secondary address (ex: 'bond0:1'):
                    # clone parent intf and reset inet fields
                    ifname = line.split()[-1]
                    _i = copy.copy(i)
                    _i.name = ifname
                    _i.scope = []
                    _i.bcast = []
                    _i.mask = []
                    _i.ipaddr = []
                    _i.ip6addr = []
                    _i.ip6mask = []
                    self.intf.append(_i)
                else:
                    _i = i
                prev = ''
                for w in _line:
                    if 'inet' == prev:
                        try:
                            ipaddr, mask = w.split('/')
                        except:
                            # tun for example
                            continue
                        _i.ipaddr += [ipaddr]
                        _i.mask += [cidr_to_dotted(mask)]
                    elif 'inet6' == prev:
                        try:
                            ip6addr, ip6mask = w.split('/')
                        except:
                            # tun for example
                            continue
                        _i.ip6addr += [ip6addr]
                        _i.ip6mask += [ip6mask]
                    elif 'brd' == prev and 'inet' in line:
                        _i.bcast += [w]
                    elif 'scope' == prev and 'inet' in line:
                        _i.scope += [w]
                    prev = w

    def parse_ifconfig(self, out):
        # Parse legacy 'ifconfig -a' output as a flat word stream, using
        # the previous one or two words as parsing context.
        prev = ''
        prevprev = ''
        for w in out.split():
            if w == 'Link':
                # 'eth0 Link encap:...' : the word before 'Link' is the
                # interface name
                i = rcIfconfig.interface(prev)
                self.intf.append(i)
                # defaults (note: scalar strings here, unlike parse_ip
                # which uses lists for scope/bcast/mask/ipaddr)
                i.link_encap = ''
                i.scope = ''
                i.bcast = ''
                i.mask = ''
                i.mtu = ''
                i.ipaddr = ''
                i.ip6addr = []
                i.ip6mask = []
                i.hwaddr = ''
                i.flag_up = False
                i.flag_broadcast = False
                i.flag_running = False
                i.flag_multicast = False
                i.flag_loopback = False
            elif 'encap:' in w:
                (null, i.link_encap) = w.split(':')
            elif 'Scope:' in w:
                (null, i.scope) = w.split(':')
            elif 'Bcast:' in w:
                (null, i.bcast) = w.split(':')
            elif 'Mask:' in w:
                (null, i.mask) = w.split(':')
            elif 'MTU:' in w:
                (null, i.mtu) = w.split(':')
            if 'inet' == prev and 'addr:' in w:
                (null, i.ipaddr) = w.split(':')
            if 'inet6' == prevprev and 'addr:' == prev:
                (ip6addr, ip6mask) = w.split('/')
                i.ip6addr += [ip6addr]
                i.ip6mask += [ip6mask]
            if 'HWaddr' == prev:
                i.hwaddr = w
            if 'UP' == w:
                i.flag_up = True
            if 'BROADCAST' == w:
                i.flag_broadcast = True
            if 'RUNNING' == w:
                i.flag_running = True
            if 'MULTICAST' == w:
                i.flag_multicast = True
            if 'LOOPBACK' == w:
                i.flag_loopback = True
            prevprev = prev
            prev = w

    def get_mcast(self):
        # Multicast memberships keyed by interface name, from
        # 'netstat -gn' when available, else 'ip maddr'.
        # NOTE: falls off the end (returns None) when neither tool exists.
        if which('netstat'):
            cmd = ['netstat', '-gn']
            out = Popen(cmd, stdout=PIPE).communicate()[0].decode()
            return self.parse_mcast_netstat(out)
        elif which('ip'):
            cmd = ['ip', 'maddr']
            out = Popen(cmd, stdout=PIPE).communicate()[0].decode()
            return self.parse_mcast_ip(out)

    def parse_mcast_netstat(self, out):
        # group lines follow a '--...' separator line and are formatted
        # '<intf> <refcnt> <addr>'
        lines = out.splitlines()
        found = False
        data = {}
        for i, line in enumerate(lines):
            if line.startswith('--'):
                found = True
                break
        if not found:
            return data
        if len(lines) == i+1:
            # separator is the last line: no group entries
            return data
        lines = lines[i+1:]
        for line in lines:
            try:
                intf, refcnt, addr = line.split()
            except:
                continue
            if intf not in data:
                data[intf] = [addr]
            else:
                data[intf] += [addr]
        return data

    def parse_mcast_ip(self, out):
        # 'ip maddr' output: non-indented lines name an interface,
        # indented 'inet...' lines list its group addresses
        lines = out.splitlines()
        found = False
        data = {}
        for line in lines:
            if not line.startswith(" "):
                # new interface
                try:
                    name = line.split(":")[-1].strip()
                except Exception as e:
                    print(e)
                    break
                if name == "":
                    continue
                data[name] = []
                continue
            if "inet" not in line:
                continue
            data[name].append(line.split()[-1])
        return data

    def __init__(self, mcast=False, ip_out=None):
        # ip_out: pre-captured 'ip addr' output, mostly for testing;
        # otherwise run 'ip addr' (preferred) or 'ifconfig -a'
        self.intf = []
        if mcast:
            self.mcast_data = self.get_mcast()
        else:
            self.mcast_data = {}
        if ip_out:
            self.parse_ip(ip_out)
        elif which('ip'):
            out = Popen(['ip', 'addr'], stdout=PIPE).communicate()[0].decode()
            self.parse_ip(out)
        else:
            out = Popen(['ifconfig', '-a'], stdout=PIPE).communicate()[0].decode()
            self.parse_ifconfig(out)
if __name__ == "__main__":
    # quick manual test: dump the parsed interface table
    ifaces = ifconfig(mcast=True)
    print(ifaces)
opensvc-1.8~20170412/lib/rcStatsCollectHP-UX.py 0000644 0001750 0001750 00000006703 13073467726 021124 0 ustar jkelbert jkelbert import os
import sys
import datetime
from subprocess import *
from rcUtilities import justcall, is_exe
from rcGlobalEnv import rcEnv
def collect(node):
    """Collect HP-UX statistics (filesystem usage, plus background glance
    system metrics) and push the filesystem dataset to the collector
    through <node>."""
    now = str(datetime.datetime.now())

    def fs_u():
        # Filesystem usage dataset: (column names, rows), one row per
        # locally mounted filesystem reported by 'df -lP'.
        vars = ['date',
                'nodename',
                'mntpt',
                'size',
                'used']
        cmd = ['df', '-lP']
        (out, err, ret) = justcall(cmd)
        if ret != 0:
            return
        lines = out.split('\n')
        if len(lines) < 2:
            return
        vals = []
        for line in lines[1:]:
            l = line.split()
            if len(l) != 6:
                continue
            # mount point, size, used% (percent sign stripped)
            vals.append([now, node.nodename, l[5], l[1], l[4].replace('%','')])
        return (vars, vals)

    def glance_running(cmd_str):
        # Return True if a process whose command line contains cmd_str is
        # already running, scanning 'ps -ef' output.
        (out, err, ret) = justcall(['ps', '-ef'])
        if ret != 0:
            print('ps error')
            return
        for line in out.split('\n'):
            l = line.split()
            if len(l) < 6:
                continue
            if cmd_str in ' '.join(l[6:]):
                return True
        return False

    def run_glance():
        # Start a detached glance collector appending samples to a daily
        # stats file, unless an identical one is already running.
        glance = '/opt/perf/bin/glance'
        syn = '/opt/opensvc/tmp/glance.syntax'
        now = datetime.datetime.now()
        # number of 10-minute sampling iterations left until midnight
        iterations = (23-now.hour)*6+(60-now.minute)//10
        cmd = ['/opt/perf/bin/glance', '-aos', syn, '-j', '600', '-iterations']
        cmd_str = ' '.join(cmd)
        if not is_exe(glance):
            print('glance executable not found')
            return
        if glance_running(cmd_str):
            print('glance is already running')
            return
        # glance syntax file: one output line per sample carrying cpu,
        # memory, swap, load, process table and disk io counters
        buff = """print GBL_STATTIME," ",
// usr
0.00+GBL_CPU_NORMAL_UTIL+GBL_CPU_REALTIME_UTIL," ",
// nice
0.00+GBL_CPU_NICE_UTIL+GBL_CPU_NNICE_UTIL," ",
// sys
0.00+GBL_CPU_SYSCALL_UTIL+GBL_CPU_CSWITCH_UTIL+GBL_CPU_TRAP_UTIL+GBL_CPU_VFAULT_UTIL," ",
// irq
0.00+GBL_CPU_INTERRUPT_UTIL," ",
// wait
0.00+GBL_CPU_WAIT_UTIL," ",
// idle
0.00+GBL_CPU_IDLE_UTIL-GBL_CPU_WAIT_UTIL," ",
// mem
0+GBL_MEM_PHYS," ",
0+GBL_MEM_FREE," ",
0+GBL_MEM_CACHE," ",
0+GBL_MEM_FILE_PAGE_CACHE," ",
0+GBL_MEM_SYS," ",
0+GBL_MEM_USER," ",
// swap
0+GBL_MEM_SWAP," ",
0+GBL_SWAP_SPACE_AVAIL-GBL_MEM_PHYS," ",
// load
GBL_LOADAVG," ",
GBL_LOADAVG5," ",
GBL_LOADAVG15," ",
GBL_CPU_QUEUE," ",
// process list
TBL_PROC_TABLE_USED," ",
// disk io
GBL_DISK_PHYS_READ_RATE," ",
GBL_DISK_PHYS_WRITE_RATE," ",
// disk kB/s
GBL_DISK_PHYS_READ_BYTE_RATE," ",
GBL_DISK_PHYS_WRITE_BYTE_RATE
"""
        try:
            with open(syn, 'w') as f:
                f.write(buff)
        except:
            print('error writing %s'%syn)
            return
        collect_d = '/opt/opensvc/var/'
        collect_f = 'glance%0.2d'%now.day
        collect_p = os.path.join(collect_d, collect_f)
        if os.path.exists(collect_p):
            mtime = os.stat(collect_p).st_mtime
            if datetime.datetime.fromtimestamp(mtime) < now - datetime.timedelta(days=1):
                # stale file left over from last month's same day: reset
                os.unlink(collect_p)
        # detach via nohup so glance survives this process
        _cmd = 'nohup %s %d >>%s &'%(cmd_str, iterations, collect_p)
        process = Popen(_cmd, shell=True, stdout=PIPE, stderr=PIPE)

    run_glance()
    fs_u_data = fs_u()
    if fs_u_data is not None:
        node.collector.call('push_stats_fs_u', fs_u_data)
opensvc-1.8~20170412/lib/rcHds.py 0000644 0001750 0001750 00000061161 13073467726 016453 0 ustar jkelbert jkelbert from __future__ import print_function
import os
import ConfigParser
import sys
import json
from xml.etree.ElementTree import XML, fromstring
import rcExceptions as ex
from rcGlobalEnv import rcEnv, Storage
from rcUtilities import which, justcall, convert_size, lazy
from rcOptParser import OptParser
from optparse import Option
# program name displayed in the online help
PROG = "nodemgr array"

# optparse Option definitions for the array management actions,
# keyed by option name
OPT = Storage({
    "help": Option(
        "-h", "--help", action="store_true", dest="parm_help",
        help="show this help message and exit"),
    "array": Option(
        "-a", "--array", action="store", dest="array_name",
        help="The name of the array, as defined in auth.conf"),
    "pool": Option(
        "--pool", action="store", dest="pool",
        help="The name of the DP pool"),
    "size": Option(
        "--size", action="store", dest="size",
        help="The disk size, expressed as a size expression like 1g, 100mib, ..."),
    "lun": Option(
        "--lun", action="store", dest="lun",
        help="The LUN ID to assign on LU mapping"),
    "mappings": Option(
        "--mappings", action="append", dest="mappings",
        help="A :,,... mapping used in add map in replacement of --targetgroup and --initiatorgroup. Can be specified multiple times."),
    "name": Option(
        "--name", action="store", dest="name",
        help="A logical unit label"),
    "devnum": Option(
        "--devnum", action="store", dest="devnum",
        help="A XX:CU:LDEV logical unit name"),
})

# options accepted by every action
GLOBAL_OPTS = [
    OPT.array,
]

# actions kept for backward compatibility (none currently)
DEPRECATED_ACTIONS = []

# supported actions and their specific options, grouped by section for
# the online help
ACTIONS = {
    "Generic actions": {
        "add_disk": {
            "msg": "Add and present a disk",
            "options": [
                OPT.name,
                OPT.size,
                OPT.pool,
                OPT.mappings,
            ],
        },
        "add_map": {
            "msg": "Present a disk",
            "options": [
                OPT.devnum,
                OPT.mappings,
                OPT.lun,
            ],
        },
        "del_disk": {
            "msg": "Delete a disk",
            "options": [
                OPT.devnum,
            ],
        },
        "del_map": {
            "msg": "Unpresent a disk",
            "options": [
                OPT.devnum,
                OPT.mappings,
            ],
        },
        "rename_disk": {
            "msg": "Rename a disk",
            "options": [
                OPT.devnum,
                OPT.name,
            ],
        },
        "resize_disk": {
            "msg": "Resize a disk",
            "options": [
                OPT.devnum,
                OPT.size,
            ],
        },
    },
    "Low-level actions": {
        "list_arrays": {
            "msg": "List arrays",
        },
        "list_pools": {
            "msg": "List pools",
        },
        "list_arraygroups": {
            "msg": "List array groups",
        },
        "list_domains": {
            "msg": "List host groups",
        },
        "list_ports": {
            "msg": "List ports",
        },
        "list_logicalunits": {
            "msg": "List logical units",
            "options": [
                OPT.devnum,
            ],
        },
    },
}
class Arrays(object):
    """Enumerate the HDS arrays declared in auth.conf (sections with
    type=hds), optionally filtered by the <objects> name list, and
    expose them as Array objects.
    """

    # NOTE(review): this list is a class attribute mutated by __init__,
    # so successive instances share/accumulate entries -- confirm whether
    # that is intended before changing it to an instance attribute
    arrays = []

    def __init__(self, objects=[]):
        self.objects = objects
        if len(objects) > 0:
            self.filtering = True
        else:
            self.filtering = False
        cf = rcEnv.authconf
        if not os.path.exists(cf):
            return
        conf = ConfigParser.RawConfigParser()
        conf.read(cf)
        m = []
        for s in conf.sections():
            try:
                stype = conf.get(s, 'type')
            except:
                continue
            if stype != "hds":
                continue
            try:
                bin = conf.get(s, 'bin')
            except:
                bin = None
            try:
                jre_path = conf.get(s, 'jre_path')
                os.environ["HDVM_CLI_JRE_PATH"] = jre_path
            except:
                # bugfix: this handler's body was the bare name 'path',
                # which raised NameError whenever the optional jre_path
                # setting was absent; a missing jre_path is fine to ignore
                pass
            try:
                url = conf.get(s, 'url')
                arrays = conf.get(s, 'array').split()
                username = conf.get(s, 'username')
                password = conf.get(s, 'password')
                m += [(url, arrays, username, password, bin)]
            except:
                print("error parsing section", s)
                pass
        del(conf)
        done = []
        for url, arrays, username, password, bin in m:
            for name in arrays:
                if self.filtering and name not in self.objects:
                    continue
                if name in done:
                    continue
                self.arrays.append(Array(name, url, username, password, bin))
                done.append(name)

    def __iter__(self):
        for array in self.arrays:
            yield(array)

    def get_array(self, name):
        """Return the Array object named <name>, or None."""
        for array in self.arrays:
            if array.name == name:
                return array
        return None
class Array(object):
def __init__(self, name, url, username, password, bin=None):
self.keys = ['array', 'lu', 'arraygroup', 'port', 'pool']
self.name = name
self.model = name.split(".")[0]
self.serial = name.split(".")[-1]
self.url = url
self.username = username
self.password = password
if bin is None:
self.bin = "HiCommandCLI"
else:
self.bin = bin
self.domain_portname = {}
self.port_portname = {}
def cmd(self, cmd, scoped=True, xml=True):
if which(self.bin) is None:
raise ex.excError("Can not find %s"%self.bin)
l = [
self.bin, self.url, cmd[0],
"-u", self.username,
"-p", self.password,
]
if xml:
l += [
"-f", "xml",
]
if scoped:
l += [
"serialnum="+self.serial,
"model="+self.model,
]
if len(cmd) > 1:
l += cmd[1:]
#print(" ".join(l))
out, err, ret = justcall(l)
if ret != 0:
raise ex.excError(err)
return out, err, ret
def parse(self, out):
lines = out.splitlines()
if lines[0] == "RESPONSE:":
# discard the "RESPONSE:" first line
lines = lines[1:]
def get_key_val(line):
idx = line.index("=")
key = line[:idx].strip()
val = line[idx+1:].strip()
try:
val = int(val.replace(" ", "").replace(",", ""))
except ValueError:
pass
return key, val
def _parse_instance(lines, start, ref_indent):
#print("parse instance", start, lines[start-1])
data = {}
nidx = -1
for idx, line in enumerate(lines[start:]):
if nidx > 0 and start+idx < nidx:
continue
indent = len(line) - len(line.lstrip())
if indent < ref_indent:
return data, start+idx
if line.strip().startswith("List of "):
obj_type = line.strip().split()[3]
data[obj_type], nidx = _parse_list(lines, start+idx+1)
try:
key, val = get_key_val(line)
data[key] = val
except ValueError:
pass
return data, start+idx
def _parse_list(lines, start=0):
#if start > 0:
# print("parse list ", start, lines[start-1])
data = []
nidx = -1
ref_indent = len(lines[start]) - len(lines[start].lstrip())
marker = lines[start]
for idx, line in enumerate(lines[start:]):
if nidx > 0 and start+idx < nidx:
continue
indent = len(line) - len(line.lstrip())
if indent < ref_indent:
return data, start+idx
if indent > ref_indent:
continue
if line == marker:
instance, nidx = _parse_instance(lines, start+idx+1, indent)
data.append(instance)
return data, start+idx
data, nidx =_parse_list(lines)
return data
def get_array_data(self, scoped=True):
cmd = ['GetStorageArray']
out, err, ret = self.cmd(cmd, scoped=scoped)
tree = fromstring(out)
data = []
for elem in tree.getiterator("StorageArray"):
data.append(elem.attrib)
return data
def get_array(self):
return json.dumps(self.get_array_data(scoped=True), indent=4)
def get_lu_data(self, devnum=None):
cmd = ['GetStorageArray', 'subtarget=Logicalunit', 'lusubinfo=Path,LDEV,VolumeConnection']
if devnum:
cmd += ["displayname="+str(devnum)]
out, err, ret = self.cmd(cmd)
tree = fromstring(out)
data = []
for e_lu in tree.getiterator("LogicalUnit"):
lu = e_lu.attrib
lu["Path"] = []
for e_path in e_lu.getiterator("Path"):
lu["Path"].append(e_path.attrib)
for e_ldev in e_lu.getiterator("LDEV"):
ldev = e_ldev.attrib
for e_label in e_ldev.getiterator("ObjectLabel"):
lu["label"] = e_label.attrib["label"]
data.append(lu)
return data
def get_lu(self):
return json.dumps(self.get_lu_data(), indent=4)
@lazy
def lu_data(self):
return self.get_lu_data()
def to_devnum(self, devnum):
devnum = str(devnum)
if ":" in devnum:
# 00:00:00 or 00:00 format
devnum = devnum.replace(":", "")
return str(int(devnum, 16))
if "." in devnum:
# . format (collector inventory fmt)
devnum = devnum.split(".")[-1]
return str(int(devnum, 16))
if len(devnum) in (32, 33):
# wwid format
devnum = devnum[-4:]
return str(int(devnum, 16))
return devnum
def get_logicalunit(self, devnum=None):
if devnum is None:
return
for lu in self.lu_data:
if lu["devNum"] == devnum:
return lu
def get_pool_data(self):
cmd = ['GetStorageArray', 'subtarget=Pool']
out, err, ret = self.cmd(cmd)
tree = fromstring(out)
data = []
for elem in tree.getiterator("Pool"):
data.append(elem.attrib)
return data
@lazy
def pool_data(self):
return self.get_pool_data()
def get_pool(self):
return json.dumps(self.get_pool_data(), indent=4)
def get_arraygroup_data(self):
cmd = ['GetStorageArray', 'subtarget=ArrayGroup']
out, err, ret = self.cmd(cmd)
tree = fromstring(out)
data = []
for elem in tree.getiterator("ArrayGroup"):
data.append(elem.attrib)
return data
def get_arraygroup(self):
return json.dumps(self.get_arraygroup_data(), indent=4)
def get_port_data(self):
cmd = ['GetStorageArray', 'subtarget=Port']
out, err, ret = self.cmd(cmd)
tree = fromstring(out)
data = []
for elem in tree.getiterator("Port"):
port = elem.attrib
port["worldWidePortName"] = port["worldWidePortName"].replace(".", "").lower()
data.append(port)
return data
@lazy
def port_data(self):
return self.get_port_data()
def get_port(self):
return json.dumps(self.get_port_data(), indent=4)
def get_domain_data(self):
cmd = ['GetStorageArray', 'subtarget=HostStorageDomain', 'hsdsubinfo=WWN,Path']
out, err, ret = self.cmd(cmd)
tree = fromstring(out)
data = []
for elem in tree.getiterator("HostStorageDomain"):
d = elem.attrib
d["WWN"] = []
d["Path"] = []
for subelem in elem.getiterator("WWN"):
wwn = subelem.attrib
wwn["WWN"] = wwn["WWN"].replace(".", "").lower()
d["WWN"].append(wwn)
for subelem in elem.getiterator("Path"):
path = subelem.attrib
d["Path"].append(path)
data.append(d)
return data
@lazy
def domain_data(self):
return self.get_domain_data()
def get_target_port(self, target):
for port in self.port_data:
if target == port["worldWidePortName"]:
return port
def get_hba_domain_used_lun_ids(self, hba_id):
domain = self.get_hba_domain(hba_id)
return [int(path["LUN"]) for path in domain["Path"]]
def get_hba_free_lun_id(self, hba_ids):
used_lun_ids = set()
for hba_id in hba_ids:
used_lun_ids |= set(self.get_hba_domain_used_lun_ids(hba_id))
for lun_id in range(65536):
if lun_id not in used_lun_ids:
return lun_id
def get_hba_domain(self, hba_id):
for domain in self.domain_data:
for wwn in domain["WWN"]:
if hba_id == wwn["WWN"]:
return domain
def get_pool_by_name(self, poolname):
for pool in self.pool_data:
if pool["name"] == poolname:
return pool
def get_pool_by_id(self, pool_id):
for pool in self.pool_data:
if pool["poolID"] == pool_id:
return pool
def list_array(self, **kwargs):
data = self.get_array_data()
print(json.dumps(data, indent=4))
def list_arrays(self, **kwargs):
data = self.get_array_data(scoped=False)
print(json.dumps(data, indent=4))
def list_pools(self, **kwargs):
data = self.get_pool_data()
print(json.dumps(data, indent=4))
def list_arraygroups(self, **kwargs):
data = self.get_arraygroup_data()
print(json.dumps(data, indent=4))
def list_ports(self, **kwargs):
data = self.get_port_data()
print(json.dumps(data, indent=4))
def list_logicalunits(self, devnum=None, **kwargs):
data = self.get_lu_data(devnum=devnum)
print(json.dumps(data, indent=4))
def list_domains(self, **kwargs):
data = self.get_domain_data()
print(json.dumps(data, indent=4))
def translate_mappings(self, mappings):
internal_mappings = {}
for mapping in mappings:
elements = mapping.split(":")
hba_id = elements[0]
targets = elements[-1].split(",")
domain = self.get_hba_domain(hba_id)["domainID"]
if domain not in self.domain_portname:
self.domain_portname[domain] = []
self.domain_portname[domain].append(hba_id)
if domain not in internal_mappings:
internal_mappings[domain] = set()
for tgt_id in targets:
port = self.get_target_port(tgt_id)["displayName"]
if port is None:
continue
if port not in self.port_portname:
self.port_portname[port] = []
self.port_portname[port].append(tgt_id)
internal_mappings[domain].add(port)
return internal_mappings
def del_map(self, devnum=None, mappings=None, **kwargs):
if devnum is None:
raise ex.excError("--devnum is mandatory")
devnum = self.to_devnum(devnum)
results = []
if mappings is not None:
internal_mappings = self.translate_mappings(mappings)
else:
internal_mappings = {}
for dom in self.domain_data:
for path in dom["Path"]:
if devnum == path["devNum"]:
domain = path["domainID"]
portname = path["portName"]
if domain not in internal_mappings:
internal_mappings[domain] = set()
internal_mappings[domain].add(portname)
for domain, portnames in internal_mappings.items():
for portname in portnames:
result = self._del_map(devnum=devnum, domain=domain, portname=portname, **kwargs)
if result is not None:
results.append(result)
if len(results) > 0:
return results
def _del_map(self, devnum=None, domain=None, portname=None, **kwargs):
if devnum is None:
raise ex.excError("--devnum is mandatory")
if domain is None:
raise ex.excError("--domain is mandatory")
if portname is None:
raise ex.excError("--portname is mandatory")
cmd = [
"deletelun",
"devnum="+str(devnum),
"portname="+portname,
"domain="+domain,
]
out, err, ret = self.cmd(cmd, xml=False)
if ret != 0:
raise ex.excError(err)
def add_map(self, devnum=None, mappings=None, lun=None, **kwargs):
    """
    Map <devnum> to the domain/port pairs described by <mappings>.

    If <lun> is not given, a free lun id common to all mapped hbas is
    picked automatically.

    Returns the list of created path data, or None if nothing was mapped.
    """
    if devnum is None:
        raise ex.excError("--devnum is mandatory")
    devnum = self.to_devnum(devnum)
    if mappings is None:
        raise ex.excError("--mappings is mandatory")
    if lun is None:
        hba_ids = [mapping.split(":")[0] for mapping in mappings]
        lun = self.get_hba_free_lun_id(hba_ids)
    if lun is None:
        raise ex.excError("Unable to find a free lun id")
    results = []
    # mappings is guaranteed non-None here: the redundant
    # "if mappings is not None" guard was dropped
    internal_mappings = self.translate_mappings(mappings)
    for domain, portnames in internal_mappings.items():
        for portname in portnames:
            result = self._add_map(devnum=devnum, domain=domain, portname=portname, lun=lun, **kwargs)
            if result is not None:
                results.append(result)
    if len(results) > 0:
        return results
def _add_map(self, devnum=None, domain=None, portname=None, lun=None, **kwargs):
    """
    Create a single lun path; no-op (prints a message) when the path
    already exists in self.domain_data.

    Returns the created path data dict, or None when already mapped.
    """
    if devnum is None:
        raise ex.excError("--devnum is mandatory")
    if domain is None:
        raise ex.excError("--domain is mandatory")
    if portname is None:
        raise ex.excError("--portname is mandatory")
    if lun is None:
        raise ex.excError("--lun is mandatory")
    # normalize to strings so the comparison with cached data works
    domain = str(domain)
    devnum = str(devnum)
    for dom in self.domain_data:
        for path in dom["Path"]:
            if portname == path["portName"] and \
               domain == path["domainID"] and \
               devnum == path["devNum"]:
                print("Device %s is already mapped to port %s in domain %s" % (devnum, portname, domain))
                return
    cmd = [
        "addlun",
        "devnum="+str(devnum),
        "portname="+portname,
        "domain="+domain,
        "lun="+str(lun),
    ]
    out, err, ret = self.cmd(cmd, xml=False)
    if ret != 0:
        raise ex.excError(err)
    data = self.parse(out)
    return data[0]["Path"][0]
def add_disk(self, name=None, pool=None, size=None, lun=None, mappings=None, **kwargs):
    """
    Provision a virtual volume in <pool>, optionally label it <name>,
    map it per <mappings>, and declare it to the collector.

    Returns a dict with disk_id, disk_devid, the hba:target mappings
    created, and the raw lun data under driver_data.
    """
    if pool is None:
        raise ex.excError("--pool is mandatory")
    if size == 0 or size is None:
        raise ex.excError("--size is mandatory")
    pool_id = self.get_pool_by_name(pool)["poolID"]
    cmd = [
        "addvirtualvolume",
        "capacity="+str(convert_size(size, _to="KB")),
        "capacitytype=KB",
        "poolid="+str(pool_id),
    ]
    out, err, ret = self.cmd(cmd, xml=False)
    if ret != 0:
        raise ex.excError(err)
    data = self.parse(out)
    # first lu of the first array group of the command report
    ret = data[0]["ArrayGroup"][0]["Lu"][0]
    if name:
        self.rename_disk(devnum=ret["devNum"], name=name)
    if mappings:
        self.add_map(name=name, devnum=ret["devNum"], lun=lun, mappings=mappings)
    lun_data = self.get_lu_data(devnum=ret["displayName"])[0]
    self.push_diskinfo(lun_data, name, size)
    # reverse-map the created array paths to hba:target pairs, using
    # the lookup tables filled by translate_mappings() via add_map()
    mappings = {}
    for path in lun_data["Path"]:
        domain = path["domainID"]
        port = path["portName"]
        if domain not in self.domain_portname:
            continue
        if port not in self.port_portname:
            continue
        for hba_id in self.domain_portname[domain]:
            for tgt_id in self.port_portname[port]:
                mappings[hba_id+":"+tgt_id] = {
                    "hba_id": hba_id,
                    "tgt_id": tgt_id,
                    "lun": int(path["LUN"]),
                }
    results = {
        "disk_id": ".".join(lun_data["objectID"].split(".")[-2:]),
        "disk_devid": lun_data["displayName"],
        "mappings": mappings,
        "driver_data": {
            "lu": lun_data,
        },
    }
    return results
def resize_disk(self, devnum=None, size=None, **kwargs):
    """
    Resize a virtual volume. <size> may be absolute ("10g") or an
    increment ("+1g") relative to the current capacity.
    """
    if devnum is None:
        raise ex.excError("--devnum is mandatory")
    devnum = self.to_devnum(devnum)
    if size == 0 or size is None:
        raise ex.excError("--size is mandatory")
    if size.startswith("+"):
        # incremental resize: add to the current capacity
        incr = convert_size(size.lstrip("+"), _to="KB")
        data = self.get_logicalunit(devnum=devnum)
        current_size = int(data["capacityInKB"])
        size = str(current_size + incr)
    else:
        size = str(convert_size(size, _to="KB"))
    cmd = [
        "modifyvirtualvolume",
        "capacity="+size,
        "capacitytype=KB",
        "devnums="+str(devnum),
    ]
    out, err, ret = self.cmd(cmd, xml=False)
    if ret != 0:
        raise ex.excError(err)
def del_disk(self, devnum=None, **kwargs):
    """Unmap then delete a virtual volume, and deregister it from the collector."""
    if devnum is None:
        raise ex.excError("--devnum is mandatory")
    devnum = self.to_devnum(devnum)
    self.del_map(devnum=devnum)
    out, err, ret = self.cmd(
        ["deletevirtualvolume", "devnums=" + str(devnum)],
        xml=False)
    if ret != 0:
        raise ex.excError(err)
    self.del_diskinfo(devnum)
def rename_disk(self, devnum=None, name=None, **kwargs):
    """Relabel a lun. Returns the parsed command report."""
    if devnum is None:
        raise ex.excError("--devnum is mandatory")
    if name is None:
        raise ex.excError("--name is mandatory")
    devnum = self.to_devnum(devnum)
    out, err, ret = self.cmd(
        ["modifylabel", "devnums=" + str(devnum), "label=" + str(name)],
        xml=False)
    if ret != 0:
        raise ex.excError(err)
    return self.parse(out)[0]
def del_diskinfo(self, disk_id):
    """Remove the disk entry from the collector; no-op without an id or node."""
    if disk_id in (None, ""):
        return
    if self.node is None:
        return
    try:
        resp = self.node.collector_rest_delete("/disks/%s" % disk_id)
    except Exception as exc:
        raise ex.excError(str(exc))
    if "error" in resp:
        raise ex.excError(resp["error"])
    return resp
def push_diskinfo(self, data, name, size):
    """
    Declare a freshly provisioned disk to the collector.

    data: lun data dict (uses devNum and dpPoolID)
    name: disk label
    size: human-readable size, converted to MB for the collector

    Raises ex.excError on transport failure or on a collector-reported
    error. Returns the collector response.
    """
    if self.node is None:
        return
    try:
        ret = self.node.collector_rest_post("/disks", {
            "disk_id": self.serial+"."+str(data["devNum"]),
            "disk_devid": data["devNum"],
            "disk_name": str(name),
            "disk_size": convert_size(size, _to="MB"),
            "disk_alloc": 0,
            "disk_arrayid": self.name,
            "disk_group": self.get_pool_by_id(data["dpPoolID"]),
        })
    except Exception as exc:
        raise ex.excError(str(exc))
    # fix: test the collector response (ret), not the lun data dict (data);
    # the original checked 'data' but raised ret["error"], so collector
    # errors were silently ignored
    if "error" in ret:
        raise ex.excError(ret["error"])
    return ret
def do_action(action, array_name=None, node=None, **kwargs):
    """Locate the named array, bind the node, run <action> and print its result."""
    arrays = Arrays()
    array = arrays.get_array(array_name)
    if array is None:
        raise ex.excError("array %s not found" % array_name)
    array.node = node
    if not hasattr(array, action):
        raise ex.excError("not implemented")
    result = getattr(array, action)(**kwargs)
    if result is not None:
        print(json.dumps(result, indent=4))
def main(argv, node=None):
    """Parse the command line and dispatch to do_action()."""
    parser = OptParser(prog=PROG, options=OPT, actions=ACTIONS,
                       deprecated_actions=DEPRECATED_ACTIONS,
                       global_options=GLOBAL_OPTS)
    options, action = parser.parse_args(argv)
    do_action(action, node=node, **vars(options))
if __name__ == "__main__":
    try:
        ret = main(sys.argv)
    except ex.excError as exc:
        print(exc, file=sys.stderr)
        ret = 1
    except IOError as exc:
        if exc.errno == 32:
            # broken pipe (EPIPE): the output consumer went away; not fatal
            ret = 1
        else:
            raise
    # NOTE(review): main() returns None on success, and sys.exit(None)
    # exits with status 0, so the success path is correct by accident
    sys.exit(ret)
opensvc-1.8~20170412/lib/rcPkgDarwin.py 0000644 0001750 0001750 00000001741 13073467726 017621 0 ustar jkelbert jkelbert import os
from rcUtilities import call, which
from rcGlobalEnv import rcEnv
"""
format:
package-id: com.apple.pkg.X11User
version: 10.6.0.1.1.1238328574
volume: /
location: /
install-time: 1285389505
groups: com.apple.snowleopard-repair-permissions.pkg-group com.apple.FindSystemFiles.pkg-group
"""
def pkgversion(package):
    """Return the installed version of a Darwin package, or '' if unknown."""
    ret, out, err = call(['pkgutil', '--pkg-info', package],
                         errlog=False, cache=True)
    for line in out.split('\n'):
        fields = line.split(': ')
        if len(fields) == 2 and fields[0] == 'version':
            return fields[1]
    return ''
def listpkg():
    """
    Return installed Darwin packages as [nodename, pkg, version, ""] rows,
    or an empty list when pkgutil is unavailable.
    """
    if which('pkgutil') is None:
        return []
    ret, out, err = call(['pkgutil', '--packages'], errlog=False, cache=True)
    return [[rcEnv.nodename, pkg, pkgversion(pkg), ""]
            for pkg in out.split('\n') if len(pkg) > 0]
def listpatch():
    """Darwin has no separate patch inventory: always an empty list."""
    return []
opensvc-1.8~20170412/lib/resDiskVgLinux.py 0000644 0001750 0001750 00000022221 13073467726 020323 0 ustar jkelbert jkelbert import re
import os
import rcExceptions as ex
import resDisk
from rcGlobalEnv import rcEnv
from rcUtilitiesLinux import major, get_blockdev_sd_slaves, \
devs_to_disks, udevadm_settle
from rcUtilities import which, justcall, cache
class Disk(resDisk.Disk):
    """
    Linux LVM2 volume group resource driver.

    Activation is guarded by vg tags: the vg is tagged with the local
    nodename on start, and foreign tags are removed, so that lvm metadata
    reflects which node holds the vg.
    """
    def __init__(self,
                 rid=None,
                 name=None,
                 **kwargs):
        self.label = "vg "+name
        self.tag = rcEnv.nodename
        resDisk.Disk.__init__(self,
                              rid=rid,
                              name=name,
                              type='disk.vg',
                              **kwargs)

    def info(self):
        data = [
            ["name", self.name],
        ]
        return self.fmt_info(data)

    def is_child_dev(self, device):
        """Return True if <device> is a lv block device belonging to this vg."""
        l = device.split("/")
        if len(l) != 4 or l[1] != "dev":
            return False
        if l[2] == "mapper":
            dmname = l[3]
            if "-" not in dmname:
                return False
            # in /dev/mapper names a literal '-' is escaped as '--':
            # protect escaped dashes before splitting vg from lv
            # (fix: the replace() result was previously discarded)
            dmname = dmname.replace("--", "#")
            _l = dmname.split("-")
            if len(_l) != 2:
                return False
            vgname = _l[0].replace("#", "-")
        else:
            vgname = l[2]
        if vgname == self.name:
            return True
        return False

    def has_it(self):
        """True if the vg exists on this node."""
        data = self.get_tags()
        if self.name in data:
            return True
        return False

    def is_up(self):
        """Returns True if the volume group is present and activated
        """
        if not self.has_it():
            return False
        data = self.get_lvs_attr()
        if self.name not in data:
            # no lv ... happens in provisioning, where lv are not created yet
            self.log.info("no logical volumes. consider up")
            return True
        for attr in data[self.name].values():
            if re.search('....a.', attr) is not None:
                # at least one lv is active
                return True
        return False

    @cache("vg.lvs.attr")
    def get_lvs_attr(self):
        """Map vg name -> lv name -> lv_attr string, from 'lvs'."""
        cmd = ['lvs', '-o', 'vg_name,lv_name,lv_attr', '--noheadings', '--separator=;']
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError
        data = {}
        for line in out.splitlines():
            l = line.split(";")
            if len(l) != 3:
                continue
            vgname = l[0].strip()
            lvname = l[1].strip()
            attr = l[2].strip()
            if vgname not in data:
                data[vgname] = {}
            data[vgname][lvname] = attr
        return data

    @cache("vg.tags")
    def get_tags(self):
        """Map vg name -> list of vg tags, from 'vgs'."""
        cmd = ['vgs', '-o', 'vg_name,tags', '--noheadings', '--separator=;']
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError
        data = {}
        for line in out.splitlines():
            l = line.split(";")
            if len(l) == 1:
                data[l[0].strip()] = []
            if len(l) == 2:
                data[l[0].strip()] = l[1].strip().split(",")
        return data

    def test_vgs(self):
        """True if the vg is visible; invalidates the tags cache otherwise."""
        data = self.get_tags()
        if self.name not in data:
            self.clear_cache("vg.tags")
            return False
        return True

    def remove_tag(self, tag):
        cmd = ['vgchange', '--deltag', '@'+tag, self.name]
        (ret, out, err) = self.vcall(cmd)
        self.clear_cache("vg.tags")

    def list_tags(self, tags=[]):
        """Return the vg tags, retrying until lvm metadata is visible."""
        tmo = 5
        try:
            self.wait_for_fn(self.test_vgs, tmo, 1, errmsg="vgs is still reporting the vg as not found after %d seconds"%tmo)
        except ex.excError as e:
            self.log.warning(str(e))
            ret, out, err = self.vcall(["pvscan"])
        # last chance
        data = self.get_tags()
        if self.name not in data:
            raise ex.excError("vg %s not found" % self.name)
        return data[self.name]

    def remove_tags(self, tags=[]):
        for tag in tags:
            tag = tag.lstrip('@')
            if len(tag) == 0:
                continue
            self.remove_tag(tag)

    def add_tags(self):
        cmd = ['vgchange', '--addtag', '@'+self.tag, self.name]
        (ret, out, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError
        self.clear_cache("vg.tags")

    def activate_vg(self):
        cmd = ['vgchange', '-a', 'y', self.name]
        ret, out, err = self.vcall(cmd)
        self.clear_cache("vg.lvs")
        self.clear_cache("vg.lvs.attr")
        self.clear_cache("vg.tags")
        if ret != 0:
            raise ex.excError

    def _deactivate_vg(self):
        """One deactivation attempt; True when the vg ends up inactive."""
        cmd = ['vgchange', '-a', 'n', self.name]
        ret, out, err = self.vcall(cmd, err_to_info=True)
        self.clear_cache("vg.lvs")
        self.clear_cache("vg.lvs.attr")
        self.clear_cache("vg.tags")
        if ret == 0:
            return True
        if not self.is_up():
            return True
        return False

    def deactivate_vg(self):
        self.wait_for_fn(self._deactivate_vg, 3, 1, errmsg="deactivation failed to release all logical volumes")

    def do_start(self):
        curtags = self.list_tags()
        tags_to_remove = set(curtags) - set([self.tag])
        if len(tags_to_remove) > 0:
            self.remove_tags(tags_to_remove)
        if self.tag not in curtags:
            self.add_tags()
        if self.is_up():
            self.log.info("%s is already up" % self.label)
            return 0
        self.can_rollback = True
        self.activate_vg()

    def remove_dev_holders(self, devpath, tree):
        """Remove upper-layer holders of <devpath> not owned by svc resources."""
        dev = tree.get_dev_by_devpath(devpath)
        holders_devpaths = set()
        holder_devs = dev.get_children_bottom_up()
        for holder_dev in holder_devs:
            holders_devpaths |= set(holder_dev.devpath)
        holders_devpaths -= set(dev.devpath)
        holders_handled_by_resources = self.svc.devlist(filtered=False) & holders_devpaths
        if len(holders_handled_by_resources) > 0:
            raise ex.excError("resource %s has holders handled by other resources: %s" % (self.rid, ", ".join(holders_handled_by_resources)))
        for holder_dev in holder_devs:
            holder_dev.remove(self)

    def remove_holders(self):
        import glob
        import rcDevTreeLinux
        tree = rcDevTreeLinux.DevTree()
        tree.load()
        for lvdev in glob.glob("/dev/mapper/%s-*"%self.name.replace("-", "--")):
            # skip raid/mirror internal subdevices
            # (fix: '_mlog_' had a stray leading space, making that filter dead)
            if "_rimage_" in lvdev or "_rmeta_" in lvdev or \
               "_mimage_" in lvdev or "_mlog_" in lvdev or \
               lvdev.endswith("_mlog"):
                continue
            self.remove_dev_holders(lvdev, tree)

    def do_stop(self):
        if not self.is_up():
            self.log.info("%s is already down" % self.label)
            return
        self.remove_holders()
        self.remove_tags([self.tag])
        udevadm_settle()
        self.deactivate_vg()

    @cache("vg.lvs")
    def vg_lvs(self):
        """Map vg name -> list of lv names, from 'vgs'."""
        cmd = ['vgs', '--noheadings', '-o', 'vg_name,lv_name', '--separator', ';']
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError
        data = {}
        for line in out.splitlines():
            try:
                vgname, lvname = line.split(";")
                vgname = vgname.strip()
            except ValueError:
                # malformed line: skip instead of reusing stale names
                # (fix: was 'pass', which could NameError or duplicate
                # the previous line's values)
                continue
            if vgname not in data:
                data[vgname] = []
            data[vgname].append(lvname.strip())
        return data

    @cache("vg.pvs")
    def vg_pvs(self):
        """Map vg name -> list of pv paths, from 'vgs'."""
        cmd = ['vgs', '--noheadings', '-o', 'vg_name,pv_name', '--separator', ';']
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError
        data = {}
        for line in out.splitlines():
            try:
                vgname, pvname = line.split(";")
                vgname = vgname.strip()
            except ValueError:
                # malformed line: skip (same fix as vg_lvs)
                continue
            if vgname not in data:
                data[vgname] = []
            data[vgname].append(pvname.strip())
        return data

    def devlist(self):
        """Return the set of pv and existing lv device paths of the vg."""
        if not self.has_it():
            return set()
        if self.devs != set():
            return self.devs
        self.devs = set()
        data = self.vg_pvs()
        if self.name in data:
            self.devs |= set(data[self.name])
        data = self.vg_lvs()
        if self.name in data:
            for lvname in data[self.name]:
                lvp = "/dev/"+self.name+"/"+lvname
                if os.path.exists(lvp):
                    self.devs.add(lvp)
        if len(self.devs) > 0:
            self.log.debug("found devs %s held by vg %s" % (self.devs, self.name))
        return self.devs

    def disklist(self):
        if self.disks != set():
            return self.disks
        self.disks = set()
        pvs = self.devlist()
        self.disks = devs_to_disks(self, pvs)
        self.log.debug("found disks %s held by vg %s" % (self.disks, self.name))
        return self.disks

    def unprovision(self):
        m = __import__("provDiskVgLinux")
        prov = getattr(m, "ProvisioningDisk")(self)
        prov.unprovisioner()

    def provision(self):
        m = __import__("provDiskVgLinux")
        prov = getattr(m, "ProvisioningDisk")(self)
        prov.provisioner()
opensvc-1.8~20170412/lib/rcStatsCollectSunOS.py 0000644 0001750 0001750 00000013622 13073467726 021270 0 ustar jkelbert jkelbert """
YYYY-MM-DD hh:mm:ss ZONE SWAP RSS CAP at avgat pg avgpg NPROC mem% cpu% TIME LastReboot
"""
from __future__ import print_function
import os, sys, platform
import time
import datetime
import subprocess
from rcUtilities import justcall, which
def collect(node):
    """
    Sample per-zone resource usage on SunOS (prstat -Z and rcapstat -z)
    plus each zone's last boot time (who -b), and append one line per
    zone to /var/adm/zonestat/zsDD (DD = day of month).

    NOTE(review): parses subprocess stdout as text; this assumes a
    python2 interpreter (py3 readline() returns bytes) — confirm.
    """
    now = str(datetime.datetime.now())
    zs_d = os.path.join(os.sep, 'var', 'adm', 'zonestat')
    zs_prefix = 'zs'
    # one file per day-of-month, recycled monthly
    zs_f = os.path.join(zs_d, zs_prefix + datetime.datetime.now().strftime("%d"))
    datenow = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    n = datetime.datetime.now()
    tn = time.mktime(n.timetuple())
    if not os.path.exists(zs_d):
        os.makedirs(zs_d)
    try:
        t = os.path.getmtime(zs_f)
        d = tn - t
    except:
        # file absent: treat as fresh
        d = 0
    if d > 27*24*3600:
        # stale file from last month: start over
        os.remove(zs_f)
    f = open(zs_f, "a")
    stor = {}
    # pass 1: prstat zone summary (swap, rss, nproc, mem%, cpu%, time)
    p = subprocess.Popen('/usr/bin/prstat -Z -n1,60 1 1',
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT,
                         shell=True,
                         bufsize=0)
    out = p.stdout.readline()
    # pr: inside the per-zone section (between ZONEID header and Total:)
    pr = 0
    while out:
        line = out
        line = line.rstrip("\n")
        if "ZONEID" in line:
            pr = 1
            out = p.stdout.readline()
            continue
        if "Total:" in line:
            pr = 0
            out = p.stdout.readline()
            continue
        if "%" in line and pr == 1:
            fields = line.split()
            # fields[7] is the zone name
            stor[fields[7]] = {
                'SWAP': fields[2],
                'RSS': fields[3],
                'CAP': '0',
                'at': '0',
                'avgat': '0',
                'pg': '0',
                'avgpg': '0',
                'NPROC': fields[1],
                'mem': fields[4],
                'cpu': fields[6],
                'TIME': fields[5]
            }
        out = p.stdout.readline()
    p.wait()
    # pass 2: rcapstat; skip the first sample (fi), keep the second (pr)
    fi=1
    pr=0
    p = subprocess.Popen('/usr/bin/rcapstat -z 1 2',
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT,
                         bufsize=0,
                         shell=True)
    out = p.stdout.readline()
    while out:
        line = out
        line = line.rstrip("\n")
        if "id zone" in line and fi == 1:
            fi = 0
            out = p.stdout.readline()
            continue
        if "id zone" in line and fi == 0:
            pr = 1
            out = p.stdout.readline()
            continue
        if not "id zone" in line and pr == 1:
            fields = line.split()
            # merge cap/paging stats into the prstat records
            stor[fields[1]]['CAP'] = fields[5]
            stor[fields[1]]['at'] = fields[6]
            stor[fields[1]]['avgat'] = fields[7]
            stor[fields[1]]['pg'] = fields[8]
            stor[fields[1]]['avgpg'] = fields[9]
        out = p.stdout.readline()
    p.wait()
    # pass 3: last reboot per zone; global zone is reported as the node name
    for z in stor:
        zn = z
        if z == 'global' :
            zn = platform.node()
            p = subprocess.Popen('/usr/bin/who -b',
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT,
                                 bufsize=0,
                                 shell=True)
        else :
            p = subprocess.Popen('/usr/sbin/zlogin ' + z + ' /usr/bin/who -b',
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT,
                                 bufsize=0,
                                 shell=True)
        out = p.stdout.readline()
        txt = out.split()
        # txt[-3:] is the boot date/time tail of 'who -b' output
        print(datenow, zn, stor[z]['SWAP'], stor[z]['RSS'], stor[z]['CAP'], stor[z]['at'], stor[z]['avgat'], stor[z]['pg'], stor[z]['avgpg'], stor[z]['NPROC'], stor[z]['mem'], stor[z]['cpu'], stor[z]['TIME'], txt[-3], txt[-2], txt[-1], file=f)
        p.wait()
"""
fs_u
"""
def fs_u():
    """Gather filesystem usage rows (vxfs, ufs, zfs) as (vars, vals)."""
    cols = ['date',
            'nodename',
            'mntpt',
            'size',
            'used']
    rows = []
    for fstype in ("vxfs", "ufs"):
        rows += fs_u_t(fstype)
    rows += fs_u_zfs()
    return cols, rows
def fs_u_t(t):
    """
    Return df usage rows for fstype <t>:
    [now, nodename, mountpoint, size_kb, used_pct].

    NOTE(review): relies on 'now' and 'node' being in scope at call time
    (set by the stats collector) — confirm against the caller.
    """
    if not which('df'):
        return []
    cmd = ['df', '-F', t, '-k']
    (out,err,ret) = justcall(cmd)
    if ret != 0:
        return []
    lines = out.split('\n')
    if len(lines) < 2:
        return []
    vals = []
    # skip the df header line
    for line in lines[1:]:
        l = line.split()
        if len(l) == 5:
            # wrapped line: device column on the previous line; realign
            l = [''] + l
        elif len(l) != 6:
            continue
        vals.append([now, node.nodename, l[5], l[1], l[4].replace('%','')])
    return vals
def fs_u_zfs():
    """
    Return zfs dataset usage rows: [now, nodename, name, total_kb, used_pct].

    Skips snapshots/clones ('@' in name) and opensvc sync snapshots.
    NOTE(review): relies on 'now' and 'node' being in scope at call time.
    """
    if not which('zfs'):
        return []
    cmd = ['zfs', 'list', '-o', 'name,used,avail,mountpoint', '-H']
    (out,err,ret) = justcall(cmd)
    if ret != 0:
        return []
    lines = out.split('\n')
    if len(lines) == 0:
        return []
    vals = []
    for line in lines:
        l = line.split()
        if len(l) != 4:
            continue
        if "@" in l[0]:
            # do not report clone usage
            continue
        if "osvc_sync_" in l[0]:
            # do not report osvc sync snapshots fs usage
            continue
        used = convert(l[1])
        if l[2] == '0':
            # normalize bare '0' so convert() accepts it
            l[2] = '0K'
        avail = convert(l[2])
        total = used + avail
        pct = used / total * 100
        vals.append([now, node.nodename, l[0], str(total), str(pct)])
    return vals
def convert(s):
    """
    Convert a zfs size string ('1,5G', '300K', '2M', ...) to a float
    number of kilobytes. Commas are accepted as decimal separators.

    Raises:
        ValueError: when the string is too short or has no recognized
            unit suffix. (fix: the original used a bare 'raise' with no
            active exception, which itself crashes with RuntimeError.)
    """
    s = s.replace(',', '.')
    if len(s) < 2:
        raise ValueError("can not convert size %r" % s)
    if s.endswith('T'):
        return float(s[:-1])*1024*1024*1024
    if s.endswith('G'):
        return float(s[:-1])*1024*1024
    if s.endswith('M'):
        return float(s[:-1])*1024
    if s.endswith('K'):
        return float(s[:-1])
    raise ValueError("unknown size unit in %r" % s)
node.collector.call('push_stats_fs_u', fs_u())
opensvc-1.8~20170412/lib/resIpCrossbow.py 0000644 0001750 0001750 00000010503 13073467726 020206 0 ustar jkelbert jkelbert import time
import resIpSunOS as Res
import rcExceptions as ex
from subprocess import *
from rcGlobalEnv import rcEnv
from rcUtilities import which, to_cidr, bdecode
rcIfconfig = __import__('rcIfconfig'+rcEnv.sysname)
class Ip(Res.Ip):
    """
    Solaris Crossbow (ipadm) ip resource driver.

    Addresses are managed as temporary ipadm address objects named
    <ipdev>/<ipdevExt>.
    """
    def __init__(self,
                 rid=None,
                 ipdev=None,
                 ipname=None,
                 mask=None,
                 gateway=None,
                 ipdevExt="v4",
                 **kwargs):
        self.ipdevExt = ipdevExt
        Res.Ip.__init__(self,
                        rid=rid,
                        ipdev=ipdev,
                        ipname=ipname,
                        mask=mask,
                        gateway=gateway,
                        **kwargs)
        self.label = self.label + "/" + self.ipdevExt
        if not which('ipadm'):
            raise ex.excInitError("crossbow ips are not supported on this system")
        if 'noalias' not in self.tags:
            self.tags.add('noalias')

    def stopip_cmd(self):
        """Delete the addr object; also delete the ip interface when unused."""
        ret, out, err = (0, '', '')
        if self.gateway is not None:
            cmd=['route', '-q', 'delete', 'default', self.gateway]
            r, o, e = self.call(cmd, info=True, outlog=False, errlog=False)
            ret += r
        cmd=['ipadm', 'delete-addr', self.stacked_dev+'/'+self.ipdevExt]
        r, o, e = self.vcall(cmd)
        ret += r
        out += o
        err += e
        cmd = ['ipadm', 'show-addr', '-p', '-o', 'state', self.stacked_dev]
        p = Popen(cmd, stdin=None, stdout=PIPE, stderr=PIPE, close_fds=True)
        # fix: drop empty lines; ''.split("\n") yields [''] which made
        # the emptiness test always true and delete-ip unreachable
        _out = [line for line in bdecode(p.communicate()[0]).split("\n")
                if line.strip()]
        if len(_out) > 0:
            self.log.info("skip delete-ip because addrs still use the ip")
            return ret, out, err
        cmd=['ipadm', 'delete-ip', self.stacked_dev]
        r, o, e = self.vcall(cmd)
        ret += r
        out += o
        err += e
        return ret, out, err

    def wait_net_smf(self, max_wait=30):
        """Wait up to <max_wait> seconds for network/routing-setup online."""
        r = 0
        prev_s = None
        while True:
            s = self.get_smf_status("network/routing-setup")
            if s == "online":
                break
            if s != prev_s or prev_s is None:
                self.log.info("waiting for network/routing-setup online state. current state: %s" % s)
                prev_s = s
            r += 1
            if r > max_wait:
                self.log.error("timeout waiting for network/routing-setup online state")
                break
            time.sleep(1)

    def get_smf_status(self, fmri):
        """Return the smf service state, or 'undef' when svcs fails."""
        cmd = ["/usr/bin/svcs", "-H", "-o", "state", fmri]
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        if p.returncode != 0:
            return "undef"
        return bdecode(out).strip()

    def startip_cmd(self):
        """Create the ip interface if needed, then the temporary address."""
        self.wait_net_smf()
        ret, out, err = (0, '', '')
        cmd = ['ipadm', 'show-if', '-p', '-o', 'state', self.stacked_dev]
        p = Popen(cmd, stdout=PIPE, stderr=PIPE, close_fds=True)
        # fix: drop empty lines so a missing interface (no output) is
        # detected and create-ip actually runs
        _out = [line for line in bdecode(p.communicate()[0]).split("\n")
                if line.strip()]
        if len(_out) == 0:
            cmd=['ipadm', 'create-ip', '-t', self.stacked_dev]
            r, o, e = self.vcall(cmd)
        cmd=['ipadm', 'create-addr', '-t', '-T', 'static', '-a', self.addr+"/"+to_cidr(self.mask), self.stacked_dev+'/'+self.ipdevExt]
        r, o, e = self.vcall(cmd)
        if r != 0:
            cmd=['ipadm', 'show-if']
            self.vcall(cmd)
            raise ex.excError("Interface %s is not up. ipadm cannot create-addr over it. Retrying..." % self.stacked_dev)
        ret += r
        out += o
        err += e
        if self.gateway is not None:
            cmd=['route', '-q', 'add', 'default', self.gateway]
            r, o, e = self.call(cmd, info=True, outlog=False, errlog=False)
            ret += r
        return ret, out, err

    def allow_start(self):
        """Raise the appropriate exception when the ip must not be started."""
        if 'noaction' in self.tags:
            raise ex.IpNoActions(self.addr)
        # (fix: removed dead locals retry/interval/ok and a redundant
        # 'import time' — time is already imported at module scope)
        if self.is_up() is True:
            self.log.info("%s is already up on %s" % (self.addr, self.ipdev))
            raise ex.IpAlreadyUp(self.addr)
        if not hasattr(self, 'abort_start_done') and 'nonrouted' not in self.tags and self.check_ping():
            self.log.error("%s is already up on another host" % (self.addr))
            raise ex.IpConflict(self.addr)
opensvc-1.8~20170412/lib/rcIbmDs.py 0000644 0001750 0001750 00000006443 13073467726 016735 0 ustar jkelbert jkelbert import rcExceptions as ex
import os
import ConfigParser
import tempfile
import sys
from subprocess import *
from rcGlobalEnv import rcEnv
if rcEnv.pathbin not in os.environ['PATH']:
os.environ['PATH'] += ":"+rcEnv.pathbin
def dscli(cmd, hmc1, hmc2, username, pwfile, log=None):
    """
    Run <cmd> through the IBM DS dscli shell, feeding the command text
    on stdin. Returns (out, err) with the 'dscli>' prompts stripped.

    Raises ex.excError on non-zero exit.
    NOTE(review): communicate(input=cmd) passes a str — assumes python2;
    on python3 this would need bytes or universal_newlines. Confirm.
    """
    if len(hmc1) != 0:
        _hmc1 = ['-hmc1', hmc1]
    else:
        _hmc1 = []
    if len(hmc2) != 0:
        _hmc2 = ['-hmc2', hmc2]
    else:
        _hmc2 = []
    _cmd = ['/opt/ibm/dscli/dscli'] + _hmc1 + _hmc2 +['-user', username, '-pwfile', pwfile]
    if log is not None:
        log.info(cmd + ' | ' + ' '.join(_cmd))
    p = Popen(_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate(input=cmd)
    # strip interactive prompts from the captured streams
    out = out.replace("dscli>", "")
    err = err.replace("dscli>", "")
    if log is not None:
        if len(out) > 0:
            log.info(out)
        if len(err) > 0:
            log.error(err)
    if p.returncode != 0:
        #print >>sys.stderr, out, err
        raise ex.excError("dscli command execution error")
    return out, err
class IbmDss(object):
    """
    Collection of IBM DS arrays declared in auth.conf (type = ibmds),
    optionally filtered by the <objects> name list.
    """
    def __init__(self, objects=[]):
        # when objects is non-empty, only those sections are loaded
        self.objects = objects
        if len(objects) > 0:
            self.filtering = True
        else:
            self.filtering = False
        self.arrays = []
        cf = rcEnv.authconf
        if not os.path.exists(cf):
            return
        conf = ConfigParser.RawConfigParser()
        conf.read(cf)
        m = {}
        for s in conf.sections():
            if not conf.has_option(s, "type") or \
               conf.get(s, "type") != "ibmds":
                continue
            if self.filtering and not s in self.objects:
                continue
            # the dscli password file must be pre-created by the admin
            pwfile = os.path.join(rcEnv.pathvar, s+'.pwfile')
            if not os.path.exists(pwfile):
                raise ex.excError("%s does not exists. create it with 'dscli managepwfile ...'"%pwfile)
            try:
                username = conf.get(s, 'username')
                hmc1 = conf.get(s, 'hmc1')
                hmc2 = conf.get(s, 'hmc2')
                m[s] = [hmc1, hmc2, username, pwfile]
            except Exception as e:
                print("error parsing section", s, ":", e)
                pass
        del(conf)
        for name, creds in m.items():
            hmc1, hmc2, username, pwfile = creds
            self.arrays.append(IbmDs(name, hmc1, hmc2, username, pwfile))

    def __iter__(self):
        for array in self.arrays:
            yield(array)

    def get(self, array):
        """Return the IbmDs object named <array>, or raise ex.excError."""
        for o in self.arrays:
            if o.name == array:
                return o
        raise ex.excError("%s not defined in auth.conf or not usable" % array)
class IbmDs(object):
    """A single IBM DS array reachable through one or two HMCs via dscli."""
    def __init__(self, name, hmc1, hmc2, username, pwfile):
        self.name = name
        self.username = username
        self.pwfile = pwfile
        self.hmc1 = hmc1
        self.hmc2 = hmc2
        # inventory keys the pushdisks collector will fetch
        self.keys = ['combo']

    def dscli(self, cmd, log=None):
        """Run <cmd> on this array through the module-level dscli()."""
        return dscli(cmd, self.hmc1, self.hmc2, self.username, self.pwfile, log=log)

    def get_combo(self):
        """Fetch the full inventory in one dscli session (delim format)."""
        cmd = """setenv -banner off -header on -format delim
lsextpool
lsfbvol
lsioport
lssi
lsarray
lsarraysite
lsrank"""
        print("%s: %s"%(self.name, cmd))
        return self.dscli(cmd)[0]
if __name__ == "__main__":
    # ad-hoc test: dump the combo inventory of every configured array
    o = IbmDss()
    for ibmds in o:
        print(ibmds.get_combo())
opensvc-1.8~20170412/lib/resFsSgLinux.py 0000644 0001750 0001750 00000001231 13073467726 017774 0 ustar jkelbert jkelbert from rcGlobalEnv import rcEnv
Res = __import__("resFsLinux")
class Mount(Res.Mount):
    """
    Fs resource driver for ServiceGuard-controlled filesystems: the SG
    package performs the actual mount/umount, so start/stop are no-ops
    and status is read from cmviewcl when the monitor entry exists.
    """
    def __init__(self, **kwargs):
        self.sgname = kwargs.get("device", None)
        Res.Mount.__init__(self, **kwargs)

    def is_up(self):
        cmviewcl = self.svc.cmviewcl
        if 'resource' not in cmviewcl or self.mon_name not in cmviewcl['resource']:
            # no monitor entry: fall back to the generic mount check
            return Res.Mount.is_up(self)
        state = cmviewcl['resource'][self.mon_name][('status', rcEnv.nodename)]
        return state == "up"

    def start(self):
        pass

    def stop(self):
        pass
opensvc-1.8~20170412/lib/rcOsLinux.py 0000644 0001750 0001750 00000000330 13073467726 017325 0 ustar jkelbert jkelbert import shutil
import rcOs
class Os(rcOs.Os):
    """Linux implementations of forced reboot and crash."""
    def reboot(self):
        # sysrq 'b': immediate reboot, no sync or unmount
        with open("/proc/sysrq-trigger", "w") as trigger:
            trigger.write("b")

    def crash(self):
        # overwrite physical memory to force a crash
        shutil.copyfile('/dev/zero', '/dev/mem')
opensvc-1.8~20170412/lib/resFsVcsLinux.py 0000644 0001750 0001750 00000001035 13073467726 020160 0 ustar jkelbert jkelbert import rcStatus
import rcExceptions as ex
from rcGlobalEnv import rcEnv
Res = __import__("resFsLinux")
class Mount(Res.Mount):
    """Fs resource whose status is delegated to the VCS resource state."""
    def _status(self, verbose=False):
        try:
            state = self.svc.get_res_val(self.vcs_name, 'State')
        except ex.excError as err:
            self.status_log(str(err))
            return rcStatus.WARN
        if state == "ONLINE":
            return rcStatus.UP
        if state == "OFFLINE":
            return rcStatus.DOWN
        # unexpected state: surface it in the status log
        self.status_log(state)
        return rcStatus.WARN
opensvc-1.8~20170412/lib/resDiskVgSgLinux.py 0000644 0001750 0001750 00000000210 13073467726 020607 0 ustar jkelbert jkelbert Res = __import__("resDiskVgLinux")
class Disk(Res.Disk):
    """Vg resource under ServiceGuard control: start/stop are no-ops here."""
    def start(self):
        return 0

    def stop(self):
        return 0
opensvc-1.8~20170412/lib/rcSysReportOSF1.py 0000644 0001750 0001750 00000000230 13073467726 020326 0 ustar jkelbert jkelbert import rcSysReport
class SysReport(rcSysReport.SysReport):
    """OSF1 sysreport: no platform-specific overrides; base behavior only."""
    def __init__(self, node=None):
        rcSysReport.SysReport.__init__(self, node=node)
opensvc-1.8~20170412/lib/resFsLinux.py 0000644 0001750 0001750 00000042010 13073467726 017502 0 ustar jkelbert jkelbert """
Linux Fs resource driver module
"""
import os
from stat import ST_MODE, S_ISREG, S_ISBLK
import rcMountsLinux as rcMounts
import resFs as Res
from rcUtilities import qcall, protected_mount, getmount, justcall
from rcUtilitiesLinux import major, get_blockdev_sd_slaves, lv_exists, devs_to_disks, label_to_dev
from rcLoopLinux import file_to_loop
import rcExceptions as ex
from rcZfs import zfs_getprop, zfs_setprop
class Mount(Res.Mount):
"""
Linux Fs resource driver
"""
def __init__(self, **kwargs):
    """
    Linux fs resource init: resolve by-* symlinks and rbd realpaths,
    and prepare the per-fstype fsck command table.
    """
    self.mounts = None
    Res.Mount.__init__(self, **kwargs)
    # /dev/disk/by-* and rbd paths are symlinks; /proc/mounts shows
    # realpaths, so normalize early
    dev_realpath = os.path.realpath(self.device)
    if self.device.startswith("/dev/disk/by-") or dev_realpath.startswith("/dev/rbd"):
        self.device = dev_realpath
    # tolerated e2fsck exit codes:
    # 0 - No errors
    # 1 - File system errors corrected
    # 32 - E2fsck canceled by user request
    self.fsck_h = {
        'ext2': {
            'bin': 'e2fsck',
            'cmd': ['e2fsck', '-p', self.device],
            'allowed_ret': [0, 1, 32, 33]
        },
        'ext3': {
            'bin': 'e2fsck',
            'cmd': ['e2fsck', '-p', self.device],
            'allowed_ret': [0, 1, 32, 33]
        },
        'ext4': {
            'bin': 'e2fsck',
            'cmd': ['e2fsck', '-p', self.device],
            'allowed_ret': [0, 1, 32, 33]
        },
    }
    self.loopdevice = None
    self.dm_major = None
def umount_generic(self, mnt=None):
    """Plain umount of <mnt>, defaulting to the resource mount point."""
    target = self.mount_point if mnt is None else mnt
    return self.vcall(['umount', target], err_to_warn=True)
def try_umount(self, mnt=None):
    """
    Umount <mnt>, escalating to fuser-based process kill on failure.
    Returns 0 on success (or when already unmounted), non-zero otherwise.
    """
    if mnt is None:
        mnt = self.mount_point
    if self.fs_type == "zfs":
        ret, out, err = self.umount_zfs()
    else:
        ret, out, err = self.umount_generic(mnt)
    if ret == 0:
        return 0
    if "not mounted" in err:
        return 0
    # don't try to kill process using the source of a
    # protected bind mount
    if protected_mount(mnt):
        return 1
    # best effort kill of all processes that might block
    # the umount operation. The priority is given to mass
    # action reliability, ie don't contest oprator's will
    cmd = ['sync']
    ret, out, err = self.vcall(cmd)
    # bind-mount of a directory: fuser without -m, else scan the mount
    if os.path.isdir(self.device):
        fuser_opts = '-kv'
    else:
        fuser_opts = '-kmv'
    for _ in range(4):
        cmd = ['fuser', fuser_opts, mnt]
        (ret, out, err) = self.vcall(cmd, err_to_info=True)
        self.log.info('umount %s', mnt)
        cmd = ['umount', mnt]
        ret = qcall(cmd)
        if ret == 0:
            break
    return ret
def is_up(self):
    """
    Return True if the fs is mounted, trying every device aliasing
    scheme Linux may display in /proc/mounts (realpaths, LABEL/UUID,
    /dev/mapper names, loop devices, /dev/dm-* names).
    """
    self.mounts = rcMounts.Mounts()
    ret = self.mounts.has_mount(self.device, self.mount_point)
    if ret:
        return True
    # might be defined as a symlink. Linux display realpaths in /proc/mounts
    ret = self.mounts.has_mount(self.device,
                                os.path.realpath(self.mount_point))
    if ret:
        return True
    # might be defined as a symlink. Linux display realpaths in /proc/mounts
    ret = self.mounts.has_mount(os.path.realpath(self.device),
                                os.path.realpath(self.mount_point))
    if ret:
        return True
    # might be a mount by label or uuid
    for dev in self.devlist():
        ret = self.mounts.has_mount(dev, self.mount_point)
        if ret:
            return True
        ret = self.mounts.has_mount(dev, os.path.realpath(self.mount_point))
        if ret:
            return True
    # might be mount using a /dev/mapper/ name too
    elements = self.device.split('/')
    if len(elements) == 4 and elements[2] != "mapper":
        # build the escaped vg-lv mapper name ('-' doubled)
        dev = "/dev/mapper/%s-%s" % (elements[2].replace('-', '--'), elements[3].replace('-', '--'))
        ret = self.mounts.has_mount(dev, self.mount_point)
        if ret:
            return True
        ret = self.mounts.has_mount(dev, os.path.realpath(self.mount_point))
        if ret:
            return True
    if os.path.exists(self.device):
        try:
            fstat = os.stat(self.device)
            mode = fstat[ST_MODE]
        except:
            self.log.debug("can not stat %s", self.device)
            return False
        if S_ISREG(mode):
            # might be a loopback mount
            devs = file_to_loop(self.device)
            for dev in devs:
                ret = self.mounts.has_mount(dev, self.mount_point)
                if ret:
                    return True
                ret = self.mounts.has_mount(dev, os.path.realpath(self.mount_point))
                if ret:
                    return True
        elif S_ISBLK(mode):
            # might be a mount using a /dev/dm- name too
            dm_major = major('device-mapper')
            if os.major(fstat.st_rdev) == dm_major:
                dev = '/dev/dm-' + str(os.minor(fstat.st_rdev))
                ret = self.mounts.has_mount(dev, self.mount_point)
                if ret:
                    return True
                ret = self.mounts.has_mount(dev, os.path.realpath(self.mount_point))
                if ret:
                    return True
    return False
def realdev(self):
    """
    Resolve self.device to an actual block device path: LABEL=/UUID=
    specs are looked up, block devices are used as-is, and other paths
    (bind-mount sources) are resolved through the mount table.
    Returns None when resolution fails.
    """
    if self.device.startswith("LABEL=") or self.device.startswith("UUID="):
        _dev = label_to_dev(self.device)
        if _dev:
            return _dev
        return self.device
    try:
        mode = os.stat(self.device)[ST_MODE]
    except:
        self.log.debug("can not stat %s", self.device)
        return
    if os.path.exists(self.device) and S_ISBLK(mode):
        dev = self.device
    else:
        # not a block device: find the device of the mount hosting it
        mnt = getmount(self.device)
        if self.mounts is None:
            self.mounts = rcMounts.Mounts()
        mount = self.mounts.has_param("mnt", mnt)
        if mount is None:
            self.log.debug("can't find dev %s mounted in %s in mnttab",
                           mnt, self.device)
            return None
        dev = mount.dev
    return dev
def mplist(self):
    """Return the set of multipath devices backing this filesystem."""
    dev = self.realdev()
    if dev is None:
        return set()
    try:
        self.dm_major = major('device-mapper')
    except:
        # no device-mapper on this host: no multipaths either
        return set()
    return self._mplist([dev])
@staticmethod
def devname_to_dev(devname):
if 'cciss!' in devname:
return '/dev/cciss/'+devname.replace('cciss!', '')
return '/dev/'+devname
def _mplist(self, devs):
    """
    Recursively walk <devs> and their sysfs slaves, returning the set
    of devices that are multipath maps.
    """
    mps = set([])
    for dev in devs:
        devmap = False
        if 'dm-' in dev:
            # already a /dev/dm-N name: minor is in the name itself
            minor = int(dev.replace('/dev/dm-', ''))
            devname = dev.replace('/dev/', '')
            devmap = True
        else:
            try:
                statinfo = os.stat(dev)
            except:
                self.log.warning("can not stat %s", dev)
                continue
            minor = os.minor(statinfo.st_rdev)
            devname = 'dm-%i'%minor
            devmap = self.is_devmap(statinfo)
        if self.is_multipath(minor):
            mps |= set([dev])
        elif devmap:
            # plain dm device (lv, ...): recurse into its slaves
            syspath = '/sys/block/' + devname + '/slaves'
            if not os.path.exists(syspath):
                continue
            slaves = os.listdir(syspath)
            mps |= self._mplist([self.devname_to_dev(slave) for slave in slaves])
    return mps
def is_multipath(self, minor):
cmd = [
'dmsetup', '-j', str(self.dm_major),
'-m', str(minor),
'table'
]
ret, buff, err = self.call(cmd, errlog=False, cache=True)
if ret != 0:
return False
elements = buff.split()
if len(elements) < 3:
return False
if elements[2] != 'multipath':
return False
if 'queue_if_no_path' not in elements:
return False
cmd = [
'dmsetup', '-j', str(self.dm_major),
'-m', str(minor),
'status'
]
ret, buff, err = self.call(cmd, errlog=False, cache=True)
if ret != 0:
return False
elements = buff.split()
if elements.count('A') > 1:
return False
return True
def is_devmap(self, statinfo):
if os.major(statinfo.st_rdev) == self.dm_major:
return True
return False
def _disklist(self):
dev = self.realdev()
if dev is None:
return set([])
if dev.startswith("/dev/rbd"):
return set([])
try:
self.dm_major = major('device-mapper')
except:
return set([dev])
try:
statinfo = os.stat(dev)
except:
self.log.error("can not stat %s", dev)
raise ex.excError
if not self.is_devmap(statinfo):
return set([dev])
if lv_exists(self, dev):
# If the fs is built on a lv of a private vg, its
# disks will be given by the vg resource.
# if the fs is built on a lv of a shared vg, we
# don't want to account its disks : don't reserve
# them, don't account their size multiple times.
return set([])
devname = 'dm-' + str(os.minor(statinfo.st_rdev))
syspath = '/sys/block/' + devname + '/slaves'
devs = get_blockdev_sd_slaves(syspath)
return devs
    def disklist(self):
        # Map the underlying device set to disk names via devs_to_disks.
        return devs_to_disks(self, self._disklist())
def devlist(self):
dev = self.realdev()
if dev is None:
return set([])
return set([dev])
def can_check_writable(self):
if self.fs_type == "zfs":
return self._can_check_zfs_writable()
if len(self.mplist()) > 0:
self.log.debug("a multipath under fs has queueing enabled and no active path")
return False
return True
    def start(self):
        """
        Start the filesystem resource: handle loopback re-use, advise on
        zfs canmount, fsck, create the mount point, then mount.
        """
        if self.mounts is None:
            self.mounts = rcMounts.Mounts()
        Res.Mount.start(self)
        # loopback mount
        # if the file has already been binded to a loop re-use
        # the loopdev to avoid allocating another one
        if os.path.exists(self.device):
            try:
                mode = os.stat(self.device)[ST_MODE]
                if S_ISREG(mode):
                    devs = file_to_loop(self.device)
                    if len(devs) > 0:
                        self.loopdevice = devs[0]
                        mntopt_l = self.mount_options.split(',')
                        if "loop" in mntopt_l:
                            # already bound: the loop option is redundant
                            mntopt_l.remove("loop")
                            self.mount_options = ','.join(mntopt_l)
            except Exception as exc:
                raise ex.excError(str(exc))
        if self.fs_type == "zfs":
            self._check_zfs_canmount()
        if self.is_up() is True:
            self.log.info("%s is already mounted", self.label)
            return 0
        if self.fs_type == "btrfs":
            # make all member devices of the btrfs volume known to the kernel
            cmd = ['btrfs', 'device', 'scan']
            self.vcall(cmd)
        self.fsck()
        if not os.path.exists(self.mount_point):
            try:
                os.makedirs(self.mount_point, 0o755)
            except Exception as exc:
                raise ex.excError(str(exc))
        if self.fs_type == "zfs":
            self.mount_zfs()
        else:
            self.mount_generic()
        # invalidate the mounts cache and allow rollback of this action
        self.mounts = None
        self.can_rollback = True
def _can_check_zfs_writable(self):
pool = self.device.split("/")[0]
cmd = ["zpool", "status", pool]
out, err, ret = justcall(cmd)
if "state: SUSPENDED" in out:
self.status_log("pool %s is suspended" % pool)
return False
return True
def _check_zfs_canmount(self):
if 'noaction' not in self.tags and \
zfs_getprop(self.device, 'canmount') != 'noauto':
self.log.info("%s should be set to canmount=noauto (zfs set "
"canmount=noauto %s)", self.label, self.device)
def umount_zfs(self):
ret, out, err = self.vcall(['zfs', 'umount', self.device], err_to_info=True)
if ret != 0:
ret, out, err = self.vcall(['zfs', 'umount', '-f', self.device], err_to_info=True)
return ret, out, err
def mount_zfs(self):
if 'encap' not in self.tags and \
not self.svc.config.has_option(self.rid, 'zone') and \
zfs_getprop(self.device, 'zoned') != 'off':
if zfs_setprop(self.device, 'zoned', 'off'):
raise ex.excError
if zfs_getprop(self.device, 'mountpoint') != self.mount_point:
if not zfs_setprop(self.device, 'mountpoint', self.mount_point):
raise ex.excError
try:
os.unlink(self.mount_point+"/.opensvc")
except:
pass
ret, out, err = self.vcall(['zfs', 'mount', self.device])
if ret != 0:
ret, out, err = self.vcall(['zfs', 'mount', '-O', self.device])
if ret != 0:
raise ex.excError
def mount_generic(self):
if self.fs_type != "":
fstype = ['-t', self.fs_type]
else:
fstype = []
if self.mount_options != "":
mntopt = ['-o', self.mount_options]
else:
mntopt = []
if self.loopdevice is None:
device = self.device
else:
device = self.loopdevice
cmd = ['mount'] + fstype + mntopt + [device, self.mount_point]
ret, out, err = self.vcall(cmd)
if ret != 0:
raise ex.excError
def kill_users(self):
import glob
for path in glob.glob("/proc/*/fd/*") + glob.glob("/proc/*/cwd") + glob.glob("/proc/*/exe"):
try:
dest = os.path.realpath(path)
except:
continue
if dest.startswith(self.mount_point):
elements = path.split("/")
try:
pid = int(elements[2])
except:
continue
try:
with open("/proc/%d/cmdline" % pid, "r") as ofile:
cmdline = ofile.read()
except Exception as exc:
self.log.warning(str(exc))
cmdline = ""
self.log.info("kill -9 %d (cmdline: %s)", pid, cmdline)
os.kill(pid, 9)
def stop(self):
if self.mounts is None:
self.mounts = rcMounts.Mounts()
if self.is_up() is False:
self.log.info("%s is already umounted", self.label)
return
if not os.path.exists(self.mount_point):
raise ex.excError('mount point %s does not exist' % self.mount_point)
try:
os.stat(self.mount_point)
except OSError as exc:
if exc.errno == (5, 13):
self.log.warning("I/O error on mount point. try to umount anyway")
self.kill_users()
else:
raise ex.excError(str(exc))
self.remove_holders()
self.remove_deeper_mounts()
for _ in range(3):
ret = self.try_umount()
if ret == 0:
break
if ret != 0:
raise ex.excError('failed to umount %s'%self.mount_point)
self.mounts = None
    def remove_dev_holders(self, devpath, tree):
        """
        Remove the stacked holder devices of <devpath>, unless one of
        them is handled by another resource of the service, in which
        case raise excError.
        """
        dev = tree.get_dev_by_devpath(devpath)
        if dev is None:
            return
        holders_devpaths = set()
        holder_devs = dev.get_children_bottom_up()
        for holder_dev in holder_devs:
            holders_devpaths |= set(holder_dev.devpath)
        holders_devpaths -= set(dev.devpath)
        # never remove devices other resources are responsible for
        holders_handled_by_resources = self.svc.devlist(filtered=False) & holders_devpaths
        if len(holders_handled_by_resources) > 0:
            raise ex.excError("resource %s has holders handled by other "
                              "resources: %s" % (self.rid, ", ".join(holders_handled_by_resources)))
        for holder_dev in holder_devs:
            holder_dev.remove(self)
def remove_holders(self):
import glob
import rcDevTreeLinux
tree = rcDevTreeLinux.DevTree()
tree.load()
dev_realpath = os.path.realpath(self.device)
self.remove_dev_holders(dev_realpath, tree)
def remove_deeper_mounts(self):
mounts = rcMounts.Mounts()
mnt_realpath = os.path.realpath(self.mount_point)
for mount in mounts:
_mnt_realpath = os.path.realpath(mount.mnt)
if _mnt_realpath != mnt_realpath and \
_mnt_realpath.startswith(mnt_realpath+"/"):
ret = self.try_umount(_mnt_realpath)
if ret != 0:
break
opensvc-1.8~20170412/lib/rcLogger.py 0000644 0001750 0001750 00000014164 13073467726 017155 0 ustar jkelbert jkelbert import sys
import os
import errno
import logging
import logging.handlers
from rcGlobalEnv import rcEnv
from subprocess import *
min_name_len = 10
namelen = 10
namefmt = "%-"+str(namelen)+"s"
include_svcname = True
try:
type(PermissionError)
except:
PermissionError = IOError
import platform
import re
from rcColor import colorize, color
DEFAULT_HANDLERS = ["file", "stream", "syslog"]
class ColorStreamHandler(logging.StreamHandler):
    """
    A stream handler colorizing the name and level columns of each log
    record for terminal display.
    """
    def __init__(self, stream=None):
        logging.StreamHandler.__init__(self, stream)

    def format(self, record):
        text = logging.StreamHandler.format(self, record)
        # remove date, keep time
        line = re.sub(r'^....-..-.. ', "", text)
        parts = line.rstrip("\n").split(" - ")
        if len(parts) < 3:
            return line
        if not include_svcname:
            parts[1] = parts[1].split(".")[-1]
            if "#" not in parts[1] and parts[1] != "scheduler":
                parts[1] = ""
        if len(parts[1]) > namelen:
            # too long: keep the tail, flag the truncation
            parts[1] = "*" + parts[1][-(namelen-1):]
        parts[1] = namefmt % parts[1]
        parts[1] = colorize(parts[1], color.BOLD)
        parts[2] = "%-7s" % parts[2]
        parts[2] = parts[2].replace("ERROR", colorize("ERROR", color.RED))
        parts[2] = parts[2].replace("WARNING", colorize("WARNING", color.BROWN))
        parts[2] = parts[2].replace("INFO", colorize("INFO", color.LIGHTBLUE))
        return " ".join(parts)
class LoggerHandler(logging.handlers.SysLogHandler):
    """
    A syslog handler shelling out to logger(1), for platforms where
    /dev/log is a stream device the stdlib handler can not use.
    """
    def __init__(self, facility=logging.handlers.SysLogHandler.LOG_USER):
        logging.Handler.__init__(self)
        self.facility = facility
        self.formatter = None

    def close(self):
        # nothing to release: a logger(1) process is spawned per record
        pass

    def emit(self, record):
        try:
            msg = self.format(record)
            level = record.levelname.lower()
            cmd = ["logger", "-t", "", "-p", self.facility + "." + level, msg]
            proc = Popen(cmd, stdout=None, stderr=None, stdin=None, close_fds=True)
            proc.communicate()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
def set_namelen(svcs):
    """
    Compute the width of the logger name column from the longest rid
    (plus svcname when more than one service is logged) and update the
    module-level formatting globals.
    """
    global namelen
    global namefmt
    global include_svcname
    maxlen = min_name_len
    for svc in svcs:
        if svc.disabled:
            continue
        for res in svc.resources_by_id.values():
            if res is None:
                continue
            if res.is_disabled():
                continue
            width = len(res.rid)
            if res.subset:
                width += len(res.subset) + 1
            if len(svcs) > 1:
                include_svcname = True
                width += len(svc.svcname) + 1
            else:
                include_svcname = False
            maxlen = max(maxlen, width)
    namelen = maxlen
    namefmt = "%-" + str(namelen) + "s"
def initLogger(name, handlers=None):
    """
    Create and return the logger <name>, attaching the requested
    handlers among "file", "stream" and "syslog".

    The file handler rotates at 5MB keeping 5 backups; the stream
    handler verbosity follows the --debug/--warn/--error command line
    flags; the syslog handler is configured from the [syslog] section
    of the node configuration file.
    """
    if handlers is None:
        handlers = DEFAULT_HANDLERS
    if name == rcEnv.nodename:
        logfile = os.path.join(rcEnv.pathlog, "node") + '.log'
    else:
        logfile = os.path.join(rcEnv.pathlog, name) + '.log'
    log = logging.getLogger(name)
    log.handlers = []
    if "file" in handlers:
        try:
            fileformatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
            filehandler = logging.handlers.RotatingFileHandler(logfile,
                                                               maxBytes=5242880,
                                                               backupCount=5)
            filehandler.setFormatter(fileformatter)
            filehandler.setLevel(logging.DEBUG)
            log.addHandler(filehandler)
        except PermissionError:
            # unprivileged user: log to stream/syslog only
            pass
    if "stream" in handlers:
        streamformatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
        streamhandler = ColorStreamHandler()
        streamhandler.setFormatter(streamformatter)
        log.addHandler(streamhandler)
        # stream verbosity is driven by the command line flags
        if '--debug' in sys.argv:
            rcEnv.loglevel = logging.DEBUG
            streamhandler.setLevel(logging.DEBUG)
        elif '--warn' in sys.argv:
            rcEnv.loglevel = logging.WARNING
            streamhandler.setLevel(logging.WARNING)
        elif '--error' in sys.argv:
            rcEnv.loglevel = logging.ERROR
            streamhandler.setLevel(logging.ERROR)
        else:
            rcEnv.loglevel = logging.INFO
            streamhandler.setLevel(logging.INFO)
    if "syslog" in handlers:
        try:
            import ConfigParser
        except ImportError:
            import configparser as ConfigParser
        config = ConfigParser.RawConfigParser({})
        try:
            config.read(rcEnv.nodeconf)
        except:
            pass
        try:
            facility = config.get("syslog", "facility")
        except:
            facility = "daemon"
        try:
            host = config.get("syslog", "host")
        except:
            host = None
        try:
            port = int(config.get("syslog", "port"))
        except:
            port = None
        address = None
        if host is None and port is None:
            if os.path.exists("/dev/log"):
                address = os.path.realpath("/dev/log")
            elif os.path.exists("/var/run/syslog"):
                address = os.path.realpath("/var/run/syslog")
        if address is None:
            if host is None:
                host = "localhost"
            if port is None:
                port = 514
            address = (host, port)
        syslogformatter = logging.Formatter("opensvc: %(name)s %(message)s")
        try:
            sysloghandler = logging.handlers.SysLogHandler(address=address, facility=facility)
        except Exception as e:
            # getattr: not all exceptions raised here carry an errno
            # (the original 'e.errno' could raise AttributeError)
            if getattr(e, "errno", None) == errno.ENOTSOCK:
                # solaris /dev/log is a stream device
                sysloghandler = LoggerHandler(facility=facility)
            else:
                sysloghandler = None
        if sysloghandler:
            sysloghandler.setLevel(logging.INFO)
            sysloghandler.setFormatter(syslogformatter)
            log.addHandler(sysloghandler)
    log.setLevel(logging.DEBUG)
    return log
opensvc-1.8~20170412/lib/rcDevTreeHP-UX.py 0000644 0001750 0001750 00000013751 13073467726 020057 0 ustar jkelbert jkelbert import rcDevTree
import glob
import os
import re
from subprocess import *
from rcUtilities import which
from rcGlobalEnv import rcEnv
dim = __import__("rcDiskInfoHP-UX")
di = dim.diskInfo()
class DevTree(rcDevTree.DevTree):
    """
    Device tree loader for HP-UX, built from ioscan, scsimgr lun_map,
    vgdisplay and lvdisplay command outputs.
    """
    # per-vg physical extent size, filled by load_lvm()
    pe_size = {}

    def add_part(self, parent_devpath, child_devpath):
        # register a partition device and link it to its parent disk
        child_dev = self.add_disk(child_devpath)
        if child_dev is None:
            return
        parent_dev = self.get_dev_by_devpath(parent_devpath)
        if parent_dev is None:
            return
        child_dev.add_parent(parent_dev.devname)
        parent_dev.add_child(child_dev.devname)

    def add_disk(self, devpath):
        # register a disk device; returns the dev object or None when excluded
        devname = devpath.split('/')[-1]
        if devpath in self.lunmap:
            devtype = "multipath"
        else:
            devtype = "linear"
        size = di.disk_size(devpath)
        # exclude 0-sized md, Symmetrix gatekeeper and vcmdb
        if size in [0, 2, 30, 45]:
            return
        d = self.add_dev(devname, size, devtype)
        d.set_devpath(devpath)
        # also register the raw device path alias
        d.set_devpath(devpath.replace('/disk/', '/rdisk/').replace('/dsk/', '/rdsk/'))
        if devpath in self.lunmap:
            d.set_alias(self.lunmap[devpath]['wwid'])
        else:
            wwid = di.disk_id(devpath)
            if wwid != "":
                d.set_alias(wwid)
        return d

    def load_ioscan(self):
        # load disks and partitions from ioscan output
        if not which("/usr/sbin/ioscan"):
            return
        cmd = ["/usr/sbin/ioscan", "-FunNC", "disk"]
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        if p.returncode:
            # older ioscan has no -N option: retry without it
            if "illegal option -- N" not in err:
                return
            cmd = ["/usr/sbin/ioscan", "-FunC", "disk"]
            p = Popen(cmd, stdout=PIPE, stderr=PIPE)
            out, err = p.communicate()
            if p.returncode:
                return
        """
        scsi:wsio:T:T:F:31:188:0:disk:sdisk:0/2/1/0.0.0.0.0:0 0 5 18 0 0 0 0 195 124 63 185 173 253 214 203 :0:root.sba.lba.sasd.sasd_vbus.tgt.sdisk:sdisk:CLAIMED:DEVICE:HP DG146BB976:0:
                          /dev/disk/disk11      /dev/disk/disk11_p2   /dev/rdisk/disk11     /dev/rdisk/disk11_p2
                          /dev/disk/disk11_p1   /dev/disk/disk11_p3   /dev/rdisk/disk11_p1  /dev/rdisk/disk11_p3
        """
        for w in out.split():
            if not w.startswith('/dev/'):
                # NOTE(review): 'new' and 'disk' are only bound after a
                # non-/dev word is seen; assumes ioscan output never
                # starts with a /dev path -- confirm
                new = True
                continue
            if new:
                disk = w
                new = False
                d = self.add_disk(disk)
                continue
            if d is None or '/rdisk/' in w:
                continue
            elif '/pt/' in w:
                d.set_devpath(w)
            elif '_p' in w:
                self.add_part(disk, w)
            else:
                # arbitrary dsf alias
                d.set_devpath(w)

    def get_lunmap(self):
        # build the devpath -> wwid map from scsimgr lun_map, once
        if hasattr(self, "lunmap"):
            return
        self.lunmap = {}
        if not which("scsimgr"):
            return
        cmd = ["scsimgr", "lun_map"]
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        if p.returncode:
            return
        """
        LUN PATH INFORMATION FOR LUN : /dev/rdisk/disk10
        Total number of LUN paths     = 1
        World Wide Identifier(WWID)    = 0x5000c5000aba6793
        LUN path : lunpath0
        Class                     = lunpath
        Instance                  = 0
        Hardware path             = 0/2/1/0.0x5000c5000aba6791.0x0
        SCSI transport protocol   = sas
        State                     = UNOPEN
        Last Open or Close state  = ACTIVE
        """
        for line in out.split('\n'):
            if "INFORMATION" in line:
                disk = line.split()[-1]
                self.lunmap[disk] = {}
            if "WWID" in line:
                wwid = line.split()[-1].replace('0x', '')
                if wwid != "=":
                    self.lunmap[disk]['wwid'] = line.split()[-1].replace('0x', '')
                else:
                    # empty wwid value: drop the incomplete entry
                    del(self.lunmap[disk])

    def load_lv(self, lv):
        # register a logical volume and link it to its physical volumes
        if not which("lvdisplay"):
            return
        cmd = ["lvdisplay", "-v", lv]
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        if p.returncode:
            return
        vgname = lv.split('/')[2]
        # parser
        h = {}
        for line in out.split('\n'):
            line = line.strip()
            if 'LV Size' in line:
                size = int(line.split()[-1])
            if not line.startswith('/dev'):
                continue
            pv, le, pe = line.split()
            # pe count * extent size = space used on this pv
            h[pv] = int(pe) * self.pe_size[vgname]
        # use the linux lvm naming convention
        devname = lv.replace('/dev/','').replace('-','--').replace('/','-')
        d = self.add_dev(devname, size, "linear")
        for pv in h:
            d.add_parent(pv.replace('/dev/disk/', ''), size=h[pv])
            d.set_devpath(lv)
            parent_dev = self.get_dev_by_devpath(pv)
            if parent_dev is None:
                continue
            parent_dev.add_child(d.devname)

    def load_lvm(self):
        # walk vgdisplay -v output, collecting extent sizes and lvs
        if not which("vgdisplay"):
            return
        cmd = ["vgdisplay", "-v"]
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        for line in out.split('\n'):
            if 'VG Name' in line:
                vgname = line.split()[-1].replace('/dev/','')
            if 'PE Size' in line:
                self.pe_size[vgname] = int(line.split()[-1])
            if 'LV Name' not in line:
                continue
            self.load_lv(line.split()[-1])

    def load(self, di=None):
        # entry point: populate the tree from all sources
        self.get_lunmap()
        self.load_ioscan()
        self.load_lvm()

    def blacklist(self, devname):
        # exclude pseudo and removable-media devices
        bl = [r'^loop[0-9]*.*', r'^ram[0-9]*.*', r'^scd[0-9]*', r'^sr[0-9]*']
        for b in bl:
            if re.match(b, devname):
                return True
        return False
if __name__ == "__main__":
    # ad-hoc test harness: load the device tree and dump it
    tree = DevTree()
    tree.load()
    #print(tree)
    tree.print_tree_bottom_up()
    #print(map(lambda x: x.alias, tree.get_top_devs()))
opensvc-1.8~20170412/lib/hostidLinux.py 0000644 0001750 0001750 00000000102 13073467726 017706 0 ustar jkelbert jkelbert from uuid import getnode
def hostid():
    """Return this host's id as a string, derived from uuid.getnode()."""
    return str(getnode())
opensvc-1.8~20170412/lib/rcScheduler.py 0000644 0001750 0001750 00000130601 13073467726 017647 0 ustar jkelbert jkelbert """
This module defines the Scheduler class inherited by Svc and Node.
"""
from __future__ import print_function
import sys
import os
import datetime
import json
import time
import random
import logging
import rcExceptions as ex
from rcGlobalEnv import rcEnv, Storage
from rcUtilities import is_string
from rcColor import formatter
# scheduler log line format: "<task>: <message>"
SCHED_FMT = "%s: %s"

# map of month names (1-12) and weekday names (0=monday .. 6=sunday),
# full and abbreviated, to their numeric values as used in schedule
# definitions
CALENDAR_NAMES = {
    "jan": 1,
    "feb": 2,
    "mar": 3,
    "apr": 4,
    "may": 5,
    "jun": 6,
    "jul": 7,
    "aug": 8,
    "sep": 9,
    "oct": 10,
    "nov": 11,
    "dec": 12,
    "january": 1,
    "february": 2,
    "march": 3,
    "april": 4,
    "june": 6,
    "july": 7,
    "august": 8,
    "september": 9,
    "october": 10,
    "november": 11,
    "december": 12,
    "mon": 0,
    "tue": 1,
    "wed": 2,
    "thu": 3,
    "fri": 4,
    "sat": 5,
    "sun": 6,
    "monday": 0,
    "tuesday": 1,
    "wednesday": 2,
    "thursday": 3,
    "friday": 4,
    "saturday": 5,
    "sunday": 6,
}
def fork(func, args=None, kwargs=None, serialize=False, delay=300):
    """
    A fork daemonizing function: run func(*args, **kwargs) in a fully
    detached child process. The parent returns immediately; the child
    never returns (it always exits via os._exit).

    args[0] must be the object owning the task (a svc, node or
    Compliance wrapper), used for logging and serialization.
    """
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}
    if os.fork() > 0:
        # return to parent execution
        return
    # separate the son from the father
    os.chdir('/')
    os.setsid()
    try:
        # second fork of the double-fork daemonization: the intermediate
        # child exits so the grandchild is reparented to init
        pid = os.fork()
        if pid > 0:
            os._exit(0)
    except:
        os._exit(1)
    obj = args[0]
    if obj.__class__.__name__ == "Compliance":
        if obj.svc:
            self = obj.svc
        else:
            self = obj.node
    else:
        # svc or node
        self = obj
    if self.sched.name == "node":
        title = "node."+func.__name__.lstrip("_")
    else:
        title = self.sched.name+"."+func.__name__.lstrip("_")
    if serialize:
        # take a file lock so only one instance of this task runs at a time
        lockfile = title+".fork.lock"
        lockfile = os.path.join(rcEnv.pathlock, lockfile)
        from lock import lock, unlock
        try:
            lockfd = lock(lockfile=lockfile, timeout=0, delay=0)
            self.sched.sched_log(title, "lock acquired", "debug")
        except Exception:
            self.sched.sched_log(title, "task is already running", "warning")
            os._exit(0)
    # now wait for a random delay to not DoS the collector.
    if delay > 0 and self.sched.name == "node":
        delay = int(random.random()*delay)
        self.sched.sched_log(title, "delay %d secs to level database load"%delay, "debug")
        try:
            time.sleep(delay)
        except KeyboardInterrupt as exc:
            self.log.error(exc)
            os._exit(1)
    try:
        func(*args, **kwargs)
    except Exception as exc:
        if serialize:
            unlock(lockfd)
        self.log.error(exc)
        os._exit(1)
    if serialize:
        unlock(lockfd)
    os._exit(0)
def scheduler_fork(func):
    """
    A decorator that runs the decorated function in a detached
    subprocess if the cron option is set, else runs it inline.
    """
    import functools

    # preserve the wrapped function's __name__/__doc__ (the original
    # decorator exposed '_func' as the name of every scheduled task)
    @functools.wraps(func)
    def _func(*args, **kwargs):
        self = args[0]
        if self.options.cron:
            fork(func, args, kwargs, serialize=True, delay=59)
        else:
            func(*args, **kwargs)
    return _func
class SchedNotAllowed(Exception):
    """
    The exception signaling the task can not run due to scheduling
    constaints.
    """
class SchedNoDefault(Exception):
    """
    The exception raised to signal a task has no default schedule
    defined.
    """
class SchedSyntaxError(Exception):
    """
    The exception raised to signal the defined schedule has syntax
    errors.
    """
class SchedExcluded(Exception):
    """
    The exception raised to signal a negative constraint violation.
    """
class SchedOpts(object):
    """
    Storage for a scheduled task's options: the config section it is
    defined in, its last-run timestamp file name, and the option
    holding its schedule definition.
    """
    def __init__(self, section,
                 fname=None,
                 schedule_option="push_schedule"):
        self.section = section
        if fname is None:
            # default timestamp file, parented to the node subdirectory
            fname = "node" + os.sep + "last_" + section + "_push"
        self.fname = fname
        self.schedule_option = schedule_option
class Scheduler(object):
"""
The scheduler class.
The node and each service inherit an independent scheduler through
this class.
"""
def __init__(self, config_defaults=None, config=None, options=None,
scheduler_actions=None, log=None, name="node", svc=None):
self.config_defaults = config_defaults
self.config = config
if scheduler_actions is None:
self.scheduler_actions = {}
else:
self.scheduler_actions = scheduler_actions
if options is None:
self.options = Storage()
else:
self.options = options
self.name = name
self.svc = svc
self.log = log
self.sched_log_shut = False
def sched_log(self, task, msg, level):
"""
A logger wrapping method, used to log to the service or node
sublogger dedicated to scheduling.
"""
if self.sched_log_shut:
return
try:
task = task.replace(self.name + ".", "")
except:
pass
log = logging.getLogger(self.log.name+".scheduler")
getattr(log, level)(SCHED_FMT % (task, msg))
def get_next_schedule(self, action, _max=14400):
"""
Iterate future dates in search for the next date validating
scheduling constraints.
"""
self.sched_log_shut = True
now = datetime.datetime.now()
cron = self.options.cron
self.options.cron = True
for idx in range(_max):
future_dt = now + datetime.timedelta(minutes=idx*10)
data = self.skip_action(action, now=future_dt, deferred_write_timestamp=True)
if isinstance(data, dict):
if len(data["keep"]) > 0:
return {"next_sched": future_dt, "minutes": _max}
elif not data:
self.options.cron = cron
return {"next_sched": future_dt, "minutes": _max}
self.options.cron = cron
return {"next_sched": None, "minutes": None}
@staticmethod
def _need_action_interval(last, delay=10, now=None):
"""
Return False if timestamp is fresher than now-interval
Return True otherwize.
Zero is a infinite interval.
"""
if delay == 0:
return False
if last is None:
return True
if now is None:
now = datetime.datetime.now()
limit = last + datetime.timedelta(minutes=delay)
if now < limit:
return False
else:
return True
# never reach here
return True
@staticmethod
def sched_delay(delay=59):
"""
Sleep for a random delay before executing the task, and after the
scheduling constraints have been validated.
"""
delay = int(random.random()*delay)
try:
time.sleep(delay)
except KeyboardInterrupt:
raise ex.excError("interrupted while waiting for scheduler delay")
    def sched_write_timestamp(self, sopt):
        """
        Iterate the scheduled tasks to update the last run timestamp.
        <sopt> is a SchedOpts or a list of SchedOpts.
        """
        if not isinstance(sopt, list):
            sopt = [sopt]
        for _sopt in sopt:
            self._timestamp(_sopt.fname)
@staticmethod
def _timestamp(timestamp_f):
"""
Update the timestamp file .
If if is not a fullpath, consider it parented to
.
Create missing parent directories if needed.
"""
if not timestamp_f.startswith(os.sep):
timestamp_f = os.path.join(rcEnv.pathvar, timestamp_f)
timestamp_d = os.path.dirname(timestamp_f)
if not os.path.isdir(timestamp_d):
os.makedirs(timestamp_d, 0o755)
with open(timestamp_f, 'w') as ofile:
ofile.write(str(datetime.datetime.now())+'\n')
return True
    def _skip_action_interval(self, last, interval, now=None):
        """
        Return the negation of _need_action_interval()
        """
        return not self._need_action_interval(last, interval, now=now)
    def _in_timerange_probabilistic(self, timerange, now=None):
        """
        Validate a timerange constraint of a scheduled task, with an added
        failure probability decreasing with the remaining allowed window.

                 proba
                   ^
            100%  |
             75%  |XXX
             50%  |XXXX
             25%  |XXXXXX
              0%  ----|----|-> elapsed
                  0%  50%  100%

        This algo is meant to level collector's load which peaks
        when all daily cron trigger at the same minute.

        Raises SchedNotAllowed when the probabilistic challenge is lost;
        returns None when the run is allowed.
        """
        if not timerange.get("probabilistic", False):
            return
        try:
            begin = self._time_to_minutes(timerange["begin"])
            end = self._time_to_minutes(timerange["end"])
            now = self._time_to_minutes(now)
        except:
            raise SchedNotAllowed("time conversion error in probabilistic "
                                  "challenge")
        if begin > end:
            # timerange wraps around midnight: shift end to the next day
            end += 1440
        if now < begin:
            now += 1440
        length = end - begin
        if length < 60:
            # no need to play this game on short allowed periods
            return
        if timerange["interval"] <= length:
            # don't skip if interval <= period length, because the user
            # expects the action to run multiple times in the period
            return
        # NOTE(review): presumably keeps a margin at the end of the
        # window so the task still gets a chance to run -- confirm
        length -= 11
        elapsed = now - begin
        elapsed_pct = min(100, int(100.0 * elapsed / length))
        if elapsed_pct < 50:
            # fixed skip proba for a perfect leveling on the first half-period
            proba = 100.0 - max(1, 1000.0 / length)
        else:
            # decreasing skip proba on the second half-period
            proba = 100.0 - min(elapsed_pct, 100)
        rnd = random.random() * 100.0
        if rnd >= proba:
            self.log.debug("win probabilistic challenge: %d, "
                           "over %d"%(rnd, proba))
            return
        raise SchedNotAllowed("lost probabilistic challenge: %d, "
                              "over %d"%(rnd, proba))
@staticmethod
def _time_to_minutes(dt_spec):
"""
Convert a datetime or a %H:%M formatted string to minutes.
"""
if isinstance(dt_spec, datetime.datetime):
dtm = dt_spec
dt_spec = dtm.hour * 60 + dtm.minute
else:
try:
dtm = time.strptime(dt_spec, "%H:%M")
except:
raise Exception("malformed time string: %s"%str(dt_spec))
dt_spec = dtm.tm_hour * 60 + dtm.tm_min
return dt_spec
    def _in_timeranges(self, schedule, fname=None, now=None, last=None):
        """
        Validate the timerange constraints of a schedule.
        Iterates multiple allowed timeranges, and switch between simple and
        probabilistic validation.

        Returns on the first timerange accepting the run; raises
        SchedNotAllowed with the joined per-timerange errors otherwise.
        """
        if len(schedule["timeranges"]) == 0:
            raise SchedNotAllowed("no timeranges")
        errors = []
        for timerange in schedule["timeranges"]:
            try:
                self.in_timerange(timerange, now=now)
                self.in_timerange_interval(timerange, fname=fname, now=now, last=last)
                if fname is not None:
                    # fname as None indicates we run in test mode
                    self._in_timerange_probabilistic(timerange, now=now)
                return
            except SchedNotAllowed as exc:
                errors.append(str(exc))
        raise SchedNotAllowed(", ".join(errors))
def in_timerange(self, timerange, now=None):
"""
Validate if is in .
"""
try:
begin = self._time_to_minutes(timerange["begin"])
end = self._time_to_minutes(timerange["end"])
now = self._time_to_minutes(now)
except:
raise SchedNotAllowed("conversion error in timerange challenge")
if begin <= end:
if now >= begin and now <= end:
return
elif begin > end:
#
# =================
# 23h 0h 1h
#
if (now >= begin and now <= 1440) or \
(now >= 0 and now <= end):
return
raise SchedNotAllowed("not in timerange %s-%s" % \
(timerange["begin"], timerange["end"]))
def get_last(self, fname):
"""
Return the last task run timestamp, fetched from the on-disk cache.
"""
timestamp_f = self.get_timestamp_f(fname)
if not os.path.exists(timestamp_f):
return
try:
with open(timestamp_f, 'r') as ofile:
buff = ofile.read()
last = datetime.datetime.strptime(buff, "%Y-%m-%d %H:%M:%S.%f\n")
return last
except (OSError, IOError, ValueError):
return
def in_timerange_interval(self, timerange, fname=None, now=None, last=None):
"""
Validate if the last task run is old enough to allow running again.
"""
if timerange["interval"] == 0:
raise SchedNotAllowed("interval set to 0")
if fname is None:
# test mode
return
if last is None:
last = self.get_last(fname)
if last is None:
return
if self._skip_action_interval(last, timerange["interval"], now=now):
raise SchedNotAllowed("last run is too soon")
return
    def _in_schedule(self, schedule, fname=None, now=None, last=None):
        """
        Validate if <now> is in the allowed days and in the allowed timranges.
        Raises SchedNotAllowed on constraint violation.
        """
        self._in_days(schedule, now=now)
        self._in_timeranges(schedule, fname=fname, now=now, last=last)
def in_schedule(self, schedules, fname=None, now=None, last=None):
"""
Validate if pass the constraints of a set of schedules,
iterating over each non-excluded one.
"""
if len(schedules) == 0:
raise SchedNotAllowed("no schedule")
errors = []
for schedule in schedules:
try:
self._in_schedule(schedule, fname=fname, now=now, last=last)
if schedule["exclude"]:
raise SchedExcluded('excluded by schedule member "%s"' % schedule["raw"])
else:
return
except SchedNotAllowed as exc:
errors.append(str(exc))
raise SchedNotAllowed(", ".join(errors))
def sched_convert_to_schedule(self, config, section, prefix=""):
"""
Read and convert a deprecated schedule definition from a configuration
file section, handle json-formatted lists, and finally return a
current-style schedule string.
"""
def get_val(param):
if not config.has_section(section) or \
(not config.has_option(section, param) and \
not config.has_option(section, prefix+param)):
# internal syncs
config_defaults = config.defaults()
val = config_defaults.get(prefix+param)
elif config.has_option(section, prefix+param):
val = config.get(section, prefix+param)
else:
val = config.get(section, param)
return str(val)
days_s = get_val("days")
interval_s = get_val("interval")
period_s = get_val("period")
if days_s == "None" or interval_s == "None" or period_s == "None":
return ""
try:
days = json.loads(days_s)
except:
self.log.error("invalid days schedule definition in section",
section, days_s, file=sys.stderr)
return ""
try:
periods = json.loads(period_s)
elements = []
if is_string(periods[0]):
periods = [periods]
for period in periods:
elements.append("%s-%s@%s" % (period[0], period[1], interval_s))
period_s = ",".join(elements)
except:
self.log.error("invalid periods schedule definition in section",
section, file=sys.stderr)
return ""
buff = "%(period)s %(days)s" % dict(
period=period_s,
days=",".join(days),
)
return buff.strip()
    def sched_get_schedule_raw(self, section, option):
        """
        Read the old/new style schedule options of a configuration file
        section. Convert if necessary and return the new-style formatted
        string. Raise SchedNoDefault when no definition is found.
        """
        if option is None:
            raise SchedNoDefault
        config = self.config
        def has_old_schedule_options(config, section):
            """
            Return True if a configuration file section has a deprecated
            schedule definition keyword
            """
            if config.has_option(section, 'sync_days') or \
               config.has_option(section, 'sync_interval') or \
               config.has_option(section, 'sync_period'):
                return True
            if config.has_option(section, 'days') or \
               config.has_option(section, 'interval') or \
               config.has_option(section, 'period'):
                return True
            return False
        if config.has_section(section) and \
           config.has_option(section, 'schedule'):
            # new-style: explicit 'schedule' keyword in the section
            schedule_s = config.get(section, 'schedule')
        elif section.startswith("sync") and config.has_section(section) and \
             has_old_schedule_options(config, section):
            # deprecated section-level keywords: convert
            if section.startswith("sync"):
                prefix = "sync_"
            elif section.startswith("app"):
                prefix = "app_"
            else:
                prefix = ""
            schedule_s = self.sched_convert_to_schedule(config, section, prefix=prefix)
        elif section.startswith("sync") and not config.has_section(section) and (\
            'sync_days' in config.defaults() or \
            'sync_interval' in config.defaults() or \
            'sync_period' in config.defaults() \
            ):
            # deprecated keywords in the DEFAULT section (internal syncs)
            schedule_s = self.sched_convert_to_schedule(config, section, prefix="sync_")
        elif config.has_option('DEFAULT', option):
            schedule_s = config.get('DEFAULT', option)
        elif self.svc and section in self.svc.resources_by_id and \
             hasattr(self.svc.resources_by_id[section], "default_schedule"):
            # resource-class defined default schedule
            schedule_s = self.svc.resources_by_id[section].default_schedule
        elif option in self.config_defaults:
            schedule_s = self.config_defaults[option]
        else:
            raise SchedNoDefault
        return schedule_s
    def _in_days(self, schedule, now=None):
        # validate the month, week-of-year and day-of-week constraints
        self._sched_validate_month(schedule["month"], now=now)
        self._sched_validate_week(schedule["week"], now=now)
        self._sched_validate_day(schedule["day"], now=now)
def _sched_validate_day(self, day, now=None):
"""
Split the allowed spec and for each element,
validate if is in allowed of week and of month.
"""
for _day in day.split(","):
try:
self.__sched_validate_day(_day, now=now)
return
except SchedNotAllowed:
pass
raise SchedNotAllowed("not in allowed days")
def __sched_validate_day(self, day, now=None):
    """
    Validate a single day element, formatted '<weekday>[:<day_of_month>]'.
    Raise SchedNotAllowed if <now> does not match, SchedSyntaxError
    on malformed input.
    """
    n_col = day.count(":")
    day_of_month = None
    from_tail = None
    from_head = None
    if n_col > 1:
        raise SchedSyntaxError("only one ':' allowed in day spec '%s'" %day)
    elif n_col == 1:
        day, day_of_month = day.split(":")
        if len(day_of_month) == 0:
            raise SchedSyntaxError("day_of_month specifier is empty")
        # symbolic ordinals counted from the beginning of the month
        if day_of_month in ("first", "1st"):
            from_head = True
            day_of_month = 1
        elif day_of_month in ("second", "2nd"):
            from_head = True
            day_of_month = 2
        elif day_of_month in ("third", "3rd"):
            from_head = True
            day_of_month = 3
        elif day_of_month in ("fourth", "4th"):
            from_head = True
            day_of_month = 4
        elif day_of_month in ("fifth", "5th"):
            from_head = True
            day_of_month = 5
        # 'last' and negative ordinals are counted from the month end
        elif day_of_month == "last":
            from_tail = True
            day_of_month = 1
        elif day_of_month[0] == "-":
            from_tail = True
            day_of_month = day_of_month[1:]
        elif day_of_month[0] == "+":
            from_head = True
            day_of_month = day_of_month[1:]
        try:
            day_of_month = int(day_of_month)
        except ValueError:
            raise SchedSyntaxError("day_of_month is not a number")
    day = self._sched_expand_value(day)
    # '*' or empty weekday spec means any week day
    if day in ("*", ""):
        allowed_days = range(7)
    else:
        allowed_days = [d for d in day if d >= 0 and d <= 6]
    if now is None:
        now = datetime.datetime.now()
    this_week_day = now.weekday()
    if this_week_day not in allowed_days:
        raise SchedNotAllowed
    if day_of_month is not None:
        _day = now
        _month = _day.month
        if from_head is True:
            # nth <weekday> (or nth day when no weekday is given) from
            # the month head: match when stepping back n (weeks|days)
            # leaves the month while stepping back n-1 does not
            if day == "":
                day1 = _day - datetime.timedelta(days=day_of_month)
                day2 = _day - datetime.timedelta(days=day_of_month-1)
            else:
                day1 = _day - datetime.timedelta(days=7*day_of_month)
                day2 = _day - datetime.timedelta(days=7*(day_of_month-1))
            if day1.month == _month or day2.month != _month:
                raise SchedNotAllowed
        elif from_tail is True:
            # same logic, stepping forward toward the end of the month
            if day == "":
                day1 = _day + datetime.timedelta(days=day_of_month)
                day2 = _day + datetime.timedelta(days=day_of_month-1)
            else:
                day1 = _day + datetime.timedelta(days=7*day_of_month)
                day2 = _day + datetime.timedelta(days=7*(day_of_month-1))
            if day1.month == _month or day2.month != _month:
                raise SchedNotAllowed
        elif _day.day != day_of_month:
            raise SchedNotAllowed
    return
def _sched_validate_week(self, week, now=None):
"""
Validate if is in allowed .
"""
week = self._sched_expand_value(week)
if week == "*":
return
allowed_weeks = [w for w in week if w >= 1 and w <= 53]
if now is None:
now = datetime.datetime.now()
if now.isocalendar()[1] not in allowed_weeks:
raise SchedNotAllowed("not in allowed weeks")
return
def _sched_validate_month(self, month, now=None):
    """
    Raise SchedNotAllowed if <now> is not in an allowed month.
    Each comma-separated element is '[<months>][%<modulo>[+<shift>]]'.
    """
    if month == "*":
        return
    allowed_months = set([])
    for _month in month.split(","):
        ecount = _month.count("%")
        if ecount == 1:
            month_s, modulo_s = _month.split("%")
        elif ecount == 0:
            month_s = _month
            modulo_s = None
        else:
            raise SchedSyntaxError("only one '%%' allowed in month definition '%s'" % _month)
        if month_s in ("", "*"):
            # bug fix: months are numbered 1-12 (as produced by
            # _sched_expand_value and compared to now.month), but the
            # wildcard set was range(12) = 0..11, so December could
            # never match a bare or modulo-only month spec
            _allowed_months = set(range(1, 13))
        else:
            _allowed_months = self._sched_expand_value(month_s)
        if modulo_s is not None:
            # restrict to the months matching the modulo expression
            _allowed_months &= self.__sched_validate_month(modulo_s)
        allowed_months |= _allowed_months
    if now is None:
        now = datetime.datetime.now()
    if now.month not in allowed_months:
        raise SchedNotAllowed("not in allowed months")
    return
@staticmethod
def __sched_validate_month(modulo):
shift = 0
n_plus = modulo.count("+")
if n_plus > 1:
raise SchedSyntaxError("only one '+' is allowed in modulo '%s'" % modulo)
if n_plus == 1:
modulo, shift = modulo.split("+")
try:
modulo = int(modulo)
except ValueError:
raise SchedSyntaxError("modulo '%s' is not a number" % modulo)
try:
shift = int(shift)
except ValueError:
raise SchedSyntaxError("shift '%s' is not a number" % shift)
return set([m for m in range(1, 13) if (m + shift) % modulo == 0])
@staticmethod
def _sched_to_int(name):
try:
idx = int(name)
return idx
except ValueError:
name = name.lower()
if name not in CALENDAR_NAMES:
raise SchedSyntaxError("unknown calendar name '%s'" % name)
return CALENDAR_NAMES[name]
def _sched_expand_value(self, spec):
"""
Top level schedule definition parser.
Split the definition into sub-schedules, and parse each one.
"""
elements = set([])
if spec in ("*", ""):
return spec
subspecs = spec.split(",")
for subspec in subspecs:
n_dash = subspec.count("-")
if n_dash > 1:
raise SchedSyntaxError("only one '-' allowed in timerange '%s'" % spec)
elif n_dash == 0:
elements.add(self._sched_to_int(subspec))
continue
begin, end = subspec.split("-")
begin = self._sched_to_int(begin)
end = self._sched_to_int(end)
_range = sorted([begin, end])
elements |= set(range(_range[0], _range[1]+1))
return elements
def _interval_from_timerange(self, timerange):
"""
Return a default interval from a timerange data structure.
This interval is the timerange length in minute, plus one.
"""
begin_m = self._time_to_minutes(timerange['begin'])
end_m = self._time_to_minutes(timerange['end'])
return end_m - begin_m + 1
def _sched_parse_timerange(self, spec, section=None):
    """
    Return the list of timerange data structure parsed from the
    definition string. Each element of the comma-separated <spec> is
    '[<HH:MM>[-<HH:MM>]][@<interval minutes>]' or '*'.
    """
    def parse_timerange(spec):
        # parse a single 'HH:MM[-HH:MM]' element into begin/end times
        if spec == "*" or spec == "":
            return {"begin": "00:00", "end": "23:59"}
        if "-" not in spec:
            # single time point: make a degenerate range
            spec = "-".join((spec, spec))
        try:
            begin, end = spec.split("-")
        except:
            raise SchedSyntaxError("split '%s' error" % spec)
        if begin.count(":") != 1 or \
           end.count(":") != 1:
            raise SchedSyntaxError("only one ':' allowed in timerange '%s' end" % spec)
        begin_m = self._time_to_minutes(begin)
        end_m = self._time_to_minutes(end)
        if begin_m > end_m:
            # reversed bounds: swap
            tmp = end
            end = begin
            begin = tmp
        elif begin_m == end_m:
            # degenerate range: extend the end by 10 minutes
            end_m += 10
            end = "%02d:%02d" % (end_m // 60, end_m % 60)
        return {"begin": begin, "end": end}
    # sync tasks run at a fixed point of their timerange; other tasks
    # are randomized ("probabilistic") inside it
    if section and section.startswith("sync"):
        probabilistic = False
    else:
        probabilistic = True
    tr_list = []
    for _spec in spec.split(","):
        if len(_spec) == 0 or _spec == "*":
            # wildcard: whole day, interval longer than a day
            tr_data = {
                "probabilistic": probabilistic,
                "begin": "00:00",
                "end": "23:59",
                "interval": 1441,
            }
            tr_list.append(tr_data)
            continue
        ecount = _spec.count("@")
        if ecount == 0:
            # no explicit interval: default to timerange length + 1
            tr_data = parse_timerange(_spec)
            tr_data["interval"] = self._interval_from_timerange(tr_data)
            tr_data["probabilistic"] = probabilistic
            tr_list.append(tr_data)
            continue
        elements = _spec.split("@")
        ecount = len(elements)
        if ecount < 2:
            raise SchedSyntaxError("missing @ in '%s'" % _spec)
        if ecount > 2:
            raise SchedSyntaxError("only one @ allowed in '%s'" % _spec)
        tr_data = parse_timerange(elements[0])
        tr_data["probabilistic"] = probabilistic
        try:
            tr_data["interval"] = int(elements[1])
        except:
            raise SchedSyntaxError("interval '%s' is not a number" % elements[1])
        tr_list.append(tr_data)
    return tr_list
def sched_get_schedule(self, section, option, schedules=None):
    """
    Return the list of schedule structures for the spec string passed
    as <schedules> or, if not passed, from the <section>.<option>
    value in the configuration file.
    """
    if schedules is None:
        schedules = self.sched_get_schedule_raw(section, option)
    try:
        # the value may be a json-encoded list of schedule specs
        schedules = json.loads(schedules)
    except:
        pass
    if is_string(schedules):
        schedules = [schedules]
    data = []
    for schedule in schedules:
        schedule_orig = schedule
        schedule = schedule.strip()
        if len(schedule) == 0:
            continue
        # a leading '!' negates the schedule (exclusion)
        if schedule.startswith("!"):
            exclude = True
            schedule = schedule[1:].strip()
        else:
            exclude = False
        if len(schedule) == 0:
            continue
        # spec format: '<timeranges> [<day> [<week> [<month>]]]'
        elements = schedule.split()
        ecount = len(elements)
        if ecount == 1:
            _data = {
                "timeranges": self._sched_parse_timerange(elements[0], section=section),
                "day": "*",
                "week": "*",
                "month": "*",
            }
        elif ecount == 2:
            _tr, _day = elements
            _data = {
                "timeranges": self._sched_parse_timerange(_tr, section=section),
                "day": _day,
                "week": "*",
                "month": "*",
            }
        elif ecount == 3:
            _tr, _day, _week = elements
            _data = {
                "timeranges": self._sched_parse_timerange(_tr, section=section),
                "day": _day,
                "week": _week,
                "month": "*",
            }
        elif ecount == 4:
            _tr, _day, _week, _month = elements
            _data = {
                "timeranges": self._sched_parse_timerange(_tr, section=section),
                "day": _day,
                "week": _week,
                "month": _month,
            }
        else:
            raise SchedSyntaxError("invalid number of element, '%d' not in "
                                   "(1, 2, 3, 4)" % ecount)
        _data["exclude"] = exclude
        _data["raw"] = schedule_orig
        data.append(_data)
    return data
def allow_action_schedule(self, section, option, fname=None, now=None, last=None):
    """
    Validate the <section>.<option> schedule against <now>.
    Return silently if allowed; raise SchedNotAllowed otherwise,
    folding "no schedule" and syntax errors into SchedNotAllowed.
    """
    if option is None:
        return
    if now is None:
        now = datetime.datetime.now()
    try:
        schedule = self.sched_get_schedule(section, option)
        self.in_schedule(schedule, fname=fname, now=now, last=last)
    except SchedNoDefault:
        raise SchedNotAllowed("no schedule in section %s and no default "
                              "schedule"%section)
    except SchedSyntaxError as exc:
        raise SchedNotAllowed("malformed parameter value: %s.schedule "
                              "(%s)"%(section, str(exc)))
def skip_action_schedule(self, section, option, fname=None, now=None, last=None):
    """
    Return False if the <section>.<option> schedule allows the action
    to run now, True otherwise.
    """
    try:
        self.allow_action_schedule(section, option, fname=fname, now=now, last=last)
        return False
    except SchedExcluded:
        # explicitly excluded by a '!' schedule
        return True
    except Exception:
        # not allowed now, no schedule, or malformed schedule
        return True
    # bug fix: removed the unreachable trailing 'return True' — every
    # path through the try/except above already returns
@staticmethod
def get_timestamp_f(fname):
    """
    Return the full path of the last run timestamp file with the
    <fname> basename, under the node var directory.
    """
    timestamp_f = os.path.realpath(os.path.join(rcEnv.pathvar, fname))
    return timestamp_f
def _is_croned(self):
    """
    Return True if the cron option is set, i.e. the action was
    launched by the scheduler rather than interactively.
    """
    return self.options.cron
def skip_action(self, action, section=None, fname=None,
                schedule_option=None, now=None,
                deferred_write_timestamp=False):
    """
    Return True/False for a single scheduled task, or, for a task-set
    (an action mapped to a list of scheduler options), a dict with
    the 'keep' and 'skip' option lists and the candidate count.
    """
    if action not in self.scheduler_actions:
        # not a scheduled action: run it when interactive, never
        # from cron
        if not self._is_croned():
            return False
        return {"count": 0, "keep": [], "skip": []}
    if isinstance(self.scheduler_actions[action], list):
        data = {"count": 0, "keep": [], "skip": []}
        idx = 0
        for idx, sopt in enumerate(self.scheduler_actions[action]):
            skip = self._skip_action(
                action, sopt,
                section=section, fname=fname, schedule_option=schedule_option,
                now=now, deferred_write_timestamp=deferred_write_timestamp
            )
            if skip:
                data["skip"].append(sopt)
            else:
                data["keep"].append(sopt)
        data["count"] = idx + 1
        return data
    else:
        sopt = self.scheduler_actions[action]
        return self._skip_action(
            action, sopt,
            section=section, fname=fname, schedule_option=schedule_option,
            now=now, deferred_write_timestamp=deferred_write_timestamp
        )
def _skip_action(self, action, sopt, section=None, fname=None,
                 schedule_option=None, now=None,
                 deferred_write_timestamp=False):
    """
    Return True if the scheduled task <action>/<sopt> must be skipped
    now. Side effect: when the task is kept and
    <deferred_write_timestamp> is False, refresh its last-run
    timestamp file.
    """
    # explicit arguments override the scheduler option defaults
    if section is None:
        section = sopt.section
    if fname is None:
        fname = sopt.fname
    if schedule_option is None:
        schedule_option = sopt.schedule_option
    def title():
        """
        Return a string to use as the task title in log entries.
        """
        buff = ".".join((self.name, action))
        if "#" in section:
            buff += "." + section
        return buff
    if not self._is_croned():
        # don't update the timestamp file
        return False
    # check if we are in allowed scheduling period
    try:
        self.allow_action_schedule(section, schedule_option, fname=fname, now=now)
    except Exception as exc:
        self.sched_log(title(), str(exc), "debug")
        return True
    self.sched_log(title(), "run task", "info")
    # update the timestamp file
    if not deferred_write_timestamp:
        timestamp_f = self.get_timestamp_f(fname)
        self._timestamp(timestamp_f)
        self.sched_log(title(), "last run timestamp updated", "debug")
    return False
def print_schedule(self):
    """
    The 'print schedule' node and service action entrypoint.
    """
    if not hasattr(self, "config") or self.config is None:
        print("you are not allowed to print schedules", file=sys.stderr)
        raise ex.excError()
    if self.options.format is None:
        # plain text tables
        self._print_schedule_default()
        return
    data = self._print_schedule_data()
    if self.svc and len(self.svc.node.svcs) > 1:
        # let the Node object do the formatting (for aggregation)
        return data
    # format ourself
    return self._print_schedule(data)
@formatter
def _print_schedule(self, data):
    """
    Display the scheduling table using the formatter specified in
    command line --format option. The @formatter decorator does the
    actual rendering of the returned data.
    """
    return data
def _print_schedule_default(self):
    """
    Print the scheduling table in normal or detailed (--verbose)
    mode. Verbose mode adds the 'next run' column.
    """
    if self.options.verbose:
        print_sched_fmt = "%(action)-21s %(last_run)-21s %(next_run)-19s %(config_parameter)-24s %(schedule_definition)s"
        print("action                last run              next run            config parameter         schedule definition")
        print("------                --------              --------            ----------------         -------------------")
    else:
        print_sched_fmt = "%(action)-21s %(last_run)-21s %(config_parameter)-24s %(schedule_definition)s"
        print("action                last run              config parameter         schedule definition")
        print("------                --------              ----------------         -------------------")
    for data in self._print_schedule_data():
        print(print_sched_fmt % data)
def _print_schedule_data(self):
"""
Return a list of dict of schedule information for all tasks.
"""
data = []
for action in sorted(self.scheduler_actions):
data += self.__print_schedule_data(action)
return data
def __print_schedule_data(self, action):
"""
Return a dict of a scheduled task, or list of dict of a task-set,
containing schedule information.
"""
data = []
if isinstance(self.scheduler_actions[action], list):
for sopt in self.scheduler_actions[action]:
data += [self.___print_schedule_data(action, sopt)]
else:
sopt = self.scheduler_actions[action]
data += [self.___print_schedule_data(action, sopt)]
return data
def ___print_schedule_data(self, action, sopt):
    """
    Return a dict of a scheduled task information: action name, last
    run date, config parameter and schedule definition (plus next run
    date in verbose mode).
    """
    section = sopt.section
    schedule_option = sopt.schedule_option
    fname = sopt.fname
    try:
        schedule_s = self.sched_get_schedule_raw(section, schedule_option)
    except SchedNoDefault:
        schedule_s = "anytime"
    except SchedSyntaxError:
        schedule_s = "malformed"
    if len(schedule_s) == 0:
        schedule_s = "-"
    timestamp_f = self.get_timestamp_f(fname)
    try:
        # the timestamp file holds the last run date; drop the
        # sub-second part
        with open(timestamp_f, 'r') as ofile:
            last_s = ofile.read()
            last_s = last_s.split('.')[0]
    except (IOError, OSError):
        last_s = "-"
    if section != "DEFAULT":
        param = "schedule"
    else:
        param = schedule_option
    param = '.'.join((section, param))
    if self.options.verbose:
        # verbose mode also computes and reports the next run date
        result = self.get_next_schedule(action)
        if result["next_sched"]:
            next_s = result["next_sched"].strftime("%Y-%m-%d %H:%M")
        else:
            next_s = "-"
        return dict(
            action=action,
            last_run=last_s,
            next_run=next_s,
            config_parameter=param,
            schedule_definition=schedule_s
        )
    else:
        return dict(
            action=action,
            last_run=last_s,
            config_parameter=param,
            schedule_definition=schedule_s,
        )
@staticmethod
def _str_to_datetime(datetime_str):
"""
Convert a %Y-%m-%d %H:%M formatted string to a datetime.
"""
converted = datetime.datetime.strptime(datetime_str, "%Y-%m-%d %H:%M")
return converted
def test_schedule(self, schedule_s, date_s, expected):
    """
    Test if <date_s> passes the <schedule_s> constraints and compare
    with the expected boolean result <expected>. An <expected> of
    None means a syntax error is expected.
    Print a test report line.
    This method is used by the test_scheduler() testing function.
    """
    dtm = self._str_to_datetime(date_s)
    try:
        schedule = self.sched_get_schedule("dummy", "dummy", schedules=schedule_s)
    except SchedSyntaxError as exc:
        if expected == None:
            print("passed : schedule syntax error %s (%s)" % (repr(schedule_s), str(exc)))
            return True
        else:
            print("failed : schedule syntax error %s (%s)" % (repr(schedule_s), str(exc)))
            return False
    try:
        self.in_schedule(schedule, fname=None, now=dtm)
        result = True
        result_s = ""
    except SchedSyntaxError as exc:
        # some syntax errors are only detected at evaluation time
        if expected == None:
            print("passed : schedule syntax error %s (%s)" % (repr(schedule_s), str(exc)))
            return True
        else:
            print("failed : schedule syntax error %s (%s)" % (repr(schedule_s), str(exc)))
            return False
    except SchedNotAllowed as exc:
        result = False
        result_s = "("+str(exc)+")"
    if result == expected:
        check = "passed"
        ret = True
    else:
        check = "failed"
        ret = False
    print("%s : test '%s' in schedule %-50s expected %s => result %s %s" % \
          (check, date_s, repr(schedule_s), str(expected), str(result), result_s))
    return ret
def test_scheduler():
    """
    An exercising function for the scheduler.
    Each tuple is (schedule spec, date, expected result), where an
    expected result of None means a syntax error is expected.
    """
    tests = [
        ("", "2015-02-27 10:00", False),
        ("@0", "2015-02-27 10:00", False),
        ("*@0", "2015-02-27 10:00", False),
        ("*", "2015-02-27 10:00", True),
        ("*@61", "2015-02-27 10:00", True),
        ("09:00-09:20", "2015-02-27 10:00", False),
        ("09:00-09:20@31", "2015-02-27 10:00", False),
        ("09:00-09:00", "2015-02-27 10:00", False),
        ("09:20-09:00", "2015-02-27 10:00", False),
        ("09:00", "2015-02-27 10:00", False),
        ("09:00-09:20", "2015-02-27 09:09", True),
        ("09:00-09:20@31", "2015-02-27 09:09", True),
        ("09:00-09:00", "2015-02-27 09:09", True),
        ("09:20-09:00", "2015-02-27 09:09", True),
        ("09:00", "2015-02-27 09:09", True),
        ("* fri", "2015-10-09 10:00", True),
        ("* fri", "2015-10-08 10:00", False),
        ("* *:last", "2015-01-30 10:00", True),
        ("* *:last", "2015-01-31 10:00", True),
        ("* *:-1", "2015-01-31 10:00", True),
        ("* *:-1", "2015-01-24 10:00", False),
        ("* *:-2", "2015-01-31 10:00", False),
        ("* :last", "2015-01-30 10:00", False),
        ("* :last", "2015-01-31 10:00", True),
        ("* :-1", "2015-01-31 10:00", True),
        ("* :-2", "2015-01-30 10:00", True),
        ("* :-2", "2015-01-31 10:00", False),
        ("* :-2", "2015-01-05 10:00", False),
        ("* :5", "2015-01-05 10:00", True),
        ("* :+5", "2015-01-05 10:00", True),
        ("* :fifth", "2015-01-05 10:00", True),
        ("* :5", "2015-01-06 10:00", False),
        ("* :+5", "2015-01-06 10:00", False),
        ("* :fifth", "2015-01-06 10:00", False),
        ("* * * jan", "2015-01-06 10:00", True),
        ("* * * jan-feb", "2015-01-06 10:00", True),
        ("* * * %2", "2015-01-06 10:00", False),
        ("* * * %2+1", "2015-01-06 10:00", True),
        ("* * * jan-feb%2", "2015-01-06 10:00", False),
        ("* * * jan-feb%2+1", "2015-01-06 10:00", True),
        ("18:00-18:59@60 wed", "2016-08-31 18:00", True),
        ("18:00-18:59@60 wed", "2016-08-30 18:00", False),
        ("23:00-23:59@61 *:first", "2016-09-01 23:00", True),
        # syntax errors
        ("23:00-23:59@61 *:first:*", "2016-09-01 23:00", None),
        ("23:00-23:59@61 *:", "2016-09-01 23:00", None),
        ("23:00-23:59@61 *:*", "2016-09-01 23:00", None),
        ("23:00-23:59@61 * * %2%3", "2016-09-01 23:00", None),
        ("23:00-23:59@61 * * %2+1+2", "2016-09-01 23:00", None),
        ("23:00-23:59@61 * * %foo", "2016-09-01 23:00", None),
        ("23:00-23:59@61 * * %2+foo", "2016-09-01 23:00", None),
        ("23:00-23:59@61 freday", "2016-09-01 23:00", None),
        ("23:00-23:59@61 * * junuary", "2016-09-01 23:00", None),
        ("23:00-23:59@61 * * %2%3", "2016-09-01 23:00", None),
        ("23:00-23:59-01:00@61", "2016-09-01 23:00", None),
        ("23:00-23:59:00@61 * * %2%3", "2016-09-01 23:00", None),
        ("23:00-23:59@61@10", "2016-09-01 23:00", None),
        ("23:00-23:59 * * * * *", "2016-09-01 23:00", None),
    ]
    sched = Scheduler()
    for test in tests:
        assert sched.test_schedule(*test)

if __name__ == "__main__":
    test_scheduler()
opensvc-1.8~20170412/lib/resDiskGandi.py 0000644 0001750 0001750 00000014322 13073467726 017754 0 ustar jkelbert jkelbert import resDisk
import os
import rcStatus
import re
import pwd
import grp
import stat
from rcGlobalEnv import rcEnv
from rcUtilities import is_string
import rcExceptions as ex
class Disk(resDisk.Disk):
    """
    Gandi cloud volume resource driver.
    Attach and detach the named volume to/from the cloud node hosting
    the service, through the libcloud driver registered under
    <cloud_id> in the node configuration.
    """
    def __init__(self,
                 rid=None,
                 name=None,
                 node=None,
                 cloud_id=None,
                 user="root",
                 group="root",
                 perm="660",
                 **kwargs):
        self.label = "gandi volume %s"%str(name)
        resDisk.Disk.__init__(self,
                              rid=rid,
                              name="gandi",
                              type='disk.gandi',
                              **kwargs)
        self.name = name
        self.node = node
        self.cloud_id = cloud_id
        self.user = user
        self.group = group
        self.perm = perm
        # resolve symbolic user/group names to numeric ids once
        self.get_uid()
        self.get_gid()

    def print_obj(self, n):
        """
        Debug helper: print the non-dunder attributes of <n>.
        """
        for k in dir(n):
            if '__' in k:
                continue
            print(k, "=", getattr(n, k))

    def get_cloud(self):
        """
        Return (and cache on self) the cloud driver object for
        <cloud_id>.
        """
        if hasattr(self, 'cloud'):
            return self.cloud
        try:
            self.cloud = self.svc.node.cloud_get(self.cloud_id)
        except ex.excInitError as e:
            raise ex.excError(str(e))
        return self.cloud

    def get_uid(self):
        """
        Resolve self.user to a numeric uid in self.uid. Keep the
        original value on lookup failure.
        """
        self.uid = self.user
        if is_string(self.uid):
            try:
                info=pwd.getpwnam(self.uid)
                self.uid = info[2]
            except:
                pass

    def get_gid(self):
        """
        Resolve self.group to a numeric gid in self.gid. Keep the
        original value on lookup failure.
        """
        self.gid = self.group
        if is_string(self.gid):
            try:
                info=grp.getgrnam(self.gid)
                self.gid = info[2]
            except:
                pass

    def check_uid(self, rdev, verbose=False):
        """
        Return True if <rdev> does not exist or is owned by the
        expected uid.
        """
        if not os.path.exists(rdev):
            return True
        uid = os.stat(rdev).st_uid
        if uid != self.uid:
            if verbose:
                self.status_log('%s uid should be %d but is %d'%(rdev, self.uid, uid))
            return False
        return True

    def check_gid(self, rdev, verbose=False):
        """
        Return True if <rdev> does not exist or is owned by the
        expected gid.
        """
        if not os.path.exists(rdev):
            return True
        gid = os.stat(rdev).st_gid
        if gid != self.gid:
            if verbose:
                self.status_log('%s gid should be %d but is %d'%(rdev, self.gid, gid))
            return False
        return True

    def check_perm(self, rdev, verbose=False):
        """
        Return True if <rdev> does not exist or carries the expected
        permission bits.
        """
        if not os.path.exists(rdev):
            return True
        try:
            perm = oct(stat.S_IMODE(os.stat(rdev).st_mode))
        except:
            self.log.error('%s can not stat file'%rdev)
            return False
        # normalize "0o660"/"0660" style representations to "660"
        perm = str(perm).lstrip("0o").lstrip("0")
        if perm != str(self.perm):
            if verbose:
                self.status_log('%s perm should be %s but is %s'%(rdev, str(self.perm), perm))
            return False
        return True

    def has_it(self):
        """
        Return True if the volume is attached to the cloud node.
        """
        try:
            node = self.get_node()
        except ex.excError as e:
            raise ex.excError("can't find cloud node to list volumes (%s)"%str(e))
        c = self.get_cloud()
        disks = c.driver._node_info(node.id)['disks']
        for disk in disks:
            if disk['name'] == self.name:
                return True
        return False

    def is_up(self):
        """
        Return True if the volume is attached (same as has_it).
        """
        return self.has_it()

    def _status(self, verbose=False):
        """
        Return the resource status, honoring the always_on nodes
        standby semantics.
        """
        try:
            s = self.is_up()
        except ex.excError as e:
            self.status_log(str(e))
            return rcStatus.WARN
        if rcEnv.nodename in self.always_on:
            if s:
                return rcStatus.STDBY_UP
            else:
                return rcStatus.STDBY_DOWN
        else:
            if s:
                return rcStatus.UP
            else:
                return rcStatus.DOWN

    def get_node(self):
        """
        Return the cloud node object matching the configured node
        name, or the local node name by default.
        Raise excError if not found.
        """
        c = self.get_cloud()
        if self.node is not None:
            n = self.node
        else:
            n = rcEnv.nodename
        try:
            nodes = c.driver.list_nodes()
        except Exception as e:
            raise ex.excError(str(e))
        for node in nodes:
            if node.name == n:
                return node
        raise ex.excError()

    def get_disk(self):
        """
        Return the cloud disk object matching the volume name.
        Raise excError if not found.
        """
        c = self.get_cloud()
        disks = c.driver.ex_list_disks()
        _disk = None
        for disk in disks:
            if disk.name == self.name:
                _disk = disk
        if _disk is None:
            raise ex.excError()
        return _disk

    def do_start(self):
        """
        Attach the gandi volume to the cloud node, unless already
        attached.
        """
        try:
            node = self.get_node()
        except ex.excError as e:
            raise ex.excError("can't find cloud node to attach volume %s to (%s)"%(self.name, str(e)))
        try:
            disk = self.get_disk()
        except:
            raise ex.excError("volume %s not found in %s"%(self.name, self.cloud_id))
        try:
            status = self.is_up()
        except ex.excError as e:
            self.log.error("abort gandi volume %s attach: %s"%(self.name, str(e)))
            # bug fix: the original fell through with 'status' unbound
            # and crashed with a NameError; abort instead
            raise
        if status:
            self.log.info("gandi volume %s is already attached"%self.name)
            return
        self.log.info("attach gandi volume %s"%self.name)
        c = self.get_cloud()
        c.driver.ex_node_attach_disk(node, disk)
        self.can_rollback = True

    def do_stop(self):
        """
        Detach the gandi volume from the cloud node, unless already
        detached.
        """
        try:
            node = self.get_node()
        except ex.excError as e:
            # bug fix: the format string had a single %s for two
            # arguments, raising a TypeError instead of the excError
            raise ex.excError("can't find cloud node to detach volume %s from (%s)"%(self.name, str(e)))
        try:
            disk = self.get_disk()
        except:
            raise ex.excError("volume %s not found in %s"%(self.name, self.cloud_id))
        try:
            status = self.is_up()
        except ex.excError as e:
            self.log.error("abort gandi volume %s detach: %s"%(self.name, str(e)))
            # bug fix: same unbound 'status' fall-through as do_start
            raise
        if not status:
            self.log.info("gandi volume %s is already detached"%self.name)
            return
        self.log.info("detach gandi volume %s"%self.name)
        c = self.get_cloud()
        c.driver.ex_node_detach_disk(node, disk)

    def shutdown(self):
        pass

    def disklist(self):
        # cloud volumes expose no local device paths
        return []
opensvc-1.8~20170412/lib/provDiskAmazon.py 0000644 0001750 0001750 00000003264 13073467726 020357 0 ustar jkelbert jkelbert from provisioning import Provisioning
import rcExceptions as ex
class ProvisioningDisk(Provisioning):
    """
    Provision the Amazon EBS volumes declared in the resource
    'volumes' keyword, then rewrite the keyword with the resolved
    volume ids.
    """
    def __init__(self, r):
        Provisioning.__init__(self, r)
        # volume ids already concrete or created during this run
        self.volumes_done = []

    def provisioner(self):
        """
        Provision each declared volume, persist the resolved volume
        ids to the service configuration, and start the resource.
        """
        for volume in self.r.volumes:
            self._provisioner(volume)
        volumes = ' '.join(self.volumes_done)
        self.r.svc.config.set(self.r.rid, "volumes", volumes)
        self.r.svc.write_config()
        self.r.volumes = self.volumes_done
        self.r.log.info("provisioned")
        self.r.start()
        return True

    def _provisioner(self, volume):
        """
        Create one EBS volume from a '<key=value,...>' template, or
        record it untouched if it is already a concrete volume id.
        """
        # NOTE(review): this tests 'neither starts with < nor ends
        # with >'; a template is presumably bracketed on both sides,
        # so 'not (startswith and endswith)' may have been intended —
        # confirm against the keyword syntax
        if not volume.startswith("<") and not volume.endswith(">"):
            self.r.log.info("volume %s already provisioned" % volume)
            self.volumes_done.append(volume)
            return
        s = volume.strip("<>")
        v = s.split(",")
        kwargs = {}
        for e in v:
            try:
                key, val = e.split("=")
            except:
                raise ex.excError("format error: %s. expected key=value." % e)
            kwargs[key] = val
        cmd = ["ec2", "create-volume"]
        if "size" in kwargs:
            cmd += ["--size", kwargs["size"]]
        if "iops" in kwargs:
            cmd += ["--iops", kwargs["iops"]]
        if "availability-zone" in kwargs:
            cmd += ["--availability-zone", kwargs["availability-zone"]]
        else:
            # default to the availability zone of the local instance
            node = self.r.get_instance_data()
            availability_zone = node["Placement"]["AvailabilityZone"]
            cmd += ["--availability-zone", availability_zone]
        data = self.r.aws(cmd)
        # wait for the volume to reach the 'available' state before
        # recording it
        self.r.wait_avail(data["VolumeId"])
        self.volumes_done.append(data["VolumeId"])
opensvc-1.8~20170412/lib/rcStatus.py 0000644 0001750 0001750 00000013272 13073467726 017220 0 ustar jkelbert jkelbert """
This module defines the Status class and the functions
to convert a Status to its printable form or integer form.
"""
from rcColor import color, colorize
# status integer codes
UP = 0
DOWN = 1
WARN = 2
NA = 3
UNDEF = 5
STDBY_UP = 6
STDBY_DOWN = 7
# compound codes: a standby status merged with an instance status
STDBY_UP_WITH_UP = 8
STDBY_UP_WITH_DOWN = 9

# parseable string => status code (note: keys are lowercase)
STATUS_VALUE = {
    'up': UP,
    'down': DOWN,
    'warn': WARN,
    'n/a': NA,
    'na': NA,
    'undef': UNDEF,
    'stdby up': STDBY_UP,
    'stdby down': STDBY_DOWN,
}

# status code => human readable string
STATUS_STR = {
    UP: 'up',
    DOWN: 'down',
    WARN: 'warn',
    NA: 'n/a',
    UNDEF: 'undef',
    STDBY_UP: 'stdby up',
    STDBY_DOWN: 'stdby down',
    STDBY_UP_WITH_UP: 'up',
    STDBY_UP_WITH_DOWN: 'stdby up',
}
def encode_pair(status1, status2):
    """
    Return a hashable code unique for the set([status1, status2]).
    Each status sets one bit, so the code is order-insensitive.
    """
    code = 1 << status1
    code |= 1 << status2
    return code
# Commutative status merge table, keyed by the encoded pair.
# Pairs absent from this table are unsupported combinations and make
# Status._merge raise.
MERGE_RULES = {
    encode_pair(UP, UP): UP,
    encode_pair(UP, DOWN): WARN,
    encode_pair(UP, WARN): WARN,
    encode_pair(UP, NA): UP,
    encode_pair(UP, STDBY_UP): STDBY_UP_WITH_UP,
    encode_pair(UP, STDBY_DOWN): WARN,
    encode_pair(UP, STDBY_UP_WITH_UP): STDBY_UP_WITH_UP,
    encode_pair(UP, STDBY_UP_WITH_DOWN): WARN,
    encode_pair(DOWN, DOWN): DOWN,
    encode_pair(DOWN, WARN): WARN,
    encode_pair(DOWN, NA): DOWN,
    encode_pair(DOWN, STDBY_UP): STDBY_UP_WITH_DOWN,
    encode_pair(DOWN, STDBY_DOWN): STDBY_DOWN,
    encode_pair(DOWN, STDBY_UP_WITH_UP): WARN,
    encode_pair(DOWN, STDBY_UP_WITH_DOWN): STDBY_UP_WITH_DOWN,
    encode_pair(WARN, WARN): WARN,
    encode_pair(WARN, NA): WARN,
    encode_pair(WARN, STDBY_UP): WARN,
    encode_pair(WARN, STDBY_DOWN): WARN,
    encode_pair(WARN, STDBY_UP_WITH_UP): WARN,
    encode_pair(WARN, STDBY_UP_WITH_DOWN): WARN,
    encode_pair(NA, NA): NA,
    encode_pair(NA, STDBY_UP): STDBY_UP,
    encode_pair(NA, STDBY_DOWN): STDBY_DOWN,
    encode_pair(NA, STDBY_UP_WITH_UP): STDBY_UP_WITH_UP,
    encode_pair(NA, STDBY_UP_WITH_DOWN): STDBY_UP_WITH_DOWN,
    encode_pair(STDBY_UP, STDBY_UP): STDBY_UP,
    encode_pair(STDBY_UP, STDBY_DOWN): WARN,
    encode_pair(STDBY_UP, STDBY_UP_WITH_UP): STDBY_UP_WITH_UP,
    encode_pair(STDBY_UP, STDBY_UP_WITH_DOWN): STDBY_UP_WITH_DOWN,
    encode_pair(STDBY_DOWN, STDBY_DOWN): STDBY_DOWN,
    encode_pair(STDBY_DOWN, STDBY_UP_WITH_UP): WARN,
    encode_pair(STDBY_DOWN, STDBY_UP_WITH_DOWN): WARN,
    encode_pair(STDBY_UP_WITH_UP, STDBY_UP_WITH_DOWN): WARN,
    encode_pair(STDBY_UP_WITH_UP, STDBY_UP_WITH_UP): STDBY_UP_WITH_UP,
    encode_pair(STDBY_UP_WITH_DOWN, STDBY_UP_WITH_DOWN): STDBY_UP_WITH_DOWN,
}
def colorize_status(status, lpad=10):
    """
    Return the colorized human readable status string.
    <status> may be a Status object, an integer code or a string.
    The string is left-justified to <lpad> characters.
    """
    if isinstance(status, Status):
        status = str(status)
    elif isinstance(status, int):
        status = str(Status(status))
    fmt = "%-"+str(lpad)+"s"
    if status == "warn":
        return colorize(fmt % status, color.BROWN)
    elif status.endswith("down") or status in ("err", "error"):
        return colorize(fmt % status, color.RED)
    elif status.endswith("up") or status == "ok":
        return colorize(fmt % status, color.GREEN)
    elif status == "n/a":
        return colorize(fmt % status, color.LIGHTBLUE)
    return fmt % status
def status_value(status):
    """
    Return the machine readable status integer code, or None if the
    status string is unknown.
    """
    # bug fix: normalize once so the membership test and the lookup
    # agree. The original tested the raw string but looked up the
    # lowercased one, so mixed-case input like "UP" or "N/A" wrongly
    # returned None for a valid status.
    status = status.lower()
    if status not in STATUS_VALUE:
        return
    return STATUS_VALUE[status]
def status_str(val):
    """
    Return the human readable status string, or None if <val> is not
    a known status code.
    """
    return STATUS_STR.get(val)
class Status(object):
    """
    Class that wraps printing and calculation of resource status
    """
    @staticmethod
    def _merge(status1, status2):
        """
        Merge two status: WARN taints UP and DOWN
        """
        if status1 not in STATUS_STR:
            raise Exception("left member has unsupported value: %s" % str(status1))
        elif status2 not in STATUS_STR:
            raise Exception("right member has unsupported value: %s" % str(status2))
        # UNDEF is the neutral element of the merge
        if status1 == UNDEF:
            return status2
        elif status2 == UNDEF:
            return status1
        setstate = encode_pair(status1, status2)
        if setstate not in MERGE_RULES:
            raise Exception("some member has unsupported value: %s , %s " % \
                            (str(status1), str(status2)))
        return MERGE_RULES[setstate]

    def value(self):
        """
        Return the integer status code.
        """
        return self.status

    def reset(self):
        """
        Reset the status to 'undef'.
        """
        self.status = UNDEF

    def __add__(self, other):
        # NOTE(review): unlike the usual __add__ contract this
        # mutates and returns self instead of a new instance
        self.status = self._merge(self.status, other.status)
        return self

    def __iadd__(self, other):
        # accept either a Status object or a raw integer code
        if isinstance(other, Status):
            self.status = self._merge(self.status, other.status)
        else:
            self.status = self._merge(self.status, other)
        return self

    def __eq__(self, other):
        # compare against a Status, an integer code or a status string
        if isinstance(other, Status):
            return self.status == other.status
        try:
            other = int(other)
            return self.status == other
        except (ValueError, TypeError):
            pass
        return str(self) == other

    def __str__(self):
        return status_str(self.status)

    def __init__(self, initial_status=None):
        # accept a Status, an int code, a parseable string, or None
        # (meaning 'undef')
        if isinstance(initial_status, Status):
            self.status = initial_status.status
        elif isinstance(initial_status, int):
            self.status = initial_status
        elif initial_status is None:
            self.status = UNDEF
        else:
            try:
                self.status = int(initial_status)
            except (ValueError, TypeError):
                self.status = STATUS_VALUE[str(initial_status)]
opensvc-1.8~20170412/lib/rcPkgLinux.py 0000644 0001750 0001750 00000003226 13073467726 017474 0 ustar jkelbert jkelbert import os
from rcUtilities import call, which
from rcGlobalEnv import rcEnv
import datetime
from stat import *
def listpkg_dummy():
    """
    Fallback package lister for hosts with no supported package
    manager: report and return an empty inventory.
    """
    # bug fix: the message read "pushpkg supported on this system",
    # dropping the negation
    print("pushpkg not supported on this system")
    return []
def listpkg_rpm():
    """
    Return the installed rpm package inventory, one entry per
    package: [nodename, name, version-release, arch, "rpm",
    install date, signature].
    """
    (ret, out, err) = call(cmd, errlog=False, cache=True)
    lines = []
    # bug fix: make sure 'sig' is bound even when the first package
    # record carries no Signature line (NameError otherwise)
    sig = ""
    for line in out.split('\n'):
        if line.startswith('Signature'):
            sig = line.split()[-1].strip()
            continue
        elif not line.startswith('XX'):
            continue
        # strip the 'XX' marker emitted by the rpm queryformat
        line = line[2:]
        l = line.split()
        if len(l) < 5:
            continue
        try:
            # l[4] is the epoch install time: render human readable
            l[4] = datetime.datetime.fromtimestamp(int(l[4])).strftime("%Y-%m-%d %H:%M:%S")
        except:
            l[4] = ""
        x = [rcEnv.nodename] + l + [sig]
        lines.append(x)
    return lines
def listpkg_deb():
    """
    Return the installed deb package inventory, one entry per
    package: [nodename, name, version, arch, "deb", install date].
    """
    (ret, out, err) = call(cmd, errlog=False, cache=True)
    lines = []
    # NOTE(review): arch is always reported empty here, though
    # 'dpkg -l' does print an architecture column on multiarch
    # systems — confirm this is intended
    arch = ""
    for line in out.splitlines():
        l = line.split()
        if len(l) < 4:
            continue
        if l[0] != "ii":
            # keep only properly installed packages
            continue
        x = [rcEnv.nodename] + l[1:3] + [arch, "deb"]
        try:
            # use the dpkg file list mtime as the install date
            t = os.stat("/var/lib/dpkg/info/"+l[1]+".list")[ST_MTIME]
            t = datetime.datetime.fromtimestamp(t).strftime("%Y-%m-%d %H:%M:%S")
        except:
            t = ""
        x.append(t)
        lines.append(x)
    return lines
# Select the package lister matching the package manager installed on
# this host: dpkg is probed first, then rpm; fall back to the dummy
# lister on unsupported systems.
if which('dpkg') is not None:
    cmd = ['dpkg', '-l']
    listpkg = listpkg_deb
elif which('rpm') is not None:
    cmd = ['rpm', '-qai', '--queryformat=XX%{n} %{v}-%{r} %{arch} rpm %{installtime}\n']
    listpkg = listpkg_rpm
else:
    cmd = ['true']
    listpkg = listpkg_dummy
def listpatch():
    """
    Patch inventories do not apply to Linux: return an empty list.
    """
    return []
opensvc-1.8~20170412/lib/rcDevTreeVeritas.py 0000644 0001750 0001750 00000016715 13073467726 020636 0 ustar jkelbert jkelbert import glob
import os
from subprocess import *
from rcUtilities import which, justcall
import rcDevTree
from rcGlobalEnv import rcEnv
class DevTreeVeritas(rcDevTree.DevTree):
vxprint_cache = {}
vxdisk_cache = {}
def vx_get_size(self, name):
    """
    Return the size in MB of the veritas volume set <name>, formatted
    '<diskgroup>/<volume set>', as the sum of the sizes of its 'v'
    records in the vxprint output.
    """
    _dg, _vt = name.split("/")
    out = self.vxprint(_dg)
    size = 0
    for line in reversed(out.split("\n")):
        fields = line.split()
        if len(fields) < 5:
            continue
        if fields[0] != "v":
            continue
        # keep volumes owned by the vset, or the standalone volume
        # itself. Note: the original also assigned an unused 'name'
        # local here, shadowing the parameter — removed.
        if fields[2] == _vt or fields[1] == _vt:
            # sizes carry a 'm' suffix (vxprint -u m)
            size += int(float(fields[5].rstrip("m")))
    return size
def vx_get_lv_disks(self, devname):
"""
dg vg_sanperftest all all 27000 1426245297.43.parcl1110221a
dm 28785_281 3pardata0_281 auto 32.00m 34782.68m -
sd 28785_281-01 lvset_sanperftest_01-01 28785_281 0.00m 16384.00m 0.00m 3pardata0_281 ENA
sd 28785_281-02 lvset_sanperftest_02-01 28785_281 16384.00m 10240.00m 0.00m 3pardata0_281 ENA
sd 28785_281-03 lv_sanperftest_01-01 28785_281 26624.00m 1024.00m 0.00m 3pardata0_281 ENA
pl lv_sanperftest_01-01 lv_sanperftest_01 ENABLED ACTIVE 1024.00m CONCAT - RW
pl lvset_sanperftest_01-01 lvset_sanperftest_01 ENABLED ACTIVE 16384.00m CONCAT - RW
pl lvset_sanperftest_02-01 lvset_sanperftest_02 ENABLED ACTIVE 10240.00m CONCAT - RW
v lv_sanperftest_01 - ENABLED ACTIVE 1024.00m SELECT - fsgen
v lvset_sanperftest_01 vset_sanperftest ENABLED ACTIVE 16384.00m SELECT - fsgen
v lvset_sanperftest_02 vset_sanperftest ENABLED ACTIVE 10240.00m SELECT - fsgen
vt vset_sanperftest - ENABLED ACTIVE 2
"""
_dg, _vt = devname.split("/")
out = self.vxprint(_dg)
sd = {}
v = []
pl = []
lines = out.split("\n")
lines.reverse()
for line in lines:
l = line.split()
if len(l) < 5:
continue
if l[0] == "v":
name = l[1]
if l[2] == _vt or l[1] == _vt:
v.append(name)
continue
if l[0] == "pl":
name = l[1]
if l[2] in v:
pl.append(name)
continue
if l[0] == "sd":
name = l[1]
if l[2] in pl:
dm = l[3]
size = int(float(l[5].rstrip("m")))
if dm not in sd:
sd[dm] = {
"devname": dm,
"size": size
}
else:
sd[dm]["size"] += size
if l[0] == "dm":
dmname = l[1]
dname = l[2]
for dm in sd:
if sd[dm]["devname"] == dmname:
sd[dm]["devname"] = dname
return sd.values()
def vxprint(self, dg):
if dg in self.vxprint_cache:
return self.vxprint_cache[dg]
cmd = ["vxprint", "-t", "-u", "m", "-g", dg]
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
self.vxprint_cache[dg] = out
return out
def get_mp_dmp(self):
self.dmp = {}
if not which("vxdmpadm"):
return {}
cmd = ['vxdmpadm', 'getsubpaths']
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if p.returncode != 0:
return {}
lines = out.split('\n')
if len(lines) < 3:
return {}
lines = lines[2:]
mp_h = {}
for line in lines:
l = line.split()
if len(l) < 4:
continue
name = l[3]
dev = self.devprefix+l[0]
if name in self.dmp:
self.dmp[name].append(dev)
else:
self.dmp[name] = [dev]
if name not in mp_h or mp_h[name] == "unknown" or mp_h[name] == name:
d = self.vxdisk_cache.get("/dev/vx/rdmp/"+name)
if d is None:
wwid = name
else:
wwid = d.get("wwid")
mp_h[name] = wwid
return mp_h
def vx_inq(self, dev):
self.load_vxdisk_cache()
if dev in self.vxdisk_cache:
return self.vxdisk_cache[dev].get("wwid", "unknown")
return "unknown"
def load_vxdisk_cache(self):
if len(self.vxdisk_cache) != 0:
return
cmd = ["/usr/sbin/vxdisk", "-p", "list"]
(out, err, ret) = justcall(cmd)
if ret != 0:
return "unknown"
for line in out.split("\n"):
l = line.split(": ")
if len(l) != 2:
continue
key = l[0].strip()
if key == "DISK":
disk = l[1].strip()
_key = "/dev/vx/rdmp/"+disk
self.vxdisk_cache[_key] = {"wwid": disk}
elif key == "SCSI3_VPD_ID":
# NAA:6000... or 6000...
self.vxdisk_cache[_key]["wwid"] = l[1].split(":")[-1].strip()
elif key == "LUN_SIZE":
self.vxdisk_cache[_key]["size"] = int(l[1].strip())/2048
elif key == "DMP_SINGLE_PATH":
self.vxdisk_cache[_key]["devpath"] = l[1].strip()
def load_vx_dmp(self):
self.load_vxdisk_cache()
if os.path.exists("/dev/rdsk"):
self.devprefix = "/dev/rdsk/"
else:
self.devprefix = "/dev/"
wwid_h = self.get_mp_dmp()
for devname in wwid_h:
rdevpath = "/dev/vx/rdmp/"+devname
if rdevpath not in self.vxdisk_cache:
continue
size = self.vxdisk_cache[rdevpath].get("size", 0)
d = self.add_dev(devname, size, "multipath")
if d is None:
continue
d.set_devpath("/dev/vx/dmp/"+devname)
d.set_devpath(rdevpath)
d.set_alias(wwid_h[devname])
for path in self.dmp[devname]:
pathdev = path.replace(self.devprefix, "")
p = self.add_dev(pathdev, size, "linear")
p.set_devpath(path)
p.add_child(devname)
d.add_parent(pathdev)
if False and self.devprefix == "/dev/rdsk/" and path.endswith("s2"):
_pathdev = pathdev[:-2]
p = self.add_dev(_pathdev, size, "linear")
p.add_child(devname)
d.add_parent(_pathdev)
def load_vx_vm(self):
for devpath in glob.glob("/dev/vx/dsk/*/*"):
devname = devpath.replace("/dev/vx/dsk/", "")
disks = self.vx_get_lv_disks(devname)
if len(disks) == 0:
# discard snaps for now
continue
size = self.vx_get_size(devname)
d = self.add_dev(devname, size, "linear")
if d is None:
continue
d.set_devpath("/dev/vx/dsk/"+devname)
d.set_devpath("/dev/vx/rdsk/"+devname)
for disk in disks:
cdevname = disk["devname"]
csize = disk["size"]
p = self.add_dev(cdevname, csize, "linear")
p.set_devpath(cdevname)
p.add_child(devname, csize, "linear")
d.add_parent(cdevname, csize, "linear")
opensvc-1.8~20170412/lib/svcVcs.py 0000644 0001750 0001750 00000016025 13073467726 016656 0 ustar jkelbert jkelbert import os
import svc
import rcExceptions as ex
from rcUtilities import justcall
from rcGlobalEnv import rcEnv
class SvcVcs(svc.Svc):
    """Veritas Cluster Server service wrapper.

    Discovers a VCS service group's resources through the hagrp/hares
    commands and maps them to OpenSVC resource objects.
    """
    builder_props = [
        "nodes",
    ]

    def __init__(self, svcname, pkg_name=None):
        self.type = "vcs"
        svc.Svc.__init__(self, svcname)
        self.pkg_name = pkg_name
        # cached "hostname -d" result, see get_domainname()
        self.domainname = None
        # flipped to False on the first "Cannot connect" error so we
        # stop hammering an unreachable VCS engine
        self.vcs_operational = True
        # per-type resource counters used to build unique rids
        self.n_ip = 0
        self.n_fs = 0
        self.n_vg = 0
        self.n_lv = 0

    def get_res_val(self, res, p):
        """Return the value of attribute p of VCS resource res."""
        if not self.vcs_operational:
            raise ex.excError("VCS is not operational")
        cmd = ['/opt/VRTSvcs/bin/hares', '-value', res, p]
        out, err, ret = justcall(cmd)
        if ret != 0:
            if "Cannot connect" in out:
                self.vcs_operational = False
            raise ex.excError(out)
        return out.strip()

    def get_grp_val(self, p):
        """Return the value of attribute p of the VCS service group."""
        if not self.vcs_operational:
            raise ex.excError("VCS is not operational")
        cmd = ['/opt/VRTSvcs/bin/hagrp', '-value', self.pkg_name, p]
        out, err, ret = justcall(cmd)
        if ret != 0:
            if "Cannot connect" in out:
                self.vcs_operational = False
            raise ex.excError(out)
        return out.strip()

    def get_domainname(self):
        """Return the node's dns domain name, cached after first call."""
        if self.domainname is not None:
            return self.domainname
        cmd = ['hostname', '-d']
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError(err)
        # Bug fix: cache the stripped value. The original cached the
        # raw output (with trailing newline) but returned the stripped
        # one, so the second call returned a different value.
        self.domainname = out.strip()
        return self.domainname

    def set_nodes(self):
        """Derive self.nodes from the group SystemList, qualifying node
        names with the local domain when appropriate."""
        try:
            s = self.get_grp_val('SystemList')
        except ex.excError:
            self.nodes = set([rcEnv.nodename])
            return
        l = s.split()
        # SystemList goes in system/weight pairs
        if len(l) < 2 or len(l) % 2 != 0:
            raise ex.excError("unexpected SystemList value: %s"%s)
        self.nodes = set([])
        for i, w in enumerate(l):
            if i % 2 == 1:
                # skip the weight token of each pair
                continue
            self.nodes.add(w)
        domainname = self.get_domainname()
        if len(domainname) > 0 and rcEnv.nodename.endswith(domainname):
            # local node is fqdn: fqdn-ify the other members too
            nodes = set([])
            for w in self.nodes:
                if w.endswith(domainname):
                    nodes.add(w)
                else:
                    nodes.add(w+"."+domainname)
            self.nodes = nodes

    def builder(self):
        if self.pkg_name is None:
            raise ex.excInitError("pkg name is not set")
        self.set_nodes()
        self.load_hb()
        self.load_resources()

    def load_hb(self):
        # attach the VCS heartbeat pseudo-resource
        rid = 'hb#vcs0'
        m = __import__("resHbVcs")
        r = m.Hb(rid, name=self.pkg_name)
        self += r

    def load_resources(self):
        """Enumerate the group's resources and load each of them."""
        if not self.vcs_operational:
            return
        cmd = ['/opt/VRTSvcs/bin/hagrp', '-resources', self.pkg_name]
        out, err, ret = justcall(cmd)
        if ret != 0:
            return
        resource_names = out.strip().split('\n')
        for resource_name in resource_names:
            self.load_resource(resource_name)

    def load_resource(self, name):
        """Dispatch a VCS resource to its type-specific loader."""
        s = self.get_res_val(name, 'Type')
        if s == 'Mount':
            self.load_fs(name)
            self.n_fs += 1
        elif s == 'CFSMount':
            self.load_cfs(name)
            self.n_fs += 1
        elif s == 'DiskGroup':
            self.load_vg(name)
            self.n_vg += 1
        elif s == 'CVMVolDg':
            self.load_cvg(name)
            self.n_vg += 1
        elif s == 'Volume':
            self.load_lv(name)
            self.n_lv += 1
        elif s == 'IP':
            self.load_ip(name)
            self.n_ip += 1

    def load_lv(self, name):
        """Map a VCS Volume resource to an OpenSVC lv disk resource."""
        lvname = self.get_res_val(name, 'Volume')
        vgname = self.get_res_val(name, 'DiskGroup')
        disabled = True if self.get_res_val(name, 'Enabled') == "0" else False
        monitor = True if self.get_res_val(name, 'Critical') == "1" else False
        rid = 'lv#vcs%d'%self.n_lv
        m = __import__("resDiskLvVcs"+rcEnv.sysname)
        r = m.Disk(rid, vgname=vgname, lvname=lvname,
                   disabled=disabled, monitor=monitor)
        r.vcs_name = name
        self += r

    def load_cvg(self, name):
        """Map a VCS CVMVolDg resource to an OpenSVC vg disk resource."""
        vgname = self.get_res_val(name, 'CVMDiskGroup')
        disabled = True if self.get_res_val(name, 'Enabled') == "0" else False
        monitor = True if self.get_res_val(name, 'Critical') == "1" else False
        rid = 'vg#vcs%d'%self.n_vg
        m = __import__("resDiskVgVcs"+rcEnv.sysname)
        r = m.Disk(rid, name=vgname, disabled=disabled, monitor=monitor)
        r.vcs_name = name
        self += r

    def load_vg(self, name):
        """Map a VCS DiskGroup resource to an OpenSVC vg disk resource."""
        vgname = self.get_res_val(name, 'DiskGroup')
        disabled = True if self.get_res_val(name, 'Enabled') == "0" else False
        monitor = True if self.get_res_val(name, 'Critical') == "1" else False
        rid = 'vg#vcs%d'%self.n_vg
        m = __import__("resDiskVgVcs"+rcEnv.sysname)
        r = m.Disk(rid, name=vgname, disabled=disabled, monitor=monitor)
        r.vcs_name = name
        self += r

    def load_ip(self, name):
        """Map a VCS IP resource to an OpenSVC ip resource."""
        dev = self.get_res_val(name, 'Device')
        ipaddr = self.get_res_val(name, 'Address')
        netmask = self.get_res_val(name, 'NetMask')
        disabled = True if self.get_res_val(name, 'Enabled') == "0" else False
        monitor = True if self.get_res_val(name, 'Critical') == "1" else False
        rid = 'ip#vcs%d'%self.n_ip
        m = __import__("resIpVcs"+rcEnv.sysname)
        r = m.Ip(rid, ipdev=dev, ipname=ipaddr, mask=netmask,
                 disabled=disabled, monitor=monitor)
        r.vcs_name = name
        self += r

    def load_fs(self, name):
        """Map a VCS Mount resource to an OpenSVC fs resource."""
        dev = self.get_res_val(name, 'BlockDevice')
        mnt = self.get_res_val(name, 'MountPoint')
        mntopt = self.get_res_val(name, 'MountOpt')
        fstype = self.get_res_val(name, 'FSType')
        disabled = True if self.get_res_val(name, 'Enabled') == "0" else False
        monitor = True if self.get_res_val(name, 'Critical') == "1" else False
        rid = 'fs#vcs%d'%self.n_fs
        m = __import__("resFsVcs"+rcEnv.sysname)
        r = m.Mount(rid=rid, mount_point=mnt, device=dev, fs_type=fstype,
                    mount_options=mntopt,
                    disabled=disabled, monitor=monitor)
        r.vcs_name = name
        self += r

    def load_cfs(self, name):
        """Map a VCS CFSMount resource to an OpenSVC fs resource."""
        dev = self.get_res_val(name, 'BlockDevice')
        mnt = self.get_res_val(name, 'MountPoint')
        mntopt = self.get_res_val(name, 'MountOpt')
        fstype = self.get_res_val(name, 'AMFMountType')
        disabled = True if self.get_res_val(name, 'Enabled') == "0" else False
        monitor = True if self.get_res_val(name, 'Critical') == "1" else False
        rid = 'fs#vcs%d'%self.n_fs
        m = __import__("resFsVcs"+rcEnv.sysname)
        r = m.Mount(rid=rid, mount_point=mnt, device=dev, fs_type=fstype,
                    mount_options=mntopt,
                    disabled=disabled, monitor=monitor)
        r.vcs_name = name
        self += r

    def resource_monitor(self):
        # resource monitoring is delegated to the VCS engine
        pass
opensvc-1.8~20170412/lib/provFsExt2.py 0000644 0001750 0001750 00000000176 13073467726 017431 0 ustar jkelbert jkelbert import provFs
class ProvisioningFs(provFs.ProvisioningFs):
    """ext2 provisioner: only overrides the mkfs and info commands."""
    # -F: force even on a whole device, -q: quiet
    mkfs = ['mkfs.ext2', '-F', '-q']
    info = ['tune2fs', '-l']
opensvc-1.8~20170412/lib/rcMountsFreeBSD.py 0000644 0001750 0001750 00000002475 13073467726 020360 0 ustar jkelbert jkelbert import rcMounts
from rcUtilities import *
class Mounts(rcMounts.Mounts):
    """Parse FreeBSD 'mount' output into rcMounts.Mount objects."""
    df_one_cmd = ['df', '-l']

    def match_mount(self, i, dev, mnt):
        """Given a line of 'mount' output, returns True if (dev, mnt) matches
        this line. Returns False otherwise. Also care about weirdos like loops
        and binds, ...
        """
        if os.path.isdir(dev):
            is_bind = True
            src_dir_dev = self.get_src_dir_dev(dev)
        else:
            is_bind = False
        if i.mnt != mnt:
            return False
        if i.dev == dev:
            return True
        if is_bind and i.dev == src_dir_dev:
            return True
        return False

    def __init__(self):
        self.mounts = []
        (ret, out, err) = call(['mount'])
        for l in out.split('\n'):
            # mount output format: <dev> on <mnt> (<type>, <opt>, ...)
            words = l.split()
            if len(words) < 4:
                # Bug fix: this used to 'return', so any short line
                # (e.g. the trailing empty one) aborted the constructor
                # and silently dropped all subsequent mount entries.
                continue
            dev = words[0]
            mnt = words[2]
            opts = ' '.join(words[3:]).strip('(').strip(')').split(', ')
            # first option token is the filesystem type; the second
            # (e.g. "local") is intentionally skipped
            fs_type = opts[0]
            if len(opts) < 3:
                mnt_opt = ''
            else:
                mnt_opt = ','.join(opts[2:])
            m = rcMounts.Mount(dev, mnt, fs_type, mnt_opt)
            self.mounts.append(m)
if __name__ == "__main__" :
    # quick manual check: dump the class help then the parsed mounts
    help(Mounts)
    for m in Mounts():
        print(m)
opensvc-1.8~20170412/lib/rcUpdatePkgAIX.py 0000644 0001750 0001750 00000000337 13073467726 020161 0 ustar jkelbert jkelbert from subprocess import *
repo_subdir = "rpms"
def update(fpath):
    """Install or upgrade the rpm package at fpath on AIX.

    Forces installation past os and dependency checks, echoes the
    command before running it, and returns the rpm exit code.
    """
    install_cmd = ['rpm', '-U', fpath, '--force', '--ignoreos', '--nodeps']
    print(' '.join(install_cmd))
    proc = Popen(install_cmd)
    proc.communicate()
    return proc.returncode
opensvc-1.8~20170412/lib/provFsBtrfs.py 0000644 0001750 0001750 00000006517 13073467726 017674 0 ustar jkelbert jkelbert import provFs
import tempfile
import os
import time
import rcExceptions as ex
from rcUtilities import which, justcall, lazy
from svcBuilder import conf_get_string_scope
class ProvisioningFs(provFs.ProvisioningFs):
    """btrfs provisioner.

    Formats the device with a service-derived label, rewrites the
    resource "dev" keyword to a LABEL= reference, and optionally
    creates the subvolume named in the mount options.
    """
    info = ['btrfs', 'device', 'ready']

    @lazy
    def mkfs(self):
        return ['mkfs.btrfs', '-f', '-L', self.label]

    @lazy
    def raw_label(self):
        # unresolved form stored in the config file ({svcname} reference)
        return '{svcname}.' + self.r.rid.replace("#", ".")

    @lazy
    def label(self):
        # resolved filesystem label, ex: mysvc.fs.1
        return self.r.svc.svcname + '.' + self.r.rid.replace("#", ".")

    def current_label(self, mnt):
        # return the on-disk label of the fs mounted at mnt, or None
        # (implicitly) when unreadable or empty
        cmd = ["btrfs", "filesystem", "label", mnt]
        ret, out, err = self.r.call(cmd, errlog=False)
        if ret == 0 and len(out.strip()) > 0:
            return out.strip()

    @lazy
    def subvol(self):
        # extract the subvol=<name> mount option, None (implicitly)
        # when absent
        l = self.r.mount_options.split(",")
        for e in l:
            if not e.startswith("subvol="):
                continue
            subvol = e.replace("subvol=", "")
            return subvol

    def cleanup(self, mnt):
        # unmount and remove the temporary mount point
        cmd = ["umount", mnt]
        self.r.vcall(cmd)
        os.removedirs(mnt)

    def write_label(self, mnt):
        # keep a pre-existing label, else use the computed one, then
        # persist the LABEL= device reference in the service config
        current_label = self.current_label(mnt)
        if current_label is not None:
            label = current_label
            raw_label = current_label.replace(self.r.svc.svcname, "{svcname}")
        else:
            label = self.label
            raw_label = self.raw_label
        self.r.svc.config.set(self.r.rid, "dev", "LABEL="+raw_label)
        self.r.svc.write_config()
        self.r.device = "LABEL="+label
        self.wait_label(label)

    def wait_label(self, label):
        """Wait until the filesystem label resolves through findfs."""
        if which("findfs") is None:
            self.r.log.info("findfs program not found, wait arbitrary 20 seconds for label to be usable")
            time.sleep(20)
            # NOTE(review): no return here, so the findfs polling loop
            # below still runs even though findfs is known to be
            # absent -- confirm whether a return was intended.
        cmd = ["findfs", "LABEL="+label]
        for i in range(20):
            out, err, ret = justcall(cmd)
            self.r.log.debug("%s\n%s\n%s" % (" ".join(cmd), out, err))
            if ret == 0:
                return
            self.r.log.info("label is not usable yet (%s)" % err.strip())
            time.sleep(2)
        raise ex.excError("timeout waiting for label to become usable")

    def mount(self, mnt):
        # mount the top-level volume (subvolid=0) so subvolumes can be
        # created at the fs root
        cmd = ["mount", "-t", "btrfs", "-o", "subvolid=0", self.r.device, mnt]
        ret, out, err = self.r.vcall(cmd)
        if ret != 0:
            raise ex.excError

    def create_subvol(self):
        # mount on a temp dir, label, create the subvol, always cleanup
        if self.subvol is None:
            return
        mnt = tempfile.mkdtemp()
        self.mount(mnt)
        try:
            self.write_label(mnt)
            self._create_subvol(mnt)
        finally:
            self.cleanup(mnt)

    def _create_subvol(self, mnt):
        path = os.path.join(mnt, self.subvol)
        if os.path.exists(path):
            return
        cmd = ["btrfs", "subvol", "create", path]
        ret, out, err = self.r.vcall(cmd)
        if ret != 0:
            raise ex.excError

    def provisioner(self):
        """Entry point: format if needed, create the subvolume, start."""
        self.r.device = conf_get_string_scope(self.r.svc, self.r.svc.config, self.r.rid, "dev")
        if self.r.device.startswith("LABEL=") or self.r.device.startswith("UUID="):
            self.r.log.info("skip formatting because dev is specified by LABEL or UUID")
        else:
            provFs.ProvisioningFs.provisioner_fs(self)
        self.create_subvol()
        self.r.log.info("provisioned")
        self.r.start()
opensvc-1.8~20170412/lib/resDiskRawSunOS.py 0000644 0001750 0001750 00000001654 13073467726 020417 0 ustar jkelbert jkelbert from __future__ import print_function
import os
import re
import rcStatus
import resDiskRaw
class Disk(resDiskRaw.Disk):
    """SunOS raw disk resource.

    disklist() reports the whole-disk (s2) slice of each /dev/rdsk
    controller device; devlist() reports the validated devices with
    symlinks resolved.
    """

    def disklist(self):
        normalized = set()
        slice_tail = re.compile('s[0-9]*$', re.UNICODE)
        for dev in self.devlist():
            if re.match("^/dev/rdsk/c[0-9]*", dev) is None:
                continue
            if not os.path.exists(dev):
                continue
            if re.match('^.*s[0-9]*$', dev) is None:
                # no slice suffix: append the whole-disk slice
                normalized.add(dev + "s2")
            else:
                # replace whatever slice suffix with s2
                normalized.add(slice_tail.sub('s2', dev))
        return normalized

    def devlist(self):
        self.validate_devs()
        resolved = set()
        for dev in self.devs:
            if not os.path.exists(dev):
                continue
            if os.path.islink(dev) and not dev.startswith("/devices"):
                # resolve symlinks, but leave /devices paths untouched
                dev = os.path.realpath(dev)
            resolved.add(dev)
        return resolved
opensvc-1.8~20170412/lib/rcUtilitiesFreeBSD.py 0000644 0001750 0001750 00000000444 13073467726 021040 0 ustar jkelbert jkelbert from rcUtilities import call
def check_ping(addr, timeout=5, count=1):
    """Return True if addr answers count ICMP echo requests.

    Uses ping6 for ipv6 addresses; the timeout option is only passed
    to the ipv4 ping, as in the original implementation.
    """
    if ':' in addr:
        base_cmd = ['ping6']
    else:
        base_cmd = ['ping', '-W', str(timeout)]
    (ret, out, err) = call(base_cmd + ['-c', repr(count), addr])
    return ret == 0
opensvc-1.8~20170412/lib/resIpAmazon.py 0000644 0001750 0001750 00000011004 13073467726 017627 0 ustar jkelbert jkelbert import resIp
import os
import rcStatus
from rcGlobalEnv import rcEnv
import rcExceptions as ex
from rcAmazon import Amazon
from rcUtilities import getaddr
rcIfconfig = __import__('rcIfconfig'+rcEnv.sysname)
class Ip(resIp.Ip, Amazon):
    """EC2 ip resource.

    Assigns a private ip to this instance's matching network interface
    (ENI) through the aws cli, and optionally associates an elastic ip
    to that private ip.
    """
    def __init__(self,
                 rid=None,
                 ipname=None,
                 ipdev=None,
                 eip=None,
                 **kwargs):
        resIp.Ip.__init__(self,
                          rid=rid,
                          ipname=ipname,
                          ipdev=ipdev,
                          **kwargs)
        self.label = "ec2 ip %s@%s" % (ipname, ipdev)
        if eip:
            self.label += ", eip %s" % eip
        # optional elastic ip (name or address)
        self.eip = eip

    def get_eip(self):
        """Return the aws Addresses entry for self.eip, or None."""
        # NOTE(review): the getaddr() result is unused -- presumably
        # meant to resolve/validate self.eip before the aws call;
        # confirm intent.
        ip = getaddr(self.eip, True)
        data = self.aws(["ec2", "describe-addresses", "--public-ips", self.eip], verbose=False)
        try:
            addr = data["Addresses"][0]
        except:
            addr = None
        return addr

    def get_instance_private_addresses(self):
        """Return all private ips assigned to this instance's ENIs."""
        instance_data = self.get_instance_data(refresh=True)
        if instance_data is None:
            raise ex.excError("can't find instance data")
        ips = []
        for eni in instance_data["NetworkInterfaces"]:
            ips += [ pa["PrivateIpAddress"] for pa in eni["PrivateIpAddresses"] ]
        return ips

    def get_network_interface(self):
        """Return the ENI id whose private ips overlap the ips
        configured on the local ipdev interface."""
        ifconfig = rcIfconfig.ifconfig()
        intf = ifconfig.interface(self.ipdev)
        ips = set(intf.ipaddr + intf.ip6addr)
        instance_data = self.get_instance_data(refresh=True)
        if instance_data is None:
            raise ex.excError("can't find instance data")
        for eni in instance_data["NetworkInterfaces"]:
            _ips = set([ pa["PrivateIpAddress"] for pa in eni["PrivateIpAddresses"] ])
            if len(ips & _ips) > 0:
                return eni["NetworkInterfaceId"]

    def is_up(self):
        """Returns True if ip is associated with this node
        """
        self.getaddr()
        ips = self.get_instance_private_addresses()
        if self.addr not in ips:
            return False
        return True

    def _status(self, verbose=False):
        # map is_up() to the resource status, honoring always_on
        try:
            s = self.is_up()
        except ex.excError as e:
            self.status_log(str(e))
            return rcStatus.WARN
        if rcEnv.nodename in self.always_on:
            if s:
                return rcStatus.STDBY_UP
            else:
                return rcStatus.STDBY_DOWN
        else:
            if s:
                return rcStatus.UP
            else:
                return rcStatus.DOWN

    def check_ping(self, count=1, timeout=5):
        # ping-based liveness check is meaningless in EC2: ips can be
        # assigned without being plumbed locally
        pass

    def start_assign(self):
        """Assign the private ip to this instance's ENI (idempotent)."""
        if self.is_up():
            self.log.info("ec2 ip %s is already assigned to this node" % self.addr)
            return
        eni = self.get_network_interface()
        if eni is None:
            raise ex.excError("could not find ec2 network interface for %s" % self.ipdev)
        data = self.aws([
            "ec2", "assign-private-ip-addresses",
            "--network-interface-id", eni,
            "--private-ip-address", self.addr,
            "--allow-reassignment"
        ])
        self.can_rollback = True

    def start_associate(self):
        """Associate the elastic ip to the private ip (idempotent)."""
        if self.eip is None:
            return
        eip = self.get_eip()
        if eip is None:
            raise ex.excError("eip %s is not allocated" % self.eip)
        if "PrivateIpAddress" in eip and eip["PrivateIpAddress"] == self.addr:
            self.log.info("eip %s is already associated to private ip %s" % (eip["PublicIp"], self.addr))
            return
        data = self.aws([
            "ec2", "associate-address",
            "--allocation-id", eip["AllocationId"],
            "--private-ip-address", self.addr,
            "--instance-id", self.get_instance_id()
        ])

    def start(self):
        self.start_assign()
        self.start_associate()

    def stop(self):
        """Unassign the private ip from this instance's ENI."""
        if not self.is_up():
            self.log.info("ec2 ip %s is already unassigned from this node" % self.addr)
            return
        eni = self.get_network_interface()
        if eni is None:
            raise ex.excError("could not find ec2 network interface for %s" % self.ipdev)
        data = self.aws([
            "ec2", "unassign-private-ip-addresses",
            "--network-interface-id", eni,
            "--private-ip-address", self.addr
        ])

    def shutdown(self):
        pass

    def provision(self):
        # delegate to the dedicated provisioner module
        m = __import__("provIpAmazon")
        prov = getattr(m, "ProvisioningIp")(self)
        prov.provisioner()
opensvc-1.8~20170412/lib/tests/ 0000755 0001750 0001750 00000000000 13073467726 016173 5 ustar jkelbert jkelbert opensvc-1.8~20170412/lib/tests/test_svc_restart.py 0000644 0001750 0001750 00000004322 13073467726 022144 0 ustar jkelbert jkelbert # coding: utf8
from __future__ import print_function
import platform
import time
from nose.plugins.skip import Skip, SkipTest
from multiprocessing import Process
import svc
import resFsLinux
import rcExceptions as ex
import rcLogger
import rcStatus
# service name used by all fixtures in this module
SVCNAME = "unittest"

# these tests exercise Linux bind mounts; skip the module elsewhere
if platform.uname()[0] != "Linux":
    raise SkipTest
class Mount(resFsLinux.Mount):
    """Test double: fs resource whose stop lingers 2 extra seconds,
    leaving a window for concurrent actions to contend on the lock."""
    def stop(self):
        resFsLinux.Mount.stop(self)
        time.sleep(2)
class TestSvc:
    """Start/stop a bind-mount service and verify resource restart and
    action-lock behaviors."""

    def tearDown(self):
        self.svc.action("stop")
        if self.svc.node:
            self.svc.node.close()

    def setUp(self):
        # silence log handlers set up by previous tests
        rcLogger.DEFAULT_HANDLERS = []
        self.svc = svc.Svc(SVCNAME)
        r = resFsLinux.Mount("fs#1",
                             mount_point="/srv/"+SVCNAME,
                             device="/tmp",
                             mount_options="bind,rw",
                             fs_type="none", restart=1)
        self.svc += r
        self.svc.action("start")

    def test_001_resource_autorestart(self):
        # unmount behind the service's back: status(refresh=True) must
        # restart the resource (restart=1) and report it up
        self.svc.vcall(["umount", "/srv/"+SVCNAME])
        assert self.svc.get_resources()[0].status(refresh=True) == rcStatus.UP

    def test_002_resource_monitor_during_stop(self):
        """
        A resource monitor action during a stop is blocked by the action lock.
        A status() call after a successful stop does not restart resources.
        """
        _svc = svc.Svc(SVCNAME)
        r = Mount("fs#1",
                  mount_point="/srv/"+SVCNAME,
                  device="/tmp",
                  mount_options="bind,rw",
                  fs_type="none", restart=1)
        _svc += r
        _svc.action("start")

        def worker(_svc):
            _svc.action("stop")
            _svc.node.close()

        proc = Process(
            target=worker,
            args=[_svc],
            name='worker_'+_svc.svcname,
        )
        proc.start()
        # give the worker time to grab the action lock
        time.sleep(0.2)
        ret1 = _svc.action("resource_monitor", {"waitlock": 0})
        ret2 = _svc.action("status", {"waitlock": 0, "refresh": True})
        proc.join()
        # monitor must have been rejected by the lock; status succeeds
        assert ret1 == 1
        assert ret2 == 0
        try:
            assert r.status(refresh=True) == rcStatus.DOWN
        finally:
            _svc.node.close()
opensvc-1.8~20170412/lib/tests/test_import.py 0000644 0001750 0001750 00000000465 13073467726 021123 0 ustar jkelbert jkelbert import glob
import os
def test_import():
    """Smoke-test: import every module of the lib directory, skipping
    the Windows-only ones."""
    lib_dir = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
    for module_path in glob.glob(lib_dir + "/*.py"):
        if "Win" in module_path or "wmi" in module_path:
            continue
        module_name = os.path.basename(module_path).replace(".py", "")
        __import__(module_name)
opensvc-1.8~20170412/lib/tests/test_svcmon.py 0000644 0001750 0001750 00000000700 13073467726 021106 0 ustar jkelbert jkelbert import svcmon
# Each test drives the svcmon entry point with a different argv and
# expects a 0 exit code.
def test_svcmon():
    ret = svcmon.main(argv=[])
    assert ret == 0
def test_svcmon_refresh():
    ret = svcmon.main(argv=["--refresh"])
    assert ret == 0
def test_svcmon_updatedb():
    ret = svcmon.main(argv=["--updatedb"])
    assert ret == 0
def test_svcmon_verbose():
    ret = svcmon.main(argv=["-v"])
    assert ret == 0
def test_svcmon_cluster_verbose():
    ret = svcmon.main(argv=["-v", "-c"])
    assert ret == 0
opensvc-1.8~20170412/lib/tests/test_scheduler.py 0000644 0001750 0001750 00000006550 13073467726 021570 0 ustar jkelbert jkelbert import rcScheduler
import node
# NOTE(review): rebinding the module name to an instance shadows the
# 'node' module from here on -- works, but fragile.
node = node.Node()
node.options.cron = True
def test_schedules():
    # (schedule definition, date to evaluate, expected result);
    # expected None means the definition must raise a syntax error
    tests = [
        ("", "2015-02-27 10:00", False),
        ("@0", "2015-02-27 10:00", False),
        ("*@0", "2015-02-27 10:00", False),
        ("*", "2015-02-27 10:00", True),
        ("*@61", "2015-02-27 10:00", True),
        ("09:00-09:20", "2015-02-27 10:00", False),
        ("09:00-09:20@31", "2015-02-27 10:00", False),
        ("09:00-09:00", "2015-02-27 10:00", False),
        ("09:20-09:00", "2015-02-27 10:00", False),
        ("09:00", "2015-02-27 10:00", False),
        ("09:00-09:20", "2015-02-27 09:09", True),
        ("09:00-09:20@31", "2015-02-27 09:09", True),
        ("09:00-09:00", "2015-02-27 09:09", True),
        ("09:20-09:00", "2015-02-27 09:09", True),
        ("09:00", "2015-02-27 09:09", True),
        ("* fri", "2015-10-09 10:00", True),
        ("* fri", "2015-10-08 10:00", False),
        ("* *:last", "2015-01-30 10:00", True),
        ("* *:last", "2015-01-31 10:00", True),
        ("* *:-1", "2015-01-31 10:00", True),
        ("* *:-1", "2015-01-24 10:00", False),
        ("* *:-2", "2015-01-31 10:00", False),
        ("* :last", "2015-01-30 10:00", False),
        ("* :last", "2015-01-31 10:00", True),
        ("* :-1", "2015-01-31 10:00", True),
        ("* :-2", "2015-01-30 10:00", True),
        ("* :-2", "2015-01-31 10:00", False),
        ("* :-2", "2015-01-05 10:00", False),
        ("* :5", "2015-01-05 10:00", True),
        ("* :+5", "2015-01-05 10:00", True),
        ("* :fifth", "2015-01-05 10:00", True),
        ("* :5", "2015-01-06 10:00", False),
        ("* :+5", "2015-01-06 10:00", False),
        ("* :fifth", "2015-01-06 10:00", False),
        ("* * * jan", "2015-01-06 10:00", True),
        ("* * * jan-feb", "2015-01-06 10:00", True),
        ("* * * %2", "2015-01-06 10:00", False),
        ("* * * %2+1", "2015-01-06 10:00", True),
        ("* * * jan-feb%2", "2015-01-06 10:00", False),
        ("* * * jan-feb%2+1", "2015-01-06 10:00", True),
        ("18:00-18:59@60 wed", "2016-08-31 18:00", True),
        ("18:00-18:59@60 wed", "2016-08-30 18:00", False),
        ("23:00-23:59@61 *:first", "2016-09-01 23:00", True),
        # syntax errors
        ("23:00-23:59@61 *:first:*", "2016-09-01 23:00", None),
        ("23:00-23:59@61 *:", "2016-09-01 23:00", None),
        ("23:00-23:59@61 *:*", "2016-09-01 23:00", None),
        ("23:00-23:59@61 * * %2%3", "2016-09-01 23:00", None),
        ("23:00-23:59@61 * * %2+1+2", "2016-09-01 23:00", None),
        ("23:00-23:59@61 * * %foo", "2016-09-01 23:00", None),
        ("23:00-23:59@61 * * %2+foo", "2016-09-01 23:00", None),
        ("23:00-23:59@61 freday", "2016-09-01 23:00", None),
        ("23:00-23:59@61 * * junuary", "2016-09-01 23:00", None),
        ("23:00-23:59@61 * * %2%3", "2016-09-01 23:00", None),
        ("23:00-23:59-01:00@61", "2016-09-01 23:00", None),
        ("23:00-23:59:00@61 * * %2%3", "2016-09-01 23:00", None),
        ("23:00-23:59@61@10", "2016-09-01 23:00", None),
        ("23:00-23:59 * * * * *", "2016-09-01 23:00", None),
    ]
    sched = rcScheduler.Scheduler()
    for test in tests:
        assert sched.test_schedule(*test)
def test_fork():
    # exercise both the serialized and non-serialized fork paths with
    # a no-op task
    def dummy(*args, **kwargs):
        return 0
    rcScheduler.fork(dummy, args=[node], kwargs={}, serialize=True, delay=0)
    rcScheduler.fork(dummy, args=[node], kwargs={}, serialize=False, delay=0)
opensvc-1.8~20170412/lib/tests/test_lock.py 0000644 0001750 0001750 00000001265 13073467726 020540 0 ustar jkelbert jkelbert import lock
def test_lock():
    """Exercise the file lock: acquire, re-entry, cross-process
    conflict, release."""
    fpath = "/tmp/test.lock"
    def inline_lock():
        # non-blocking acquisition in the current process
        return lock.lock(lockfile=fpath, timeout=0, intent="test")
    def worker():
        import sys
        try:
            sys.exit(inline_lock())
        except lock.lockTimeout:
            sys.exit(255)
    def proc_lock():
        # try to acquire from a child process; exitcode carries result
        from multiprocessing import Process
        proc = Process(target=worker)
        proc.start()
        proc.join()
        return proc.exitcode
    lockfd = inline_lock()
    assert lockfd > 0
    # already lock owner
    relockfd = inline_lock()
    assert relockfd == None
    # lock conflict
    assert proc_lock() == 255
    # release
    lock.unlock(lockfd)
opensvc-1.8~20170412/lib/tests/test_utilities.py 0000644 0001750 0001750 00000006264 13073467726 021627 0 ustar jkelbert jkelbert # coding: utf-8
from __future__ import print_function
from rcUtilities import *
def test_lazy():
    # @lazy computes once on first access and caches on the instance
    class Test(object):
        @lazy
        def foo(self):
            return 0
    testobj = Test()
    assert lazy_initialized(testobj, "foo") == False
    assert testobj.foo == 0
    assert lazy_initialized(testobj, "foo") == True
def test_is_string():
    assert is_string(1) == False
    assert is_string("a") == True
    assert is_string("bêh") == True
def test_empty_string():
    assert empty_string("") == True
    assert empty_string("foo") == False
    assert empty_string("fêo") == False
def test_mimport():
    # mimport composes module names from (prefix, group, os) parts
    mod = mimport("res", "fs", "linux")
    assert hasattr(mod, "Mount") == True
    mod = mimport("prov", "fs", "linux")
    assert hasattr(mod, "ProvisioningFs") == True
def test_ximport():
    mod = ximport("resFs")
    assert hasattr(mod, "Mount") == True
def test_is_exe():
    assert is_exe("/bin/ls") == True
    assert is_exe("/dev/null") == False
    assert is_exe("/tmp") == False
    assert is_exe("/etc/hosts") == False
def test_which():
    # NOTE(review): assumes ls lives in /bin -- fails on merged-/usr
    # systems where which() returns /usr/bin/ls
    assert which("ls") == "/bin/ls"
    assert which("foo") == None
def test_justcall():
    out, err, ret = justcall(["ls", "/foo"])
    assert is_string(out) == True
    assert is_string(err) == True
    assert ret == 2
def test_vcall():
    ret, out, err = vcall(["ls", "/foo"])
    assert is_string(out) == True
    assert is_string(err) == True
    assert ret == 2
def test_call():
    ret, out, err = call(["ls", "/foo"])
    assert is_string(out) == True
    assert is_string(err) == True
    assert ret == 2
def test_qcall():
    ret = qcall(["ls", "/foo"])
    assert ret == 2
def test_getmount():
    assert getmount("/bin") == "/"
    assert getmount("/") == "/"
def test_protected_mount():
    assert protected_mount("/bin") == True
    assert protected_mount("/bin/") == True
    assert protected_mount("/mysvc") == True
def test_protected_dir():
    assert protected_dir("/bin") == True
    assert protected_dir("/bin/") == True
    assert protected_dir("/mysvc") == False
def test_convert_bool():
    assert convert_bool("tRue") == True
    assert convert_bool("y") == True
    assert convert_bool("Y") == True
    assert convert_bool("1") == True
    assert convert_bool(1) == True
    assert convert_bool("FaLse") == False
    assert convert_bool("no") == False
    assert convert_bool("n") == False
    assert convert_bool("0") == False
    assert convert_bool(0) == False
def test_convert_size():
    # opensvc convention: K is binary (1024), Ki is decimal (1000) --
    # the reverse of the usual SI/IEC usage
    assert convert_size("1k") == 1024
    assert convert_size("1K") == 1024
    assert convert_size("1KB") == 1024
    assert convert_size("1 K") == 1024
    assert convert_size("1 Ki") == 1000
    assert convert_size("1 KiB") == 1000
    assert convert_size("1.1 Ki") == 1100
def test_cidr_to_dotted():
    assert cidr_to_dotted(22) == "255.255.252.0"
def test_to_dotted():
    assert to_dotted(22) == "255.255.252.0"
    assert to_dotted("22") == "255.255.252.0"
    assert to_dotted("255.255.252.0") == "255.255.252.0"
def test_hexmask_to_dotted():
    assert hexmask_to_dotted("ffffff00") == "255.255.255.0"
def test_dotted_to_cidr():
    assert dotted_to_cidr("255.255.252.0") == "22"
def test_term_width():
    assert term_width() > 0
opensvc-1.8~20170412/lib/tests/test_svcmgr.py 0000644 0001750 0001750 00000010455 13073467726 021112 0 ustar jkelbert jkelbert # coding: utf8
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import json
import socket
from StringIO import StringIO
import svcmgr
UNICODE_STRING = "bêh"
def test_svcmgr_print_schedule():
ret = svcmgr.main(argv=["print", "schedule"])
assert ret == 0
def test_svcmgr_print_schedule_json():
_stdout = sys.stdout
try:
out = StringIO()
sys.stdout = out
ret = svcmgr.main(argv=["print", "schedule", "--format", "json", "--color", "no"])
output = out.getvalue().strip()
finally:
sys.stdout = _stdout
print(output)
data = json.loads(output)
assert ret == 0
assert isinstance(json.loads(output), dict)
def test_svcmgr_print_config():
ret = svcmgr.main(argv=["print", "config"])
assert ret == 0
def test_svcmgr_print_config_json():
_stdout = sys.stdout
try:
out = StringIO()
sys.stdout = out
ret = svcmgr.main(argv=["print", "config", "--format", "json", "--color", "no"])
output = out.getvalue().strip()
finally:
sys.stdout = _stdout
print(output)
data = json.loads(output)
assert ret == 0
assert isinstance(json.loads(output), dict)
def test_svcmgr_print_status():
ret = svcmgr.main(argv=["print", "status"])
assert ret == 0
def test_svcmgr_print_status_json():
_stdout = sys.stdout
try:
out = StringIO()
sys.stdout = out
ret = svcmgr.main(argv=["print", "status", "--format", "json", "--color", "no"])
output = out.getvalue().strip()
finally:
sys.stdout = _stdout
print(output)
data = json.loads(output)
assert ret == 0
assert isinstance(json.loads(output), dict)
def test_create_empty():
    """Create the "unittest" service with no initial configuration."""
    assert svcmgr.main(argv=["create", "-s", "unittest"]) == 0

def test_set_default():
    """Set a unicode comment and a reference-based env value."""
    assert svcmgr.main(argv=["-s", "unittest", "set", "--param", "comment", "--value", UNICODE_STRING]) == 0
    assert svcmgr.main(argv=["-s", "unittest", "set", "--param", "env.list_entry_ref_indirect_eval2", "--value", "{nodes[$(0//(3//{#nodes}))]}"]) == 0

def test_get_default():
    """Read the comment parameter back and compare to the unicode value."""
    saved_stdout = sys.stdout
    try:
        capture = StringIO()
        sys.stdout = capture
        rc = svcmgr.main(argv=["-s", "unittest", "get", "--param", "comment"])
        captured = capture.getvalue().strip()
    finally:
        sys.stdout = saved_stdout
    from rcUtilities import try_decode
    print(captured)
    assert rc == 0
    assert try_decode(captured) == UNICODE_STRING

def test_unset_default():
    """Remove the comment parameter."""
    assert svcmgr.main(argv=["-s", "unittest", "unset", "--param", "comment"]) == 0

def test_get_default_not_found():
    """Getting the removed parameter must fail with "not found" on stderr."""
    saved_stderr = sys.stderr
    try:
        capture = StringIO()
        sys.stderr = capture
        rc = svcmgr.main(argv=["-s", "unittest", "get", "--param", "comment"])
        captured = capture.getvalue().strip()
    finally:
        sys.stderr = saved_stderr
    assert rc == 1
    assert "not found" in captured

def test_get_list_entry_ref_indirect_eval2():
    """The evaluated reference must resolve to the local node name."""
    expected = socket.gethostname().lower()
    saved_stdout = sys.stdout
    try:
        capture = StringIO()
        sys.stdout = capture
        rc = svcmgr.main(argv=["-s", "unittest", "get", "--param", "env.list_entry_ref_indirect_eval2", "--eval"])
        captured = capture.getvalue().strip()
    finally:
        sys.stdout = saved_stdout
    assert rc == 0
    assert captured == expected
def _unittest_action(*argv):
    """Run a svcmgr action against the unittest service, return its exit code."""
    return svcmgr.main(argv=list(argv) + ["-s", "unittest"])

def test_validate_config():
    """The service configuration must validate cleanly."""
    assert _unittest_action("validate", "config") == 0

def test_frozen():
    """A freshly created service is not frozen."""
    assert _unittest_action("frozen") == 0

def test_freeze():
    """Freezing the service flips the frozen status to 1."""
    assert _unittest_action("freeze") == 0
    assert _unittest_action("frozen") == 1

def test_thaw():
    """Thawing the service flips the frozen status back to 0."""
    assert _unittest_action("thaw") == 0
    assert _unittest_action("frozen") == 0

def test_logs():
    assert _unittest_action("logs") == 0

def test_push():
    assert _unittest_action("push") == 0

def test_pull():
    assert _unittest_action("pull") == 0

def test_delete():
    """Clean up: remove the unittest service."""
    assert _unittest_action("delete") == 0
opensvc-1.8~20170412/lib/tests/test_nodemgr.py 0000644 0001750 0001750 00000006730 13073467726 021245 0 ustar jkelbert jkelbert # coding: utf8
from __future__ import print_function
from __future__ import unicode_literals
import sys
import json
from StringIO import StringIO
import nodemgr
UNICODE_STRING = "bêh"
def test_nodemgr_print_schedule():
    """Exercise "print schedule" with the default output."""
    ret = nodemgr.main(argv=["print", "schedule"])
    assert ret == 0

def test_nodemgr_print_schedule_json():
    """Verify "print schedule --format json" emits a valid json list."""
    _stdout = sys.stdout
    try:
        out = StringIO()
        sys.stdout = out
        ret = nodemgr.main(argv=["print", "schedule", "--format", "json", "--color", "no"])
        output = out.getvalue().strip()
    finally:
        # always restore the real stdout, even if the action raised
        sys.stdout = _stdout
    print(output)
    # parse once and assert on the parsed document (the original parsed the
    # output twice and left the first result unused)
    data = json.loads(output)
    assert ret == 0
    assert isinstance(data, list)

def test_nodemgr_print_config():
    """Exercise "print config" with the default output."""
    ret = nodemgr.main(argv=["print", "config"])
    assert ret == 0

def test_nodemgr_print_config_json():
    """Verify "print config --format json" emits a valid json dict."""
    _stdout = sys.stdout
    try:
        out = StringIO()
        sys.stdout = out
        ret = nodemgr.main(argv=["print", "config", "--format", "json", "--color", "no"])
        output = out.getvalue().strip()
    finally:
        sys.stdout = _stdout
    print(output)
    data = json.loads(output)
    assert ret == 0
    assert isinstance(data, dict)

def test_nodemgr_print_authconfig():
    """Exercise "print authconfig" with the default output."""
    ret = nodemgr.main(argv=["print", "authconfig"])
    assert ret == 0

def test_nodemgr_print_authconfig_json():
    """Verify "print authconfig --format json" emits a valid json dict."""
    _stdout = sys.stdout
    try:
        out = StringIO()
        sys.stdout = out
        ret = nodemgr.main(argv=["print", "authconfig", "--format", "json", "--color", "no"])
        output = out.getvalue().strip()
    finally:
        sys.stdout = _stdout
    print(output)
    data = json.loads(output)
    assert ret == 0
    assert isinstance(data, dict)
def test_set():
    """Set a unicode node configuration parameter."""
    assert nodemgr.main(argv=["set", "--param", "unittest.comment", "--value", UNICODE_STRING]) == 0

def test_get():
    """Read the parameter back and compare to the unicode value."""
    saved_stdout = sys.stdout
    try:
        capture = StringIO()
        sys.stdout = capture
        rc = nodemgr.main(argv=["get", "--param", "unittest.comment"])
        captured = capture.getvalue().strip()
    finally:
        sys.stdout = saved_stdout
    from rcUtilities import try_decode
    print(captured)
    assert rc == 0
    assert try_decode(captured) == UNICODE_STRING

def test_unset():
    """Remove the parameter."""
    assert nodemgr.main(argv=["unset", "--param", "unittest.comment"]) == 0
def test_get_not_found():
    """Getting an unset parameter must fail and report "not found" on stderr.

    Bug fix: the original saved sys.stdout into _stderr, so the finally
    clause restored sys.stderr to the captured stdout stream.
    """
    _stderr = sys.stderr
    try:
        err = StringIO()
        sys.stderr = err
        ret = nodemgr.main(argv=["get", "--param", "unittest.comment"])
        output = err.getvalue().strip()
    finally:
        # restore the real stderr, even if the action raised
        sys.stderr = _stderr
    assert ret == 1
    assert "not found" in output
def _node_action(*argv):
    """Run a nodemgr action and return its exit code."""
    return nodemgr.main(argv=list(argv))

def test_nodemgr_checks():
    assert _node_action("checks") == 0

def test_nodemgr_sysreport():
    assert _node_action("sysreport") == 0

def test_nodemgr_pushasset():
    assert _node_action("pushasset") == 0

def test_nodemgr_collect_stats():
    assert _node_action("collect_stats") == 0

def test_nodemgr_pushstats():
    assert _node_action("pushstats") == 0

def test_nodemgr_pushpkg():
    assert _node_action("pushpkg") == 0

def test_nodemgr_pushpatch():
    assert _node_action("pushpatch") == 0

def test_nodemgr_pushdisks():
    assert _node_action("pushdisks") == 0

def test_nodemgr_logs():
    assert _node_action("logs") == 0
opensvc-1.8~20170412/lib/tests/test_svc_fs.py 0000644 0001750 0001750 00000004347 13073467726 021077 0 ustar jkelbert jkelbert # coding: utf8
from __future__ import print_function
import platform
from nose.plugins.skip import Skip, SkipTest
import svc
import resFsLinux
import rcExceptions as ex
import rcLogger
SVCNAME = "unittest"
if platform.uname()[0] != "Linux":
raise SkipTest
class TestSvc:
    # Integration tests for a bind-mount fs resource driven through the
    # Svc action API. Tests run in alphabetical order, hence the numeric
    # prefixes encoding the intended sequence.

    def tearDown(self):
        # close the node handle opened by the Svc object, if any
        if self.svc.node:
            self.svc.node.close()

    def setUp(self):
        # drop the default log handlers to keep test output quiet
        rcLogger.DEFAULT_HANDLERS = []
        self.svc = svc.Svc(SVCNAME)
        # bind-mount /tmp under /srv/<svcname> as resource fs#1
        r = resFsLinux.Mount(rid="fs#1",
                             mount_point="/srv/"+SVCNAME,
                             device="/tmp",
                             mount_options="bind,rw",
                             fs_type="none")
        self.svc += r

    def test_002_start(self):
        # start the service, which mounts fs#1
        ret = self.svc.action("start")
        assert ret == 0

    def test_003_restart(self):
        ret = self.svc.action("restart")
        assert ret == 0

    def test_004_action_on_wrong_rid(self):
        # an action targeting an unknown rid must raise excError
        try:
            self.svc.action("start", {"rid": "fs#2"})
            # shouldn't reach here, fs#2 doesn't exist
            assert False
        except ex.excError:
            assert True

    def test_005_update(self):
        # add and provision a resource from a json-encoded string definition
        ret = self.svc.action("update", {
            "resource": ['{"rtype": "fs", "mnt": "/srv/{svcname}/foo", "dev": "/tmp", "type": "none", "mnt_opt": "bind"}'],
            "provision": True,
        })
        assert ret == 0

    def test_006_update(self):
        # same as test_005, but from a dict definition
        ret = self.svc.action("update", {
            "resource": [{"rtype": "fs", "mnt": "/srv/{svcname}/foo", "dev": "/tmp", "type": "none", "mnt_opt": "bind"}],
            "provision": True,
        })
        assert ret == 0

    def test_007_start(self):
        ret = self.svc.action("start")
        assert ret == 0

    def test_008_stop(self):
        ret = self.svc.action("stop")
        assert ret == 0

    def test_009_delete_rid_unprovision(self):
        # delete and unprovision a single resource
        ret = self.svc.action("delete", {"rid": "fs#1", "unprovision": True})
        assert ret == 0

    def test_010_delete_unprovision(self):
        # delete and unprovision the whole service
        ret = self.svc.action("delete", {"unprovision": True})
        assert ret == 0

    def test_011_pull_provision(self):
        ret = self.svc.action("pull", {"provision": True})
        assert ret == 0
opensvc-1.8~20170412/lib/checkFsInodeAIX.py 0000644 0001750 0001750 00000001743 13073467726 020277 0 ustar jkelbert jkelbert import checks
from rcUtilities import justcall
class check(checks.check):
    """Checker reporting filesystem inode usage on AIX, parsed from "df -i"."""

    chk_type = "fs_i"

    def find_svc(self, mountpt):
        """Return the name of the service owning mountpt, or ''."""
        for svc in self.svcs:
            for resource in svc.get_resources('fs'):
                if resource.mount_point == mountpt:
                    return svc.svcname
        return ''

    def do_check(self):
        """Run "df -i" and return one check entry per eligible filesystem."""
        out, err, ret = justcall(['df', '-i'])
        if ret != 0:
            return self.undef
        lines = out.split('\n')
        if len(lines) < 2:
            return self.undef
        instances = []
        # skip the header line; keep only fully populated local entries
        for line in lines[1:]:
            fields = line.split()
            if len(fields) != 7:
                continue
            if fields[1] == '-':
                continue
            if ":/" in fields[0]:
                # remote (nfs-style) device: not a local filesystem
                continue
            instances.append({
                'chk_instance': fields[6],
                'chk_value': fields[5],
                'chk_svcname': self.find_svc(fields[6]),
            })
        return instances
opensvc-1.8~20170412/lib/checkMpathOSF1.py 0000644 0001750 0001750 00000001543 13073467726 020106 0 ustar jkelbert jkelbert import checks
from rcUtilities import justcall
from rcDiskInfoOSF1 import diskInfo
class check(checks.check):
    """Checker reporting multipath path counts on Tru64/OSF1, from diskInfo."""

    chk_type = "mpath"
    # cache of svc -> disklist, so each service is only scanned once
    svcdevs = {}

    def find_svc(self, dev):
        """Return the name of the service using dev, or ''."""
        # fix: removed the unused local "devpath" computed from dev
        for svc in self.svcs:
            if svc not in self.svcdevs:
                try:
                    devs = svc.disklist()
                except Exception:
                    # best-effort: a service failing to report its disks
                    # is treated as owning none
                    devs = []
                self.svcdevs[svc] = devs
            if dev in self.svcdevs[svc]:
                return svc.svcname
        return ''

    def do_check(self):
        """Return one check entry per multipath device known to diskInfo."""
        di = diskInfo()
        r = []
        for dev, data in di.h.items():
            r.append({'chk_instance': data['wwid'],
                      'chk_value': str(data['path_count']),
                      'chk_svcname': self.find_svc(dev),
                      })
        return r
opensvc-1.8~20170412/lib/resScsiReservLinux.py 0000644 0001750 0001750 00000013401 13073467726 021224 0 ustar jkelbert jkelbert import os
import resources as Res
import uuid
import re
import time
import rcStatus
import rcExceptions as ex
from rcUtilities import which
from subprocess import *
import resScsiReserv
from rcGlobalEnv import rcEnv
class ScsiReserv(resScsiReserv.ScsiReserv):
    """
    SCSI-3 persistent reservation driver for Linux, implemented on top of
    the sg_persist utility from sg3_utils.
    """

    def scsireserv_supported(self):
        """Return True if sg_persist is available on this host."""
        if which('sg_persist') is None:
            self.log.debug("sg_persist must be installed to use scsi-3 reservations" )
            return False
        return True

    def set_read_only(self, val):
        """Toggle sg_persist read-only device open mode via its env knob."""
        if rcEnv.sysname != "Linux":
            return
        os.environ["SG_PERSIST_O_RDONLY"] = str(val)

    def ack_unit_attention(self, d):
        """Acknowledge a pending 'Unit Attention' condition on disk d.

        Polls for up to self.preempt_timeout iterations. Returns 0 on
        success or when the device is gone, 1 on timeout. Raises
        excScsiPrNotsupported if the disk rejects persistent reservations.
        """
        if not os.path.exists(d):
            return 0
        i = self.preempt_timeout
        self.set_read_only(0)
        while i>0:
            i -= 1
            cmd = [ 'sg_persist', '-n', '-r', d ]
            p = Popen(cmd, stdout=PIPE, stderr=PIPE, close_fds=True)
            # NOTE(review): communicate() returns (stdout, stderr); the
            # substring tests below assume text output (python2 semantics)
            # -- confirm when porting to python3
            out = p.communicate()
            ret = p.returncode
            if "unsupported service action" in out[1]:
                self.log.error("disk %s does not support persistent reservation" % d)
                raise ex.excScsiPrNotsupported
            if "error opening file" in out[1]:
                return 0
            if "Unit Attention" in out[0] or ret != 0:
                self.log.debug("disk %s reports 'Unit Attention' ... waiting" % d)
                time.sleep(1)
                continue
            break
        if i == 0:
            self.log.error("timed out waiting for 'Unit Attention' to go away on disk %s" % d)
            return 1
        return 0

    def disk_registered(self, disk):
        """Return True if our hostid key is registered on the disk."""
        self.set_read_only(1)
        cmd = [ 'sg_persist', '-n', '-k', disk ]
        (ret, out, err) = self.call(cmd)
        if ret != 0:
            self.log.error("failed to read registrations for disk %s" % disk)
        if self.hostid in out:
            return True
        return False

    def disk_register(self, disk):
        """Register our hostid key with the disk. Return the command status."""
        self.set_read_only(0)
        cmd = [ 'sg_persist', '-n', '--out', '--register-ignore', '--param-sark='+self.hostid, disk ]
        (ret, out, err) = self.vcall(cmd)
        if ret != 0:
            self.log.error("failed to register key %s with disk %s" % (self.hostid, disk))
        return ret

    def disk_unregister(self, disk):
        """Unregister our hostid key from the disk. Return the command status."""
        self.set_read_only(0)
        cmd = [ 'sg_persist', '-n', '--out', '--register-ignore', '--param-rk='+self.hostid, disk ]
        (ret, out, err) = self.vcall(cmd)
        if ret != 0:
            self.log.error("failed to unregister key %s with disk %s" % (self.hostid, disk))
        return ret

    def dev_to_mpath_dev(self, devpath):
        """Translate a path device to its /dev/mapper multipath device."""
        if which("multipath") is None:
            return devpath
        cmd = ["multipath", "-l", "-v1", devpath]
        (ret, out, err) = self.call(cmd)
        if ret != 0:
            raise ex.excError(err)
        _devpath = "/dev/mapper/"+out.strip()
        if not os.path.exists(_devpath):
            # bug fix: the format string was missing its argument, so the
            # error message always read literally "%s does not exist"
            raise ex.excError("%s does not exist" % _devpath)
        return _devpath

    def get_reservation_key(self, disk):
        """Return the reservation key on disk, retrying via the mpath dev."""
        try:
            return self._get_reservation_key(disk)
        except ex.excError as e:
            disk = self.dev_to_mpath_dev(disk)
            return self._get_reservation_key(disk)

    def _get_reservation_key(self, disk):
        """Return the reservation key reported by sg_persist, or None."""
        self.set_read_only(1)
        cmd = [ 'sg_persist', '-n', '-r', disk ]
        (ret, out, err) = self.call(cmd, errlog=None)
        if ret != 0:
            raise ex.excError("failed to list reservation for disk %s" % disk)
        if 'Key=' not in out:
            return None
        for w in out.split():
            if 'Key=' in w:
                return w.split('=')[1]
        raise Exception()

    def disk_reserved(self, disk):
        """Return True if we hold the reservation, retrying via mpath dev."""
        try:
            return self._disk_reserved(disk)
        except ex.excError as e:
            disk = self.dev_to_mpath_dev(disk)
            return self._disk_reserved(disk)

    def _disk_reserved(self, disk):
        """Return True if our hostid appears in the reservation listing."""
        self.set_read_only(1)
        cmd = [ 'sg_persist', '-n', '-r', disk ]
        (ret, out, err) = self.call(cmd)
        if ret != 0:
            raise ex.excError("failed to read reservation for disk %s" % disk)
        if self.hostid in out:
            return True
        return False

    def disk_release(self, disk):
        """Release our reservation on the disk. Return the command status."""
        self.set_read_only(0)
        cmd = [ 'sg_persist', '-n', '--out', '--release', '--param-rk='+self.hostid, '--prout-type='+self.prtype, disk ]
        (ret, out, err) = self.vcall(cmd)
        if ret != 0:
            self.log.error("failed to release disk %s" % disk)
        return ret

    def disk_clear_reservation(self, disk):
        """Clear all reservations and registrations on the disk."""
        cmd = [ 'sg_persist', '-n', '--out', '--clear', '--param-rk='+self.hostid, disk ]
        (ret, out, err) = self.vcall(cmd)
        if ret != 0:
            self.log.error("failed to clear reservation on disk %s" % disk)
        return ret

    def disk_reserve(self, disk):
        """Acquire the reservation on the disk. Return the command status."""
        self.set_read_only(0)
        cmd = [ 'sg_persist', '-n', '--out', '--reserve', '--param-rk='+self.hostid, '--prout-type='+self.prtype, disk ]
        (ret, out, err) = self.vcall(cmd)
        if ret != 0:
            self.log.error("failed to reserve disk %s" % disk)
        return ret

    def _disk_preempt_reservation(self, disk, oldkey):
        """Preempt the reservation held under oldkey.

        Uses --preempt instead of --preempt-abort when no_preempt_abort is
        set or the disk vendor (e.g. VMware virtual disks) requires it.
        """
        m = __import__("rcDiskInfo"+rcEnv.sysname)
        if self.no_preempt_abort or m.diskInfo(deferred=True).disk_vendor(disk).strip() in ["VMware"]:
            preempt_opt = '--preempt'
        else:
            preempt_opt = '--preempt-abort'
        self.set_read_only(0)
        cmd = [ 'sg_persist', '-n', '--out', preempt_opt, '--param-sark='+oldkey, '--param-rk='+self.hostid, '--prout-type='+self.prtype, disk ]
        (ret, out, err) = self.vcall(cmd)
        if ret != 0:
            self.log.error("failed to preempt reservation for disk %s" % disk)
        return ret
opensvc-1.8~20170412/lib/resDiskRawHP-UX.py 0000644 0001750 0001750 00000001532 13073467726 020244 0 ustar jkelbert jkelbert import resDiskRaw
import os
import rcStatus
import re
from rcUtilities import justcall
class Disk(resDiskRaw.Disk):
    """Raw disk resource driver for HP-UX."""

    def __init__(self,
                 rid=None,
                 devs=None,
                 user=None,
                 group=None,
                 perm=None,
                 create_char_devices=False,
                 **kwargs):
        # bug fix: the previous default devs=set([]) was a mutable default
        # argument shared between calls; use None as the sentinel
        if devs is None:
            devs = set()
        resDiskRaw.Disk.__init__(self,
                                 rid=rid,
                                 devs=devs,
                                 user=user,
                                 group=group,
                                 perm=perm,
                                 create_char_devices=False,
                                 **kwargs)

    def verify_dev(self, dev):
        """Return True if "diskinfo" recognizes dev as a disk device."""
        cmd = ["diskinfo", dev]
        out, err, ret = justcall(cmd)
        if ret != 0:
            return False
        return True
opensvc-1.8~20170412/lib/resContainerAmazon.py 0000644 0001750 0001750 00000025171 13073467726 021213 0 ustar jkelbert jkelbert import rcStatus
import resources as Res
import time
import os
import rcExceptions as ex
from rcGlobalEnv import rcEnv
from rcUtilities import justcall
from rcUtilitiesLinux import check_ping
import resContainer
class CloudVm(resContainer.Container):
    """
    Amazon EC2 container resource driver, using the libcloud compute API
    through the node's cloud registry.
    """
    startup_timeout = 240
    shutdown_timeout = 120
    save_timeout = 240

    def __init__(self,
                 rid,
                 name,
                 guestos=None,
                 cloud_id=None,
                 image_id=None,
                 size="t2.micro",
                 key_name=None,
                 subnet=None,
                 osvc_root_path=None,
                 **kwargs):
        resContainer.Container.__init__(self,
                                        rid=rid,
                                        name=name,
                                        type="container.amazon",
                                        guestos=guestos,
                                        osvc_root_path=osvc_root_path,
                                        **kwargs)
        self.cloud_id = cloud_id
        self.save_name = name + '.save'
        self.size_id = size
        self.image_id = image_id
        self.key_name = key_name
        self.subnet_name = subnet
        # resolved lazily by getaddr()
        self.addr = None

    def keyfile(self):
        """Return the path of the ssh key file for key_name, or raise."""
        kf = [os.path.join(rcEnv.pathetc, self.key_name+'.pem'),
              os.path.join(rcEnv.pathetc, self.key_name+'.pub'),
              os.path.join(rcEnv.pathvar, self.key_name+'.pem'),
              os.path.join(rcEnv.pathvar, self.key_name+'.pub')]
        for k in kf:
            if os.path.exists(k):
                return k
        raise ex.excError("key file for key name '%s' not found"%self.key_name)

    def rcp_from(self, src, dst):
        """Copy src from the instance to local dst over scp."""
        if self.guestos == "Windows":
            # Windows has no sshd
            raise ex.excNotSupported("remote copy not supported on Windows")
        self.getaddr()
        if self.addr is None:
            raise ex.excError('no usable ip to send files to')
        timeout = 5
        cmd = [ 'scp', '-o', 'StrictHostKeyChecking=no',
                '-o', 'ConnectTimeout='+str(timeout),
                '-i', self.keyfile(),
                self.addr+':'+src, dst]
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError("'%s' execution error:\n%s"%(' '.join(cmd), err))
        return out, err, ret

    def rcp(self, src, dst):
        """Copy local src to dst on the instance over scp."""
        if self.guestos == "Windows":
            # Windows has no sshd
            raise ex.excNotSupported("remote copy not supported on Windows")
        self.getaddr()
        if self.addr is None:
            raise ex.excError('no usable ip to send files to')
        timeout = 5
        cmd = [ 'scp', '-o', 'StrictHostKeyChecking=no',
                '-o', 'ConnectTimeout='+str(timeout),
                '-i', self.keyfile(),
                src, self.addr+':'+dst]
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError("'%s' execution error:\n%s"%(' '.join(cmd), err))
        return out, err, ret

    def rcmd(self, cmd):
        """Run a command on the instance over ssh. Return (out, err, ret)."""
        if self.guestos == "Windows":
            # Windows has no sshd
            raise ex.excNotSupported("remote commands not supported on Windows")
        self.getaddr()
        if self.addr is None:
            raise ex.excError('no usable ip to send command to')
        if type(cmd) == str:
            cmd = cmd.split(" ")
        timeout = 5
        cmd = [ 'ssh', '-o', 'StrictHostKeyChecking=no',
                '-o', 'ForwardX11=no',
                '-o', 'BatchMode=yes',
                '-n',
                '-o', 'ConnectTimeout='+str(timeout),
                '-i', self.keyfile(),
                self.addr] + cmd
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError("'%s' execution error:\n%s"%(' '.join(cmd), err))
        return out, err, ret

    def get_subnet(self):
        """Return the libcloud subnet object matching subnet_name."""
        c = self.get_cloud()
        for subnet in c.driver.ex_list_subnets():
            if subnet.name == self.subnet_name:
                return subnet
        raise ex.excError("%s subnet not found"%self.subnet_name)

    def get_size(self):
        """Return the libcloud size object matching size_id."""
        c = self.get_cloud()
        for size in c.driver.list_sizes():
            if size.id == self.size_id:
                return size
        # bug fix: the error message referenced the nonexistent attribute
        # self.size_name, raising AttributeError instead of excError
        raise ex.excError("%s size not found"%self.size_id)

    def get_cloud(self):
        """Return the cloud handle, caching it on first use."""
        if hasattr(self, 'cloud'):
            return self.cloud
        c = self.svc.node.cloud_get(self.cloud_id)
        self.cloud = c
        return self.cloud

    def get_node(self):
        """Return the libcloud node matching our name, or None."""
        c = self.get_cloud()
        l = c.list_nodes()
        for n in l:
            if n.name == self.name:
                return n
        return

    def get_image(self, image_id):
        """Return the libcloud image object matching image_id exactly."""
        c = self.get_cloud()
        l = c.driver.list_images(ex_image_ids=[image_id])
        for image in l:
            if image.id == image_id:
                # exact match
                return image
        raise ex.excError("image %s not found" % image_id)

    def has_image(self, image_id):
        """Return True if image_id is known to the cloud driver."""
        c = self.get_cloud()
        l = c.driver.list_images([image_id])
        for image in l:
            if image.id == image_id:
                return True
        return False

    def __str__(self):
        return "%s name=%s" % (Res.Resource.__str__(self), self.name)

    def getaddr(self):
        """Resolve self.addr to the first pinging ip of the instance."""
        if self.addr is not None:
            return
        n = self.get_node()
        if n is None:
            raise ex.excError("could not get node details")
        ips = set(n.public_ips+n.private_ips)
        if len(ips) == 0:
            return 0
        # find first pinging ip
        for ip in ips:
            if check_ping(ip, timeout=1, count=1):
                self.addr = ip
                break
        return 0

    def files_to_sync(self):
        return []

    def check_capabilities(self):
        return True

    def ping(self):
        """Ping the resolved address. Returns 0 when no address is known."""
        if self.addr is None:
            return 0
        return check_ping(self.addr, timeout=1, count=1)

    def start(self):
        """Start the instance and wait for it to come up."""
        if self.is_up():
            self.log.info("container %s already started" % self.name)
            return
        if rcEnv.nodename in self.svc.drpnodes:
            self.install_drp_flag()
        self.container_start()
        self.can_rollback = True
        self.wait_for_startup()

    def container_start(self):
        """Start or provision the ec2 instance depending on its state.

        libcloud NodeState values for reference:
        RUNNING=0 REBOOTING=1 TERMINATED=2 PENDING=3 UNKNOWN=4
        STOPPED=5 SUSPENDED=6 ERROR=7 PAUSED=8
        """
        from libcloud.compute.types import NodeState
        n = self.get_node()
        if n is None:
            self.provision()
            return
        elif n.state == NodeState().RUNNING:
            self.log.info("already running")
            return
        elif n.state == NodeState().PENDING:
            self.log.info("already pending. wait for running state.")
            self.wait_for_fn(self.is_up, self.startup_timeout, 5)
            return
        elif n.state == NodeState().REBOOTING:
            self.log.info("currently rebooting. wait for running state.")
            self.wait_for_fn(self.is_up, self.startup_timeout, 5)
            return
        elif n.state == NodeState().STOPPED:
            c = self.get_cloud()
            self.log.info("starting ebs ec2 instance through aws")
            c.driver.ex_start_node(n)
            self.log.info("wait for container up status")
            self.wait_for_fn(self.is_up, self.startup_timeout, 5)
            return
        raise ex.excError("don't know what to do with node in state: %s"%NodeState().tostring(n.state))

    def container_reboot(self):
        """Reboot the instance through the cloud driver."""
        c = self.get_cloud()
        n = self.get_node()
        try:
            c.driver.reboot_node(n)
        except Exception as e:
            raise ex.excError(str(e))

    def wait_for_startup(self):
        pass

    def stop(self):
        """Stop the instance gracefully, falling back to a forced stop."""
        if self.is_down():
            self.log.info("container %s already stopped" % self.name)
            return
        try:
            self.container_stop()
            self.wait_for_shutdown()
        except ex.excError:
            self.container_forcestop()
            self.wait_for_shutdown()

    def container_stop(self):
        """Shut the guest os down from inside the instance."""
        cmd = "shutdown -h now"
        self.log.info("remote command: %s"%cmd)
        self.rcmd(cmd)

    def container_forcestop(self):
        """Stop the instance through the cloud driver."""
        c = self.get_cloud()
        n = self.get_node()
        self.log.info("stopping ebs ec2 instance through aws")
        c.driver.ex_stop_node(n)

    def print_obj(self, n):
        # debugging helper: dump all public attributes of a libcloud object
        for k in dir(n):
            if '__' in k:
                continue
            print(k, "=", getattr(n, k))

    def is_up(self):
        """Return True if the instance is in the RUNNING state."""
        from libcloud.compute.types import NodeState
        n = self.get_node()
        if n is not None and n.state == NodeState().RUNNING:
            return True
        if n is None:
            self.status_log("state:unknown")
        else:
            self.status_log("state:"+NodeState().tostring(n.state))
        return False

    def get_container_info(self):
        """Return {'vcpus', 'vmem'} for the instance, best effort."""
        self.info = {'vcpus': '0', 'vmem': '0'}
        c = self.get_cloud()
        n = self.get_node()
        try:
            size = c.driver.ex_get_size(n.extra['flavorId'])
            self.info['vmem'] = str(size.ram)
        except:
            pass
        return self.info

    def check_manual_boot(self):
        return True

    def install_drp_flag(self):
        pass

    def provision(self):
        """Create the ec2 instance and wait for it to come up."""
        prereq = True
        if self.image_id is None:
            self.log.error("the image keyword is mandatory for the provision action")
            prereq &= False
        if self.size_id is None:
            self.log.error("the size keyword is mandatory for the provision action")
            prereq &= False
        if self.subnet_name is None:
            self.log.error("the subnet keyword is mandatory for the provision action")
            prereq &= False
        if self.key_name is None:
            self.log.error("the key_name keyword is mandatory for the provision action")
            prereq &= False
        if not prereq:
            raise ex.excError()
        c = self.get_cloud()
        image = self.get_image(self.image_id)
        size = self.get_size()
        subnet = self.get_subnet()
        self.log.info("create instance %s, size %s, image %s, key %s, subnet %s"%(self.name, size.name, image.name, self.key_name, subnet.name))
        c.driver.create_node(name=self.name, size=size, image=image, ex_keyname=self.key_name, ex_subnet=subnet)
        self.log.info("wait for container up status")
        self.wait_for_fn(self.is_up, self.startup_timeout, 5)
opensvc-1.8~20170412/lib/resContainerZone.py 0000644 0001750 0001750 00000042351 13073467726 020700 0 ustar jkelbert jkelbert from datetime import datetime
import rcStatus
import resources as Res
import time
import os
from rcUtilities import justcall, qcall
from stat import *
import resContainer
from rcExceptions import excError
from rcZfs import zfs_setprop
from rcGlobalEnv import rcEnv
ZONECFG="/usr/sbin/zonecfg"
PGREP="/usr/bin/pgrep"
PWAIT="/usr/bin/pwait"
INIT="/sbin/init"
SVCS="/usr/bin/svcs"
MULTI_USER_SMF="svc:/milestone/multi-user:default"
class Zone(resContainer.Container):
"""
Zone container resource driver.
"""
def __init__(self,
             rid,
             name,
             guestos="SunOS",
             delete_on_stop=False,
             osvc_root_path=None,
             **kwargs):
    resContainer.Container.__init__(self,
                                    rid=rid,
                                    name=name,
                                    type="container.zone",
                                    guestos=guestos,
                                    osvc_root_path=osvc_root_path,
                                    **kwargs)
    self.label = name
    # zoneadm-reported state; refreshed by zone_refresh()
    self.state = None
    # when set, stop() also deletes the zone configuration
    self.delete_on_stop = delete_on_stop
    self.zone_refresh()
    # command prefix used to execute commands inside the zone
    self.runmethod = [ '/usr/sbin/zlogin', '-S', name ]
    # system-managed zone configuration file
    self.zone_cf = "/etc/zones/"+self.name+".xml"
    self.delayed_noaction = True
def zone_cfg_dir(self):
    """Return the directory hosting exported zone configurations."""
    return os.path.join(rcEnv.pathvar, self.svc.svcname, "zonecfg")

def zone_cfg_path(self):
    """Return the path of this zone's exported configuration file."""
    return os.path.join(self.zone_cfg_dir(), self.name+".cfg")

def export_zone_cfg(self):
    """Export the zone configuration to the service var directory.

    Raises excError only when the export fails and no previous export
    exists to fall back on.
    """
    cfg_d = self.zone_cfg_dir()
    if not os.path.exists(cfg_d):
        os.makedirs(cfg_d)
    cfg = self.zone_cfg_path()
    cmd = [ZONECFG, "-z", self.name, "export", "-f", cfg]
    ret, out, err = self.vcall(cmd)
    if ret != 0 and not os.path.exists(cfg):
        # bug fix: was "raise ex.excError(err)" but "ex" is not imported
        # in this module; excError is imported directly from rcExceptions
        raise excError(err)
def get_zonepath_from_zonecfg_cmd(self):
    """Query zonecfg for this zone's zonepath."""
    cmd = [ZONECFG, '-z', self.name, 'info', 'zonepath']
    out, err, ret = justcall(cmd)
    if ret != 0:
        raise excError("unable to determine zonepath using %s"%' '.join(cmd))
    zp = out.replace("zonepath: ", "").strip()
    return zp

def get_zonepath_from_zonecfg_export(self):
    """Extract the zonepath from the exported zone config file."""
    fpath = self.zone_cfg_path()
    if not os.path.exists(fpath):
        raise excError("zone config export file %s not found. unable to determine zonepath" % fpath)
    with open(fpath, "r") as f:
        buff = f.read()
    for line in buff.split("\n"):
        if "set zonepath" in line:
            return line.split("=")[-1].strip()
    raise excError("set zonepath command not found in %s" % fpath)

def get_zonepath(self):
    """Return the cached zonepath, resolving it on first call.

    Tries zonecfg first, then falls back to the exported config file.
    """
    if hasattr(self, "zonepath"):
        return self.zonepath
    try:
        zp = self.get_zonepath_from_zonecfg_cmd()
    except Exception:
        # bug fix: was a bare "except:" which also swallowed SystemExit
        # and KeyboardInterrupt
        zp = self.get_zonepath_from_zonecfg_export()
    self.zonepath = zp
    return zp
def files_to_sync(self):
    # NOTE(review): this definition is shadowed by a later files_to_sync
    # defined further down in the same class (which returns
    # [self.zone_cfg_path()]); this earlier version is therefore dead
    # code -- confirm and remove.
    return [self.zone_cf]
def zonecfg(self, zonecfg_args=[]):
    """Run zonecfg with the given extra arguments, then refresh state.

    Raises excError on non-zero exit.
    """
    cmd = [ZONECFG, '-z', self.name] + zonecfg_args
    (ret, out, err) = self.vcall(cmd,err_to_info=True)
    if ret != 0:
        msg = '%s failed status: %i\n%s' % (" ".join(cmd), ret, out)
        self.log.error(msg)
        raise excError(msg)
    else:
        msg = '%s done status: %i\n%s' % (" ".join(cmd), ret, out)
        self.log.info(msg)
    self.zone_refresh()
    return ret

def zoneadm(self, action, option=None):
    """Run a whitelisted zoneadm action, log its duration, refresh state.

    Returns 1 for unsupported actions; raises excError on failure.
    """
    if action in [ 'ready' , 'boot' ,'shutdown' , 'halt' ,'attach', 'detach', 'install', 'clone' ] :
        cmd = ['zoneadm', '-z', self.name, action ]
    else:
        self.log.error("unsupported zone action: %s" % action)
        return 1
    if option is not None:
        cmd += option
    t = datetime.now()
    (ret, out, err) = self.vcall(cmd,err_to_info=True)
    # fix: the elapsed time was previously stored in a variable named
    # "len", shadowing the builtin
    duration = datetime.now() - t
    if ret != 0:
        msg = '%s failed status: %i in %s logs in %s' % (' '.join(cmd), ret, duration, out)
        self.log.error(msg)
        raise excError(msg)
    else:
        self.log.info('%s done in %s - ret %i - logs in %s'
                      % (' '.join(cmd), duration, ret, out))
    self.zone_refresh()
    return ret
def set_zonepath_perms(self):
    """Ensure the zonepath is root-owned and stripped of group/other access."""
    self.get_zonepath()
    if not os.path.exists(self.zonepath):
        os.makedirs(self.zonepath)
    st = os.stat(self.zonepath)
    if st.st_uid != 0 or st.st_gid != 0:
        self.log.info("set %s ownership to uid 0 gid 0"%self.zonepath)
        os.chown(self.zonepath, 0, 0)
    mode = st[ST_MODE]
    if mode & (S_IWOTH | S_IXOTH | S_IROTH | S_IWGRP | S_IXGRP | S_IRGRP):
        self.vcall(['chmod', '700', self.zonepath])

def rcp_from(self, src, dst):
    """Copy a file out of the zone's root filesystem to a local path."""
    self.get_zonepath()
    zone_src = os.path.realpath(self.zonepath + '/root/' + src)
    copy_cmd = ['cp', zone_src, dst]
    out, err, ret = justcall(copy_cmd)
    if ret != 0:
        raise excError("'%s' execution error:\n%s"%(' '.join(copy_cmd), err))
    return out, err, ret

def rcp(self, src, dst):
    """Copy a local file into the zone's root filesystem."""
    self.get_zonepath()
    zone_dst = os.path.realpath(self.zonepath + '/root/' + dst)
    copy_cmd = ['cp', src, zone_dst]
    out, err, ret = justcall(copy_cmd)
    if ret != 0:
        raise excError("'%s' execution error:\n%s"%(' '.join(copy_cmd), err))
    return out, err, ret
def attach(self):
    """Attach the zone, recreating its config from the export if needed."""
    self.zone_refresh()
    if self.state in ('installed' , 'ready', 'running'):
        self.log.info("zone container %s already installed" % self.name)
        return 0
    elif self.state is None:
        # the zone is unknown on this node: recreate it from the
        # exported configuration
        cmd = [ZONECFG, "-z", self.name, "-f", self.zone_cfg_path()]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            # bug fix: was "raise ex.excError" but "ex" is not imported in
            # this module; excError is imported directly from rcExceptions
            raise excError(err)
    try:
        self.umount_fs_in_zonepath()
        self.zoneadm('attach')
    except excError:
        # fall back to a forced attach
        self.zoneadm('attach', ['-F'])
    self.can_rollback = True

def delete(self):
    """Delete the zone configuration when delete_on_stop is set."""
    if not self.delete_on_stop:
        return 0
    self.zone_refresh()
    if self.state is None:
        self.log.info("zone container %s already deleted" % self.name)
        return 0
    cmd = [ZONECFG, "-z", self.name, "delete", "-F"]
    ret, out, err = self.vcall(cmd)
    if ret != 0:
        # bug fix: was "raise ex.excError" -- "ex" is undefined here
        raise excError(err)
    return 0
def detach(self):
    """Detach the zone unless it is already just configured."""
    self.zone_refresh()
    if self.state == "configured":
        self.log.info("zone container %s already detached/configured" % self.name)
        return 0
    return self.zoneadm('detach')

def ready(self):
    """Bring the zone to the ready state, fixing zonepath perms first."""
    self.zone_refresh()
    if self.state in ("ready", "running"):
        self.log.info("zone container %s already ready" % self.name)
        return 0
    self.set_zonepath_perms()
    return self.zoneadm('ready')
def install_drp_flag(self):
    """Create the DRP flag file at the root of the zonepath."""
    self.get_zonepath()
    flag = os.path.join(self.zonepath, ".drp_flag")
    self.log.info("install drp flag in container : %s"%flag)
    # fix: removed a redundant f.close() inside the "with" block
    with open(flag, 'w') as f:
        f.write(' ')

def get_smf_state(self, smf=None):
    """Return the state of an smf service inside the zone, or False."""
    cmd = self.runmethod + [SVCS, '-H', '-o', 'state', smf]
    (out, err, status) = justcall(cmd)
    if status == 0:
        return out.split('\n')[0]
    else:
        return False

def is_smf_state(self, smf=None, value=None):
    """Return True if the smf service reports the given state."""
    current_value = self.get_smf_state(smf)
    if current_value is False:
        return False
    elif current_value == value:
        return True
    else:
        return False

def is_multi_user(self):
    """Return True when the multi-user milestone is online in the zone."""
    return self.is_smf_state(MULTI_USER_SMF, "online")

def wait_multi_user(self):
    """Block until the multi-user milestone comes online."""
    self.log.info("wait for smf state on on %s", MULTI_USER_SMF)
    self.wait_for_fn(self.is_multi_user, self.startup_timeout, 2)
def boot(self):
    """Boot the zone and wait until it is operational.

    Return 0 if the zone is already running; raise excError if the boot
    does not leave the zone in the running state.
    """
    self.zone_refresh()
    if self.state == "running" :
        self.log.info("zone container %s already running" % self.name)
        return 0
    self.zoneadm('boot')
    if self.state != "running":
        raise excError("zone should be running")
    # bug fix: the wait below was previously placed after the
    # return/raise statements and therefore unreachable
    self.log.info("wait for zone operational")
    self.wait_for_fn(self.operational, self.startup_timeout, 2)
    return 0
def halt(self):
    """Stop the zone, trying a graceful "init 0" before zoneadm halt.

    Need wait poststat after returning to installed state on ipkg
    example : /bin/ksh -p /usr/lib/brand/ipkg/poststate zonename zonepath 5 4
    """
    self.zone_refresh()
    if self.state in [ 'installed', 'configured'] :
        self.log.info("zone container %s already stopped" % self.name)
        return 0
    if self.state == 'running':
        # ask init inside the zone to bring it down, then poll the state
        self.vcall(['zlogin' , self.name , '/sbin/init' , '0'])
        for t in range(self.shutdown_timeout):
            self.zone_refresh()
            if self.state == 'installed':
                # on ipkg brands, wait for the poststate script to finish
                for t2 in range(self.shutdown_timeout):
                    time.sleep(1)
                    (out,err,st) = justcall([ 'pgrep', '-fl', 'ipkg/poststate.*'+ self.name])
                    if st == 0 :
                        self.log.info("Waiting for ipkg poststate complete: %s" % out)
                    else:
                        break
                return 0
            time.sleep(1)
        # fix: message typo "timeout out" corrected
        self.log.info("timed out waiting for %s shutdown", self.name)
    # graceful shutdown did not complete (or zone was in another state):
    # force the halt
    ret = self.zoneadm('halt')
    if ret != 0:
        return ret
    return 0
def container_start(self):
    # generic container interface entry point: boot the zone
    return self.boot()

def _status(self, verbose=False):
    # map the zoneadm state to an opensvc resource status, honoring the
    # standby (always_on) node list
    # NOTE(review): always_on is presumably provided by the resource
    # base class -- confirm
    self.zone_refresh()
    if self.state == 'running' :
        if rcEnv.nodename in self.always_on:
            return rcStatus.STDBY_UP
        else:
            return rcStatus.UP
    else:
        if rcEnv.nodename in self.always_on:
            return rcStatus.STDBY_DOWN
        else:
            return rcStatus.DOWN

def zone_refresh(self):
    """ refresh Zone object attributes:
            state
            zonepath
            brand
        from zoneadm -z zonename list -p
        zoneid:zonename:state:zonepath:uuid:brand:ip-type
    """
    (out,err,st) = justcall([ 'zoneadm', '-z', self.name, 'list', '-p' ])
    if st == 0 :
        out = out.strip()
        l = out.split(':')
        n_fields = len(l)
        # the number of colon separated fields varies with the solaris
        # release: accept the 7, 9 and 10 field variants
        if n_fields == 9:
            (zoneid,zonename,state,zonepath,uuid,brand,iptype,rw,macp) = l
        elif n_fields == 10:
            (zoneid,zonename,state,zonepath,uuid,brand,iptype,rw,macp,dummy) = l
        elif n_fields == 7:
            (zoneid,zonename,state,zonepath,uuid,brand,iptype) = l
        else:
            raise excError("Unexpected zoneadm list output: %s"%out)
        if zonename == self.name :
            self.state = state
            self.zonepath = zonepath
            self.brand = brand
            return True
        else:
            return False
    else:
        return False
def is_running(self):
    """Return True if the zone is in the running state."""
    self.zone_refresh()
    return self.state == 'running'

def is_up(self):
    """Generic container interface: alias for is_running()."""
    return self.is_running()

def operational(self):
    """Return True if a command can be executed inside the zone."""
    cmd = self.runmethod + ['pwd']
    return qcall(cmd) == 0

def boot_and_wait_reboot(self):
    """boot zone, then wait for automatic zone reboot
        boot zone
        wait for zone init process end
        wait for zone running
        wait for zone operational
    """
    self.log.info("wait for zone boot and reboot...")
    self.boot()
    # bug fix: the previous test was "if self.is_running is False:",
    # comparing the bound method object to False, which is never true;
    # the method must be called
    if not self.is_running():
        raise excError("zone is not running")
    cmd = [PGREP, "-z", self.name, "-f", INIT]
    (out, err, st) = justcall(cmd)
    if st != 0:
        raise excError("fail to detect zone init process")
    pids = " ".join(out.split("\n")).rstrip()
    cmd = [PWAIT, pids]
    self.log.info("wait for zone init process %s termination" % (pids))
    if qcall(cmd) != 0:
        raise excError("failed " + " ".join(cmd))
    self.log.info("wait for zone running again")
    self.wait_for_fn(self.is_up, self.startup_timeout, 2)
    self.log.info("wait for zone operational")
    self.wait_for_fn(self.operational, self.startup_timeout, 2)
def umount_fs_in_zonepath(self):
    """zone boot will fail if some fs linger under the zonepath.
       those fs might be datasets automounted upon zpool import.
       umount them.
       if they are needed, them still may be mounted by opensvc
       if declared as zoned fs or encap fs.
    """
    self.get_zonepath()
    if self.zonepath == "/":
        # sanity check
        return
    # load the platform-specific mounts parser
    m = __import__('rcMounts'+rcEnv.sysname)
    mounts = m.Mounts()
    # deepest mount points first, so children unmount before parents
    mounts.sort(reverse=True)
    mntpts = []
    for resource in self.svc.get_resources('fs'):
        mntpts.append(resource.mount_point)
    for mount in mounts.mounts:
        # don't unmount zonepath itself
        if mount.mnt == self.zonepath:
            continue
        if not mount.mnt.startswith(self.zonepath):
            continue
        # don't umount fs not handled by the service
        if mount.mnt not in mntpts:
            continue
        self.vcall(['umount', mount.mnt])
        self.vcall(['rmdir', mount.mnt])
        if mount.type == 'zfs':
            # keep the dataset from automounting at next zpool import
            zfs_setprop(mount.dev, 'canmount', 'noauto')
def start(self):
    """Start the zone and its encapsulated resources in dependency
    order: attach/ready, ip, zone boot, then disk and fs."""
    if not 'noaction' in self.tags:
        self.attach()
        self.ready()
    # ip resources are started before the zone boots
    self.svc.sub_set_action("ip", "start", tags=set([self.name]))
    if not 'noaction' in self.tags:
        self.boot()
    self.svc.sub_set_action("disk.scsireserv", "start", tags=set([self.name]))
    self.svc.sub_set_action("disk.zpool", "start", tags=set([self.name]))
    self.svc.sub_set_action("disk.raw", "start", tags=set([self.name]))
    self.svc.sub_set_action("fs", "start", tags=set([self.name]))

def stop(self):
    """Stop the encapsulated resources then the zone itself, in
    reverse dependency order. The zone config is dumped first so a
    failover peer gets a current copy."""
    self.export_zone_cfg()
    self.svc.sub_set_action("fs", "stop", tags=set([self.name]))
    self.svc.sub_set_action("disk.raw", "stop", tags=set([self.name]))
    self.svc.sub_set_action("disk.zpool", "stop", tags=set([self.name]))
    self.svc.sub_set_action("disk.scsireserv", "stop", tags=set([self.name]))
    self.svc.sub_set_action("ip", "stop", tags=set([self.name]))
    if not 'noaction' in self.tags:
        self.halt()
        self.detach()
        self.delete()

def provision(self):
    """Provision the zone, then the encapsulated disk and fs resources."""
    if not 'noaction' in self.tags:
        self._provision()
    self.svc.sub_set_action("disk.scsireserv", "provision", tags=set([self.name]))
    self.svc.sub_set_action("disk.zpool", "provision", tags=set([self.name]))
    self.svc.sub_set_action("disk.raw", "provision", tags=set([self.name]))
    self.svc.sub_set_action("fs", "provision", tags=set([self.name]))

def presync(self):
    # refresh the zone config dump before peers sync it
    self.export_zone_cfg()

def files_to_sync(self):
    # the zone config dump is the only file replicated to peers
    return [self.zone_cfg_path()]

def __str__(self):
    """Resource string representation, including the zone name."""
    return "%s name=%s" % (Res.Resource.__str__(self), self.name)

def _provision(self):
    """Delegate zone provisioning to the provZone module."""
    m = __import__("provZone")
    m.ProvisioningZone(self).provisioner()
def get_container_info(self):
    """Return the zone capping info as {'vcpus': ..., 'vmem': ...},
    parsed from zonecfg output. vmem is normalized to MB. Both
    values default to "0" when the zone is uncapped."""
    vcpus = "0"
    vmem = "0"
    # cpu cap: "zone.cpu-cap" rctl; value line looks like
    # "value: (priv=privileged,limit=400,action=deny)" where the
    # limit is a percentage of one cpu (400 -> 4.0 vcpus)
    cmd = [ZONECFG, "-z", self.name, "info", "rctl", "name=zone.cpu-cap"]
    (out, err, status) = justcall(cmd)
    if status == 0:
        lines = out.split('\n')
        for line in lines:
            if "value:" not in line:
                continue
            l = line.split("limit=")
            if len(l) == 2:
                # NOTE(review): assumes a ',' always follows the limit
                # value -- raises ValueError otherwise; confirm against
                # zonecfg output on all supported releases
                vcpus = l[-1][:l[-1].index(',')]
                vcpus = str(float(vcpus)/100)
                break
    # memory cap: "capped-memory" physical value, with a T/G/M/K
    # unit suffix, converted to MB
    cmd = [ZONECFG, "-z", self.name, "info", "capped-memory"]
    (out, err, status) = justcall(cmd)
    if status == 0:
        lines = out.split('\n')
        for line in lines:
            if "physical:" not in line:
                continue
            l = line.split(": ")
            if len(l) == 2:
                vmem = l[-1].strip()
                if vmem.endswith('T'):
                    vmem = str(float(vmem[:-1])*1024*1024)
                elif vmem.endswith('G'):
                    vmem = str(float(vmem[:-1])*1024)
                elif vmem.endswith('M'):
                    vmem = vmem[:-1]
                elif vmem.endswith('K'):
                    vmem = str(float(vmem[:-1])/1024)
                break
    return {'vcpus': vcpus, 'vmem': vmem}
if __name__ == "__main__":
    # display the pydoc help of the classes defined in this module
    for cls in (Zone,):
        help(cls)
opensvc-1.8~20170412/lib/provDiskLvHP-UX.py 0000644 0001750 0001750 00000002620 13073467726 020270 0 ustar jkelbert jkelbert from provisioning import Provisioning
from rcUtilities import justcall, which, convert_size
from rcGlobalEnv import rcEnv
from svcBuilder import conf_get_string_scope
import os
import rcExceptions as ex
import time
class ProvisioningDisk(Provisioning):
    """HP-UX LVM logical volume provisioner."""

    def __init__(self, r):
        # r is the disk resource to provision
        Provisioning.__init__(self, r)

    def provisioner(self):
        """Create the logical volume described by the resource's
        'size' and 'vg' configuration keywords.

        Skips silently (info log) when either keyword is unset.
        Raises ex.excError when the lvm commands or the volume group
        are missing, or when lvcreate fails.
        """
        if not which('vgdisplay'):
            self.r.log.error("vgdisplay command not found")
            raise ex.excError
        if not which('lvcreate'):
            self.r.log.error("lvcreate command not found")
            raise ex.excError
        try:
            self.size = conf_get_string_scope(self.r.svc, self.r.svc.config, self.r.rid, "size")
            # normalize to MB for the lvcreate -L argument
            self.size = convert_size(self.size, _to="m")
            self.vg = conf_get_string_scope(self.r.svc, self.r.svc.config, self.r.rid, "vg")
        except Exception as e:
            self.r.log.info("skip lv provisioning: %s" % str(e))
            return
        # verify the volume group exists before creating the lv
        cmd = ['vgdisplay', self.vg]
        out, err, ret = justcall(cmd)
        if ret != 0:
            self.r.log.error("volume group %s does not exist"%self.vg)
            raise ex.excError
        # the lv is named after the basename of the resource device path
        dev = os.path.basename(self.r.device)
        # create the logical volume
        cmd = ['lvcreate', '-n', dev, '-L', str(self.size)+'M', self.vg]
        ret, out, err = self.r.vcall(cmd)
        if ret != 0:
            raise ex.excError
opensvc-1.8~20170412/lib/resIpLinux.py 0000644 0001750 0001750 00000003214 13073467726 017505 0 ustar jkelbert jkelbert import resIp as Res
import rcExceptions as ex
from rcUtilitiesLinux import check_ping
from rcUtilities import which, to_cidr, to_dotted
class Ip(Res.Ip):
    """Linux ip resource driver: plumb/unplumb an address on ipdev."""

    def check_ping(self, timeout=5, count=1):
        """Return the ping status of the resource address."""
        self.log.info("checking %s availability"%self.addr)
        return check_ping(self.addr, timeout=timeout, count=count)

    def start_link(self):
        """Bring the physical link up, preferring iproute2 and
        falling back to ifconfig.

        NOTE(review): returns (ret, out, err) only on failure and
        None on success -- confirm callers ignore the return value.
        """
        if which("ip"):
            cmd = ['ip', 'link', 'set', 'dev', self.ipdev, 'up']
        else:
            cmd = ['ifconfig', self.ipdev, 'up']
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            return ret, out, err

    def startip_cmd(self):
        """Add the address: ipv6 goes on ipdev directly, ipv4 on the
        stacked (alias) device, then wait for the address to answer
        ping before returning."""
        if ':' in self.addr:
            cmd = ['ifconfig', self.ipdev, 'inet6', 'add', '/'.join([self.addr, to_cidr(self.mask)])]
        else:
            cmd = ['ifconfig', self.stacked_dev, self.addr, 'netmask', to_dotted(self.mask), 'up']
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            return ret, out, err
        # ip activation may still be incomplete
        # wait for activation, to avoid startapp scripts to fail binding their listeners
        for i in range(5, 0, -1):
            # each ping attempt carries a 1s timeout, so this loop
            # waits up to ~5s
            if check_ping(self.addr, timeout=1, count=1):
                return ret, out, err
        self.log.error("timed out waiting for ip activation")
        raise ex.excError

    def stopip_cmd(self):
        """Remove the address (ipv6) or down the stacked device (ipv4)."""
        if ':' in self.addr:
            cmd = ['ifconfig', self.ipdev, 'inet6', 'del', '/'.join([self.addr, to_cidr(self.mask)])]
        else:
            if self.stacked_dev is None:
                return 1, "", "no stacked dev found"
            cmd = ['ifconfig', self.stacked_dev, 'down']
        return self.vcall(cmd)
opensvc-1.8~20170412/lib/rcDocker.py 0000644 0001750 0001750 00000075073 13073467726 017153 0 ustar jkelbert jkelbert # -*- coding: utf8 -*-
"""
The module implementing the DockerLib class.
"""
import os
from distutils.version import LooseVersion as V
import json
import re
import rcStatus
import rcExceptions as ex
from rcUtilities import which, justcall, lazy, unset_lazy
from rcGlobalEnv import rcEnv
from svcBuilder import conf_get_string_scope, conf_get_boolean_scope
os.environ['LANG'] = 'C'
class DockerLib(object):
"""
Instanciated as the 'dockerlib' Svc lazy attribute, this class abstracts
docker daemon ops.
"""
def __init__(self, svc=None):
    """
    Load the docker-related service configuration keywords and derive
    the daemon mode (private vs shared), socket, pid file and client
    command prefix.
    """
    self.svc = svc
    self.max_wait_for_dockerd = 5
    self.docker_info_done = False
    try:
        self.docker_daemon_private = \
            conf_get_boolean_scope(svc, svc.config, 'DEFAULT', 'docker_daemon_private')
    except ex.OptNotFound:
        self.docker_daemon_private = True
    if rcEnv.sysname != "Linux":
        # private daemons are only supported on Linux
        self.docker_daemon_private = False
    try:
        self.docker_exe_init = \
            conf_get_string_scope(svc, svc.config, 'DEFAULT', 'docker_exe')
    except ex.OptNotFound:
        self.docker_exe_init = None
    try:
        self.dockerd_exe_init = \
            conf_get_string_scope(svc, svc.config, 'DEFAULT', 'dockerd_exe')
    except ex.OptNotFound:
        self.dockerd_exe_init = None
    try:
        self.docker_data_dir = \
            conf_get_string_scope(svc, svc.config, 'DEFAULT', 'docker_data_dir')
    except ex.OptNotFound:
        self.docker_data_dir = None
    try:
        self.docker_daemon_args = \
            conf_get_string_scope(svc, svc.config, 'DEFAULT', 'docker_daemon_args').split()
    except ex.OptNotFound:
        self.docker_daemon_args = []
    try:
        self.docker_swarm_args = \
            conf_get_string_scope(svc, svc.config, 'DEFAULT', 'docker_swarm_args').split()
    except ex.OptNotFound:
        self.docker_swarm_args = []
    try:
        self.docker_swarm_managers = \
            conf_get_string_scope(svc, svc.config, 'DEFAULT', 'docker_swarm_managers').split()
    except ex.OptNotFound:
        self.docker_swarm_managers = []
    if self.docker_data_dir:
        if "--exec-opt" not in self.docker_daemon_args and self.docker_min_version("1.7"):
            self.docker_daemon_args += ["--exec-opt", "native.cgroupdriver=cgroupfs"]
    if "--token" in self.docker_swarm_args:
        raise ex.excError("--token must not be specified in DEFAULT.docker_swarm_args")
    self.docker_var_d = os.path.join(rcEnv.pathvar, self.svc.svcname)
    if not os.path.exists(self.docker_var_d):
        os.makedirs(self.docker_var_d)
    # BUG FIX: the socket setup was chained as an 'elif' to the
    # makedirs test above, so self.docker_socket was left unset on
    # first run (var dir just created), crashing on the
    # 'if self.docker_socket' test below. Make it an independent test.
    if self.docker_daemon_private:
        self.docker_socket = "unix://"+os.path.join(self.docker_var_d, 'docker.sock')
    else:
        self.docker_socket = None
    if self.docker_daemon_private:
        self.docker_pid_file = os.path.join(self.docker_var_d, 'docker.pid')
    else:
        self.docker_pid_file = None
        # shared daemon: learn the data dir from "docker info"
        lines = [line for line in self.docker_info.splitlines() if "Root Dir" in line]
        try:
            self.docker_data_dir = lines[0].split(":")[-1].strip()
        except IndexError:
            self.docker_data_dir = None
    self.docker_cmd = [self.docker_exe]
    if self.docker_socket:
        self.docker_cmd += ['-H', self.docker_socket]
def get_ps(self, refresh=False):
    """
    Return the 'docker ps' output from cache or from the command
    execution, depending on <refresh>.
    """
    if refresh:
        unset_lazy(self, "docker_ps")
    return self.docker_ps

@lazy
def docker_ps(self):
    """
    The "docker ps" output (all containers, untruncated ids).
    """
    cmd = self.docker_cmd + ['ps', '-a', '--no-trunc']
    out, err, ret = justcall(cmd)
    if ret != 0:
        raise ex.excError(err)
    return out

def docker_node_rm(self, ref):
    """
    Execute "docker node rm <ref>".
    """
    cmd = self.docker_cmd + ['node', 'rm', ref]
    self.svc.log.debug("remove replaced node %s" % ref)
    self.svc.log.debug(" ".join(cmd))
    out, err, ret = justcall(cmd)
    if ret != 0:
        raise ex.excError(err)

@lazy
def docker_service_ls(self):
    """
    The "docker service ls" output.
    """
    cmd = self.docker_cmd + ['service', 'ls']
    out, err, ret = justcall(cmd)
    if ret != 0:
        raise ex.excError(err)
    return out

def docker_service_ps(self, service):
    """
    The "docker service ps <service>" output, or "" when <service>
    is None.
    """
    if service is None:
        return ""
    cmd = self.docker_cmd + ['service', 'ps', service]
    out, err, ret = justcall(cmd)
    if ret != 0:
        raise ex.excError(err)
    return out

def service_ps_data(self, service):
    # parse "docker service ps" and return the inspect data of the
    # current (non-history) tasks still bound to a known swarm node
    lines = self.docker_service_ps(service).splitlines()
    if len(lines) < 2:
        return []
    ids = []
    for line in lines[1:]:
        if "\_" in line:
            # don't care about "history" lines
            continue
        line = line.strip().split()
        if len(line) == 0:
            continue
        ids.append(line[0])
    data = self.docker_inspect(ids)
    # discard lines with left nodes
    node_ids = self.node_ids()
    data = [inst for inst in data if "NodeID" in inst and inst["NodeID"] in node_ids]
    return data

def docker_node_ls(self):
    """
    The "docker node ls -q" output (node ids only).
    """
    cmd = self.docker_cmd + ['node', 'ls', '-q']
    out, err, ret = justcall(cmd)
    if ret != 0:
        raise ex.excError(err)
    return out

def node_ids(self):
    # the swarm node ids, one per line of "docker node ls -q"
    return self.docker_node_ls().strip().splitlines()

def node_ls_data(self):
    # inspect data of every swarm node
    cmd = self.docker_cmd + ['node', 'inspect'] + self.node_ids()
    out, err, ret = justcall(cmd)
    if ret != 0:
        raise ex.excError(err)
    return json.loads(out)

def node_data(self):
    # inspect data of the local swarm node
    cmd = self.docker_cmd + ['node', 'inspect', rcEnv.nodename]
    out, err, ret = justcall(cmd)
    if ret != 0:
        raise ex.excError(err)
    return json.loads(out)[0]
@lazy
def service_ls_data(self):
    """
    A hash of services data as found in "docker service ls",
    indexed by service name. Returns None when the output has no
    service lines or an unexpected header.
    """
    lines = self.docker_service_ls.splitlines()
    if len(lines) < 2:
        return
    header = lines[0].strip().split()
    try:
        service_id_idx = header.index('ID')
        service_name_idx = header.index('NAME')
        service_mode_idx = header.index('MODE')
        service_replicas_idx = header.index('REPLICAS')
        service_image_idx = header.index('IMAGE')
    except (IndexError, ValueError):
        return
    ref_len = len(header)
    data = {}
    for line in lines[1:]:
        line = line.strip().split()
        if len(line) != ref_len:
            continue
        service_name = line[service_name_idx].strip()
        data[service_name] = {
            "name": service_name,
            "id": line[service_id_idx].strip(),
            "mode": line[service_mode_idx].strip(),
            "replicas": line[service_replicas_idx].strip().split("/"),
            "image": line[service_image_idx].strip(),
        }
    return data

@lazy
def container_id_by_name(self):
    """
    A hash of instances data as found in "docker ps", indexed by
    container name.
    """
    lines = self.docker_ps.splitlines()
    if len(lines) < 2:
        return
    try:
        # column offset of the NAMES header; names are then sliced
        # positionally from each line
        start = lines[0].index('NAMES')
    except (IndexError, ValueError):
        return
    data = {}
    for line in lines[1:]:
        if len(line.strip()) == 0:
            continue
        try:
            names = line[start:].strip().split(',')
        except IndexError:
            continue
        for name in names:
            # swarm names are prefixed by <node>/
            elements = name.split("/")
            container_name = elements[-1]
            if len(elements) == 2:
                swarm_node = elements[0]
            else:
                swarm_node = None
            data[container_name] = {
                "id": line.split()[0],
                "swarm_node": swarm_node,
            }
    return data

def get_container_id_by_name(self, resource, refresh=False):
    """
    Return the container id for the <resource> container resource.
    Looked up in "docker ps" by container name. For swarm services
    the first running instance whose name starts with the service
    name prefix wins.
    """
    if refresh:
        unset_lazy(self, "docker_ps")
        unset_lazy(self, "container_id_by_name")
    if resource.docker_service:
        prefix = resource.service_name+"."
        if self.container_id_by_name is None:
            return
        running = self.get_running_instance_ids()
        for container_name, data in self.container_id_by_name.items():
            if data["id"] not in running:
                continue
            if container_name.startswith(prefix):
                return data["id"]
    else:
        if self.container_id_by_name is None or \
           resource.container_name not in self.container_id_by_name:
            return
        data = self.container_id_by_name[resource.container_name]
        return data["id"]

def get_service_id_by_name(self, resource, refresh=False):
    """
    Return the service id for the <resource> container resource,
    looked up in "docker service ls" by service name.
    """
    if refresh:
        unset_lazy(self, "docker_service_ls")
        unset_lazy(self, "service_ls_data")
    if self.service_ls_data is None or \
       resource.service_name not in self.service_ls_data:
        return
    data = self.service_ls_data[resource.service_name]
    return data["id"]
@lazy
def docker_info(self):
    """
    The output of "docker info".
    """
    cmd = [self.docker_exe, "info"]
    return justcall(cmd)[0]

@lazy
def docker_version(self):
    """
    The docker version string, or False when it can not be parsed.

    NOTE(review): a False return makes docker_min_version() call
    V(False), which raises -- confirm callers only run this with a
    working docker executable.
    """
    cmd = [self.docker_exe, "--version"]
    out = justcall(cmd)[0]
    elements = out.split()
    if len(elements) < 3:
        return False
    return elements[2].rstrip(",")

def docker_min_version(self, version):
    """
    Return True if the docker version is at least <version>.
    """
    if V(self.docker_version) >= V(version):
        return True
    return False

def get_running_service_ids(self, refresh=False):
    """
    Return the list of running docker services id.
    """
    if refresh:
        unset_lazy(self, "running_service_ids")
        unset_lazy(self, "docker_service_ls")
        unset_lazy(self, "service_ls_data")
    return self.running_service_ids

@lazy
def running_service_ids(self):
    """
    The list of running docker services id.
    """
    if self.service_ls_data is None:
        return []
    return [service["id"] for service in self.service_ls_data.values()]

def get_running_instance_ids(self, refresh=False):
    """
    Return the list of running docker instances id.
    """
    if refresh:
        unset_lazy(self, "running_instance_ids")
    return self.running_instance_ids

@lazy
def running_instance_ids(self):
    """
    The list of running docker instances id.
    """
    cmd = self.docker_cmd + ['ps', '-q', '--no-trunc']
    out = justcall(cmd)[0]
    return out.replace('\n', ' ').split()
def get_run_image_id(self, resource, run_image=None):
    """
    Return the full docker image id of the resource's run_image.
    Accepts a short 12-hex id, a sha256: reference (returned as-is)
    or a name:tag reference (resolved, pulling if necessary on
    docker >= 1.13).
    """
    if run_image is None and hasattr(resource, "run_image"):
        run_image = resource.run_image
    # short id: return as-is
    if len(run_image) == 12 and re.match('^[a-f0-9]*$', run_image):
        return run_image
    # full digest reference: return as-is
    if run_image.startswith("sha256:"):
        return run_image
    try:
        image_name, image_tag = run_image.split(':')
    except ValueError:
        return
    if self.docker_min_version("1.13"):
        data = self.docker_image_inspect(run_image)
        if data is None:
            # image not available locally: pull then retry
            self.docker_pull(run_image)
            data = self.docker_image_inspect(run_image)
            if data is None:
                raise ValueError("image %s not pullable" % run_image)
        return data["Id"]
    # pre-1.13 fallback: scan "docker images" for the name:tag pair
    cmd = self.docker_cmd + ['images', '--no-trunc', image_name]
    results = justcall(cmd)
    if results[2] != 0:
        return run_image
    for line in results[0].splitlines():
        elements = line.split()
        if len(elements) < 3:
            continue
        if elements[0] == image_name and elements[1] == image_tag:
            return elements[2]
    return run_image

def docker_pull(self, ref):
    # pull <ref> from the registry; raises excError on failure
    self.svc.log.info("pulling docker image %s" % ref)
    cmd = self.docker_cmd + ['pull', ref]
    results = justcall(cmd)
    if results[2] != 0:
        raise ex.excError(results[1])

@lazy
def images(self):
    """
    The hash of docker images, indexed by image id.
    """
    cmd = self.docker_cmd + ['images', '--no-trunc']
    results = justcall(cmd)
    if results[2] != 0:
        return
    data = {}
    for line in results[0].splitlines():
        elements = line.split()
        if len(elements) < 3:
            continue
        if elements[2] == "IMAGE":
            # header line
            continue
        data[elements[2]] = elements[0]+':'+elements[1]
    return data
def info(self):
    """
    Return the keys contributed to resinfo. Only contributes once
    per run (guarded by docker_info_done).
    """
    if self.docker_info_done:
        return []
    data = []
    data += self._docker_info_version()
    data += self._docker_info_drivers()
    data += self._docker_info_images()
    return data

def _docker_info_version(self):
    """
    Return the docker version key contributed to resinfo.
    """
    return [[
        "",
        "docker_version",
        self.docker_version
    ]]

def _docker_info_drivers(self):
    """
    Return the docker drivers keys contributed to resinfo.
    """
    data = []
    lines = self.docker_info.splitlines()
    for line in lines:
        elements = line.split(": ")
        if len(elements) < 2:
            continue
        if elements[0] == "Storage Driver":
            data.append(["", "storage_driver", elements[1]])
        if elements[0] == "Execution Driver":
            data.append(["", "exec_driver", elements[1]])
    return data

def _docker_info_images(self):
    """
    Return the per-container resource resinfo keys.
    """
    data = []
    images_done = []
    # referenced images
    for resource in self.svc.get_resources("container.docker"):
        image_id = self.get_run_image_id(resource)
        images_done.append(image_id)
        data.append([resource.rid, "run_image", resource.run_image])
        data.append([resource.rid, "docker_image_id", image_id])
        data.append([resource.rid, "docker_instance_id", resource.container_id])
    # unreferenced images
    for image_id in self.images:
        if image_id in images_done:
            continue
        data.append(["", "docker_image_id", image_id])
    # mark done so subsequent info() calls return []
    self.docker_info_done = True
    return data
def image_userfriendly_name(self, resource):
    """
    Map the resource's image id to its repo:tag name when the local
    image table knows it; otherwise return run_image unchanged.
    """
    run_image = resource.run_image
    if ':' not in run_image and self.images is not None:
        if run_image in self.images:
            return self.images[run_image]
    return run_image
def docker_inspect(self, container_id):
    """
    Return the "docker inspect" data dict for one container id, or
    the list of data dicts when <container_id> is a list of ids.
    """
    if isinstance(container_id, list):
        cmd = self.docker_cmd + ['inspect'] + container_id
        out = justcall(cmd)[0]
        data = json.loads(out)
        return data
    else:
        cmd = self.docker_cmd + ['inspect', container_id]
        out = justcall(cmd)[0]
        data = json.loads(out)
        return data[0]

def docker_service_inspect(self, service_id):
    """
    Return the "docker service inspect" data dict.
    """
    cmd = self.docker_cmd + ['service', 'inspect', service_id]
    out = justcall(cmd)[0]
    data = json.loads(out)
    return data[0]

def docker_image_inspect(self, image_id):
    """
    Return the "docker image inspect" data dict, or None when the
    image is not known locally.
    """
    cmd = self.docker_cmd + ['image', 'inspect', image_id]
    out = justcall(cmd)[0]
    data = json.loads(out)
    if len(data) == 0:
        return
    return data[0]

def repotag_to_image_id(self, repotag):
    # resolve a repo:tag reference to the full image id, or None
    data = self.docker_image_inspect(repotag)
    if data is None:
        return
    return data["Id"]
def docker_stop(self):
    """
    Stop the docker daemon if possible: private daemon, daemon
    running, known data dir, pid file present, and no container
    instance left running.
    """
    def can_stop():
        """
        Return True if the docker daemon can be stopped.
        """
        if not self.docker_daemon_private:
            return False
        if not self.docker_running():
            return False
        if self.docker_data_dir is None:
            return False
        if not os.path.exists(self.docker_pid_file):
            return False
        if len(self.get_running_instance_ids(refresh=True)) > 0:
            return False
        return True

    if not can_stop():
        return
    try:
        with open(self.docker_pid_file, 'r') as ofile:
            pid = int(ofile.read())
    except (OSError, IOError):
        self.svc.log.warning("can't read %s. skip docker daemon kill",
                             self.docker_pid_file)
        return
    self.svc.log.info("no more container handled by docker daemon (pid %d)."
                      " shut it down", pid)
    import signal
    import time
    tries = 10
    # graceful termination first, escalate to SIGKILL after 10s
    os.kill(pid, signal.SIGTERM)
    while self.docker_running() and tries > 0:
        tries -= 1
        time.sleep(1)
    if tries == 0:
        self.svc.log.warning("dockerd did not stop properly. send a kill "
                             "signal")
        os.kill(pid, signal.SIGKILL)
@lazy
def dockerd_cmd(self):
    """
    The docker daemon startup command, adapted to the docker version.
    """
    if self.docker_min_version("1.13"):
        # standalone dockerd binary
        cmd = [
            self.dockerd_exe,
            '-H', self.docker_socket,
            '-g', self.docker_data_dir,
            '-p', self.docker_pid_file
        ]
    elif self.docker_min_version("1.8"):
        # "docker daemon" subcommand
        cmd = [
            self.docker_exe, 'daemon',
            '-H', self.docker_socket,
            '-g', self.docker_data_dir,
            '-p', self.docker_pid_file
        ]
    else:
        # legacy "docker -d" daemon mode
        cmd = self.docker_cmd + [
            '-r=false', '-d',
            '-g', self.docker_data_dir,
            '-p', self.docker_pid_file
        ]
    if self.docker_min_version("1.9") and '--exec-root' not in str(self.docker_daemon_args):
        cmd += ["--exec-root", self.docker_data_dir]
    cmd += self.docker_daemon_args
    return cmd

def _docker_data_dir_resource(self):
    """
    Return the service fs resource handling the docker data dir, or
    None if any.
    """
    mntpts = []
    mntpt_res = {}
    for resource in self.svc.get_resources('fs'):
        mntpts.append(resource.mount_point)
        mntpt_res[resource.mount_point] = resource
    # deepest mount point containing the data dir wins
    for mntpt in sorted(mntpts, reverse=True):
        if mntpt.startswith(self.docker_data_dir):
            return mntpt_res[mntpt]
def docker_start(self, verbose=True):
    """
    Start the docker daemon if in private mode and not already
    running. Serialized by a lock file to avoid concurrent starts.
    """
    if not self.docker_daemon_private:
        return
    import lock
    lockfile = os.path.join(rcEnv.pathlock, 'docker_start')
    try:
        lockfd = lock.lock(timeout=15, delay=1, lockfile=lockfile)
    except lock.LOCK_EXCEPTIONS as exc:
        self.svc.log.error("dockerd start lock acquire failed: %s",
                           str(exc))
        return

    # Sanity checks before deciding to start the daemon
    if self.docker_running():
        lock.unlock(lockfd)
        return
    if self.docker_data_dir is None:
        lock.unlock(lockfd)
        return
    # refuse to start if the fs resource hosting the data dir is down
    resource = self._docker_data_dir_resource()
    if resource is not None:
        state = resource._status()
        if state not in (rcStatus.UP, rcStatus.STDBY_UP):
            self.svc.log.warning("the docker daemon data dir is handled by the %s "
                                 "resource in %s state. can't start the docker "
                                 "daemon", resource.rid, rcStatus.Status(state))
            lock.unlock(lockfd)
            return

    if os.path.exists(self.docker_pid_file):
        self.svc.log.warning("removing leftover pid file %s", self.docker_pid_file)
        os.unlink(self.docker_pid_file)

    # Now we can start the daemon, creating its data dir if necessary
    cmd = self.dockerd_cmd

    if verbose:
        self.svc.log.info("starting docker daemon")
        self.svc.log.info(" ".join(cmd))
    import subprocess
    # detach the daemon from our process group so it survives us
    subprocess.Popen(
        ['nohup'] + cmd,
        stdout=open('/dev/null', 'w'),
        stderr=open('/dev/null', 'a'),
        preexec_fn=os.setpgrp
    )

    import time
    try:
        # wait for the daemon to answer a 'docker info' probe
        for _ in range(self.max_wait_for_dockerd):
            if self._docker_working():
                return
            time.sleep(1)
    finally:
        lock.unlock(lockfd)

def docker_running(self):
    """
    Return True if the docker daemon is running.
    """
    if self.docker_daemon_private:
        return self._docker_running_private()
    else:
        return self._docker_running_shared()

def _docker_running_shared(self):
    """
    Return True if the shared docker daemon is running, inferred
    from a non-empty "docker info" output.
    """
    if self.docker_info == "":
        return False
    return True
def _docker_running_private(self):
    """
    Return True if the private docker daemon is running, based on
    the pid file content. Stale or foreign pid files are removed.

    Raises excError when the pid file exists but can not be read for
    a reason other than absence.
    """
    if not os.path.exists(self.docker_pid_file):
        self.svc.log.debug("docker_running: no pid file %s", self.docker_pid_file)
        return False
    try:
        with open(self.docker_pid_file, "r") as ofile:
            buff = ofile.read()
    except IOError as exc:
        if exc.errno == 2:
            # pid file vanished between the existence check and the open
            return False
        # BUG FIX: the original 'return'ed the exception object,
        # which is truthy and thus silently reported the daemon as
        # running. Raise instead.
        raise ex.excError("docker_running: "+str(exc))
    self.svc.log.debug("docker_running: pid found in pid file %s", buff)
    # verify the pid is alive and actually owned by a docker binary
    exe = os.path.join(os.sep, "proc", buff, "exe")
    try:
        exe = os.path.realpath(exe)
    except OSError:
        self.svc.log.debug("docker_running: no proc info in /proc/%s", buff)
        try:
            os.unlink(self.docker_pid_file)
        except OSError:
            pass
        return False
    if "docker" not in exe:
        self.svc.log.debug("docker_running: pid found but owned by a "
                           "process that is not a docker (%s)", exe)
        try:
            os.unlink(self.docker_pid_file)
        except OSError:
            pass
        return False
    return True
def _docker_working(self):
    """
    Return True if the docker daemon responds to a simple 'info' request.
    """
    probe = self.docker_cmd + ['info']
    return justcall(probe)[2] == 0
@lazy
def docker_exe(self):
    """
    Return the docker executable to use, using the service configuration
    docker_exe as the first choice, and a docker.io or docker exe found
    in PATH as a fallback.
    """
    if self.docker_exe_init and which(self.docker_exe_init):
        return self.docker_exe_init
    elif which("docker.io"):
        return "docker.io"
    elif which("docker"):
        return "docker"
    else:
        raise ex.excInitError("docker executable not found")

@lazy
def dockerd_exe(self):
    # same resolution logic for the standalone daemon binary
    if self.dockerd_exe_init and which(self.dockerd_exe_init):
        return self.dockerd_exe_init
    elif which("dockerd"):
        return "dockerd"
    else:
        raise ex.excInitError("dockerd executable not found")

def join_token(self, ttype):
    """
    Return {"token": ..., "addr": ...} for <ttype> ("manager" or
    "worker"), parsed from "docker swarm join-token" output.
    """
    self.docker_start()
    cmd = self.docker_cmd + ["swarm", "join-token", ttype]
    results = justcall(cmd)
    if results[2] != 0:
        raise ex.excError(results[1])
    token = None
    for line in results[0].splitlines():
        if "--token" in line:
            token = line.split()[1]
            continue
        if token and ":" in line:
            # the line following the token holds the manager addr:port
            addr = line.strip()
            return {"token": token, "addr": addr}
    raise ex.excError("unable to determine the swarm worker join token")

def swarm_initialized(self):
    # the node has joined a swarm when it has any role
    if self.swarm_node_role == "none":
        return False
    return True

def join_token_dump_file(self, ttype):
    # path of the file the join token is dumped to for peer nodes
    # NOTE(review): uses rcEnv.paths.pathvar while __init__ uses
    # rcEnv.pathvar -- confirm both attributes exist in this codebase
    return os.path.join(rcEnv.paths.pathvar, self.svc.svcname, "swarm_" + ttype + "_join_token")

def dump_join_tokens(self):
    # write both join tokens to their dump files (flex primary side)
    for ttype in ("manager", "worker"):
        with open(self.join_token_dump_file(ttype), "w") as fp:
            fp.write(json.dumps(self.join_token(ttype)))

def load_join_token(self, ttype):
    # read a join token previously dumped by the flex primary node
    fpath = self.join_token_dump_file(ttype)
    if not os.path.exists(fpath):
        raise ex.excError("the join token has not been transfered by the flex primary node")
    with open(fpath, "r") as fp:
        data = json.load(fp)
    return data

@lazy
def files_to_sync(self):
    # the join token dump files are replicated to peer nodes
    fpaths = []
    self.dump_join_tokens()
    for ttype in ("manager", "worker"):
        fpath = self.join_token_dump_file(ttype)
        if os.path.exists(fpath):
            fpaths.append(fpath)
    return fpaths
def init_swarm(self):
    """
    Join or initialize the swarm, with a role depending on the node
    position: the flex primary inits as leader, nodes listed in
    docker_swarm_managers join as managers, others join as workers.
    """
    if self.swarm_initialized():
        return
    if rcEnv.nodename == self.svc.flex_primary:
        self.init_swarm_leader()
    elif rcEnv.nodename in self.docker_swarm_managers:
        self.init_swarm_manager()
    else:
        self.init_swarm_worker()
    # the role changed: force re-evaluation of the lazy attribute
    unset_lazy(self, "swarm_node_role")

def init_swarm_leader(self):
    # "docker swarm init" on the flex primary
    cmd = self.docker_cmd + ['swarm', 'init']
    if len(self.docker_swarm_args) > 0:
        cmd += self.docker_swarm_args
    self.svc.log.info(" ".join(cmd))
    out, err, ret = justcall(cmd)
    if ret != 0:
        raise ex.excError(err)

def init_swarm_manager(self):
    # join as manager using the token dumped by the leader
    data = self.load_join_token("manager")
    cmd = self.docker_cmd + ['swarm', 'join', '--token', data["token"], data["addr"]]
    if len(self.docker_swarm_args) > 0:
        cmd += self.docker_swarm_args
    self.svc.log.info(" ".join(cmd))
    out, err, ret = justcall(cmd)
    if ret != 0:
        raise ex.excError(err)

def init_swarm_worker(self):
    # join as worker using the token dumped by the leader
    data = self.load_join_token("worker")
    cmd = self.docker_cmd + ['swarm', 'join', '--token', data["token"], data["addr"]]
    if len(self.docker_swarm_args) > 0:
        cmd += self.docker_swarm_args
    self.svc.log.info(" ".join(cmd))
    out, err, ret = justcall(cmd)
    if ret != 0:
        raise ex.excError(err)

def docker_swarm_leave(self):
    # leave the swarm, if joined
    if self.swarm_node_role == "none":
        return
    cmd = self.docker_cmd + ['swarm', 'leave']
    ret, out, err = self.svc.vcall(cmd)
    if ret != 0:
        raise ex.excError(err)
    unset_lazy(self, "swarm_node_role")

@lazy
def swarm_node_role(self):
    """
    Return
    * none : no role in the swarm, not joined yet
    * worker
    * leader
    * reachable
    """
    if not self.docker_running():
        return "none"
    cmd = self.docker_cmd + ['node', 'ls']
    out, err, ret = justcall(cmd)
    if ret != 0:
        # workers are not allowed to run "docker node ls"
        if "docker swarm" in err:
            return "none"
        else:
            return "worker"
    for line in out.splitlines():
        # drop the current-node marker so columns stay aligned
        line = line.replace(" * ", " ")
        line = line.strip().split()
        if len(line) < 4:
            continue
        if line[1] != rcEnv.nodename:
            continue
        if line[-1] in ("Leader", "Reachable"):
            return line[-1].lower()
        else:
            return "unknown"
def nodes_purge(self):
    """
    Remove lingering nodes, ie those in down state and
    with an active instance matching the hostname.
    Only the swarm leader does the purge.
    """
    if self.swarm_node_role != "leader":
        return
    nodes = self.node_ls_data()
    down = {}
    # index down node ids by hostname
    for node in nodes:
        nodename = node["Description"]["Hostname"]
        if node["Status"]["State"] != "down":
            continue
        if nodename not in down:
            down[nodename] = []
        down[nodename].append(node["ID"])
    # purge down entries whose hostname also has a ready entry
    for node in nodes:
        nodename = node["Description"]["Hostname"]
        if node["Status"]["State"] != "ready":
            continue
        if nodename not in down:
            continue
        for node_id in down[nodename]:
            self.docker_node_rm(node_id)
opensvc-1.8~20170412/lib/checkEthHP-UX.py 0000644 0001750 0001750 00000005355 13073467726 017713 0 ustar jkelbert jkelbert import checks
import os
from rcUtilities import justcall, which
from rcGlobalEnv import rcEnv
import glob
import rcEthtool
class check(checks.check):
    """eth checker for HP-UX, based on lanscan/lanadmin output."""
    chk_type = "eth"

    def do_check(self):
        """Return link/speed/autoneg/duplex check entries for every
        lan interface carrying an inet configuration, including the
        slave ppas of APA aggregates."""
        cmd = ["lanscan", "-q"]
        out, err, ret = justcall(cmd)
        if ret != 0:
            return self.undef
        r = []
        intf = set([])
        for line in out.split("\n"):
            if len(line) == 0:
                continue
            l = line.split()
            n = len(l)
            if n == 1:
                # standalone ppa: add interfaces with an inet config
                if self.has_inet(l[0]):
                    intf.add(l[0])
            elif n > 1:
                # apa: add slaves for aggregates with an inet config
                if self.has_inet(l[0]):
                    for w in l[1:]:
                        intf.add(w)
            else:
                continue
        for i in intf:
            r += self.do_check_intf(i)
        return r

    def has_inet(self, intf):
        """Return True when lan<ppa> has an inet address configured."""
        cmd = ["ifconfig", "lan"+intf]
        out, err, ret = justcall(cmd)
        if ret != 0:
            return False
        return 'inet' in out

    def do_check_intf(self, intf):
        """Build the link/speed/autoneg/duplex check entries for one
        ppa, parsed from 'lanadmin -x <ppa>' output."""
        r = []
        cmd = ["lanadmin", "-x", intf]
        out, err, ret = justcall(cmd)
        if ret != 0:
            return []
        intf = "lan"+intf

        # link state
        inst = intf + ".link"
        if "link is down" in out:
            val = "0"
        else:
            val = "1"
        r.append({
            'chk_instance': inst,
            'chk_value': val,
            'chk_svcname': '',
        })

        # speed (last "Speed" line wins)
        inst = intf + ".speed"
        val = "0"
        for line in out.split('\n'):
            if "Speed" not in line:
                continue
            try:
                val = line.split()[2]
            except IndexError:
                pass
        r.append({
            'chk_instance': inst,
            'chk_value': val,
            'chk_svcname': '',
        })

        # autonegotiation
        inst = intf + ".autoneg"
        val = "0"
        for line in out.split('\n'):
            if "Autoneg" not in line:
                continue
            # BUG FIX: the original tested the constant string
            # 'if " On":', which is always true, so autoneg was
            # reported "1" whenever any Autoneg line existed.
            if " On" in line:
                val = "1"
        r.append({
            'chk_instance': inst,
            'chk_value': val,
            'chk_svcname': '',
        })

        # duplex (derived from the Speed line content)
        inst = intf + ".duplex"
        val = '0'
        for line in out.split('\n'):
            if "Speed" not in line:
                continue
            if 'Full-Duplex' in line:
                val = "1"
        r.append({
            'chk_instance': inst,
            'chk_value': val,
            'chk_svcname': '',
        })
        return r
opensvc-1.8~20170412/lib/rcNetapp.py 0000644 0001750 0001750 00000005333 13073467726 017163 0 ustar jkelbert jkelbert from __future__ import print_function
import os
import rcExceptions as ex
import ConfigParser
from subprocess import *
from rcGlobalEnv import rcEnv
from rcUtilities import justcall
if rcEnv.pathbin not in os.environ['PATH']:
os.environ['PATH'] += ":"+rcEnv.pathbin
class Netapps(object):
    """Enumerate the netapp arrays declared in the auth config file."""

    def __init__(self, objects=[]):
        """Build the array list. When <objects> is non-empty, only the
        arrays named in it are registered.

        Note: the mutable default for <objects> is kept for interface
        compatibility; it is never mutated here.
        """
        self.objects = objects
        self.filtering = len(objects) > 0
        self.arrays = []
        cf = rcEnv.authconf
        if not os.path.exists(cf):
            return
        conf = ConfigParser.RawConfigParser()
        conf.read(cf)
        for s in conf.sections():
            if not conf.has_option(s, "type") or \
               conf.get(s, "type") != "netapp":
                continue
            if self.filtering and not s in self.objects:
                continue
            kwargs = {}
            incomplete = False
            for key in ("server", "username", "key"):
                try:
                    kwargs[key] = conf.get(s, key)
                except:
                    # BUG FIX: the original used printf-style args with
                    # print() ('"... %s", s') and its 'continue' only
                    # skipped the inner loop, so arrays with missing
                    # parameters were registered anyway.
                    print("missing parameter: %s" % s)
                    incomplete = True
            if incomplete:
                continue
            self.arrays.append(Netapp(s, **kwargs))
        del(conf)

    def __iter__(self):
        """Iterate over the discovered Netapp arrays."""
        for array in self.arrays:
            yield(array)
class Netapp(object):
    """A single netapp array accessor: runs inventory commands over
    ssh and exposes one get_<key> method per inventory key."""

    def __init__(self, name, server=None, username=None, key=None):
        self.name = name
        self.server = server
        self.username = username
        # path to the ssh private key used to reach the array
        self.key = key
        # the inventory keys the collector may request
        self.keys = [
            'aggr_show_space',
            'lun_show_v',
            'lun_show_m',
            'sysconfig_a',
            'df',
            'df_S',
            'fcp_show_adapter',
        ]

    def rcmd(self, cmd):
        """Run <cmd> on the array over ssh; return (out, err)."""
        cmd = ["ssh", "-o", "StrictHostKeyChecking=no", "-i", self.key, self.username+"@"+self.server, cmd]
        out, err, ret = justcall(cmd)
        return out, err

    def get_aggr_show_space(self):
        # aggregate space report, in MB
        out, err = self.rcmd("aggr show_space -m")
        return out

    def get_lun_show_v(self):
        # verbose lun listing
        out, err = self.rcmd("lun show -v")
        return out

    def get_lun_show_m(self):
        # lun-to-igroup mapping listing
        out, err = self.rcmd("lun show -m")
        return out

    def get_sysconfig_a(self):
        # full hardware configuration
        out, err = self.rcmd("sysconfig -a")
        return out

    def get_df(self):
        # volume space usage
        out, err = self.rcmd("df")
        return out

    def get_df_S(self):
        # volume space usage, space-saving details
        out, err = self.rcmd("df -S")
        return out

    def get_fcp_show_adapter(self):
        # fcp target adapters listing
        out, err = self.rcmd("fcp show adapter")
        return out

if __name__ == "__main__":
    # ad-hoc test: dump the aggregate space report of the first array
    o = Netapps()
    for netapp in o:
        print(netapp.get_aggr_show_space())
        break
opensvc-1.8~20170412/lib/provFsZfs.py 0000644 0001750 0001750 00000002662 13073467726 017353 0 ustar jkelbert jkelbert import os
import provFs
from rcUtilities import which, convert_size
from rcZfs import Dataset
from svcBuilder import conf_get_string_scope
class ProvisioningFs(provFs.ProvisioningFs):
    """
    Provision and unprovision a zfs dataset backing an fs resource.
    """

    def unprovision_dev(self):
        # fix: this module never imported rcExceptions, so the raise
        # below crashed with NameError instead of raising excError
        import rcExceptions as ex
        if not which('zfs'):
            self.r.log.error("zfs command not found")
            raise ex.excError
        ds = Dataset(self.r.device, log=self.r.log)
        if ds.exists():
            # recursive destroy of the dataset and its descendants
            ds.destroy(["-r"])
        if os.path.exists(self.r.mount_point) and os.path.isdir(self.r.mount_point):
            os.rmdir(self.r.mount_point)

    def provision_dev(self):
        # see unprovision_dev for why the import is local
        import rcExceptions as ex
        if not which('zfs'):
            self.r.log.error("zfs command not found")
            raise ex.excError
        ds = Dataset(self.r.device, log=self.r.log)
        if ds.exists() is False:
            ds.create(['-p', '-o', 'mountpoint='+self.r.mount_point, '-o', 'canmount=noauto'])
        nv_list = dict()
        try:
            size = conf_get_string_scope(self.r.svc, self.r.svc.config, self.r.rid, "size")
        except:
            size = None
        if size:
            # enforce the configured size as a refquota, in MiB
            nv_list['refquota'] = "%dM" % convert_size(size, _to="m")
        ds.verify_prop(nv_list)

    def provisioner(self):
        self.provision_dev()
        self.r.log.info("provisioned")
        self.r.start()
        return True

    def unprovisioner(self):
        self.r.stop()
        self.unprovision_dev()
        self.r.log.info("unprovisioned")
        return True
opensvc-1.8~20170412/lib/rcIfconfigDarwin.py 0000644 0001750 0001750 00000005564 13073467726 020633 0 ustar jkelbert jkelbert from subprocess import *
from rcUtilities import which
import rcIfconfig
class ifconfig(rcIfconfig.ifconfig):
    """
    Darwin (macOS) network interface inventory, parsed from
    'ifconfig -a' and optionally 'netstat -gn' for multicast groups.
    """

    def parse(self, out):
        """
        Build interface objects from the whitespace-split tokens of
        'ifconfig -a' output. The token preceding 'flags=' is the
        interface name; address tokens follow their keyword token.
        """
        prev = ''
        for w in out.split():
            if 'flags=' in w:
                i = rcIfconfig.interface(prev.replace(':',''))
                self.intf.append(i)
                # defaults
                i.link_encap = ''
                i.scope = ''
                i.bcast = ''
                i.mtu = ''
                i.ipaddr = []
                i.mask = []
                i.ip6addr = []
                i.ip6mask = []
                i.hwaddr = ''
                i.flag_up = False
                i.flag_broadcast = False
                i.flag_running = False
                i.flag_multicast = False
                i.flag_loopback = False
                flags = w.split('<')[1].split('>')[0].split(',')
                if 'UP' in flags:
                    i.flag_up = True
                if 'BROADCAST' in flags:
                    i.flag_broadcast = True
                if 'RUNNING' in flags:
                    i.flag_running = True
                if 'MULTICAST' in flags:
                    i.flag_multicast = True
                if 'LOOPBACK' in flags:
                    i.flag_loopback = True
            elif 'inet' == prev:
                i.ipaddr += [w]
            elif 'inet6' == prev:
                # strip the '%scope' suffix darwin appends to link-locals
                i.ip6addr += [w.split('%')[0]]
            elif 'netmask' == prev:
                i.mask += [w]
            elif 'prefixlen' == prev:
                i.ip6mask += [w]
            elif 'ether' == prev:
                i.hwaddr = w
            # fix: dropped the unused 'prevprev' tracking variable
            prev = w

    def get_mcast(self):
        """Return {ifname: [group addresses]}, empty if netstat is absent."""
        if which('netstat'):
            cmd = ['netstat', '-gn']
            out = Popen(cmd, stdout=PIPE).communicate()[0]
            return self.parse_mcast_netstat(out)
        # fix: the original implicitly returned None here, leaving
        # self.mcast_data set to None instead of an empty mapping
        return {}

    def parse_mcast_netstat(self, out):
        """Parse the IPv4 section of 'netstat -gn' output."""
        lines = out.split('\n')
        found = False
        data = {}
        for i, line in enumerate(lines):
            if line.startswith('IPv4 Multicast'):
                found = True
                break
        if not found:
            return data
        if len(lines) == i+1:
            return data
        # skip the section title and its header line
        lines = lines[i+2:]
        for line in lines:
            if line.startswith('IPv6 Multicast') or line.startswith('Group'):
                continue
            try:
                addr, lladdr, intf = line.split()
            except:
                continue
            if intf not in data:
                data[intf] = [addr]
            else:
                data[intf] += [addr]
        return data

    def __init__(self, mcast=False):
        self.intf = []
        if mcast:
            self.mcast_data = self.get_mcast()
        else:
            self.mcast_data = {}
        out = Popen(['ifconfig', '-a'], stdout=PIPE).communicate()[0]
        self.parse(out)
opensvc-1.8~20170412/lib/resIpSunOS.py 0000644 0001750 0001750 00000001420 13073467726 017412 0 ustar jkelbert jkelbert import resIp as Res
from subprocess import *
from rcUtilitiesSunOS import check_ping
import rcExceptions as ex
class Ip(Res.Ip):
    """
    SunOS ip resource driver.
    """
    def arp_announce(self):
        """
        Noop because the arp_announce job is done by SunOS ifconfig
        """
        return

    def check_ping(self, count=1, timeout=2):
        # NOTE(review): 'count' is accepted for interface compatibility but
        # not forwarded to the platform helper -- confirm this is intended
        self.log.info("checking %s availability"%self.addr)
        return check_ping(self.addr, timeout=timeout)

    def startip_cmd(self):
        # plumb the stacked interface and bring it up in a single
        # ifconfig call; presumably '+' lets ifconfig derive the netmask
        # and broadcast from the system tables -- confirm on target OS
        cmd=['/usr/sbin/ifconfig', self.stacked_dev, 'plumb', self.addr, \
             'netmask', '+', 'broadcast', '+', 'up']
        return self.vcall(cmd)

    def stopip_cmd(self):
        cmd = ['/usr/sbin/ifconfig', self.stacked_dev, 'unplumb']
        return self.vcall(cmd)
opensvc-1.8~20170412/lib/checkFsUsageSunOS.py 0000644 0001750 0001750 00000002720 13073467726 020667 0 ustar jkelbert jkelbert import checks
from rcUtilities import justcall
class check(checks.check):
    """
    Filesystem usage checker for SunOS, covering ufs and vxfs.
    Produces used%, free and size datapoints per mount point.
    """
    chk_type = "fs_u"

    def find_svc(self, mountpt):
        """Return the name of the service owning mountpt, or ''."""
        for svc in self.svcs:
            for resource in svc.get_resources('fs'):
                if resource.mount_point == mountpt:
                    return svc.svcname
        return ''

    def do_check(self):
        data = []
        for fstype in ['ufs', 'vxfs']:
            data += self._do_check(fstype)
        return data

    def _do_check(self, t):
        out, err, ret = justcall(['df', '-F', t, '-k'])
        if ret != 0:
            return self.undef
        lines = out.split('\n')
        if len(lines) < 2:
            return self.undef
        data = []
        for line in lines[1:]:
            fields = line.split()
            if len(fields) == 5:
                # presumably the device column wrapped onto the previous
                # line: pad so indexing stays uniform
                fields = [''] + fields
            elif len(fields) != 6:
                continue
            mntpt = fields[5]
            svcname = self.find_svc(mntpt)
            data.append({
                'chk_instance': mntpt,
                'chk_value': fields[4],
                'chk_svcname': svcname,
            })
            data.append({
                'chk_instance': mntpt + ".free",
                'chk_value': fields[3],
                'chk_svcname': svcname,
            })
            data.append({
                'chk_instance': mntpt + ".size",
                'chk_value': fields[1],
                'chk_svcname': svcname,
            })
        return data
opensvc-1.8~20170412/lib/rcDevTreeWindows.py 0000644 0001750 0001750 00000001153 13073467726 020641 0 ustar jkelbert jkelbert import rcDevTree
import wmi
class DevTree(rcDevTree.DevTree):
    """Windows device tree, built from WMI Win32_DiskDrive instances."""
    def load_diskdrive(self):
        # lazily create and cache the WMI connection
        if not hasattr(self, 'wmi'):
            self.wmi = wmi.WMI()
        for drive in self.wmi.WIN32_DiskDrive():
            # drive.size is divided by 1024 before storage -- presumably
            # bytes converted to KiB; confirm against rcDevTree.add_dev
            d = self.add_dev(drive.DeviceId, int(drive.size)//1024, "linear")
            d.set_devpath(drive.DeviceId)
    def load(self, di=None):
        self.load_diskdrive()
    def blacklist(self, devname):
        # no device is ever blacklisted on Windows
        return False
if __name__ == "__main__":
tree = DevTree()
tree.load()
#print(tree)
tree.print_tree_bottom_up()
#print(map(lambda x: x.alias, tree.get_top_devs()))
opensvc-1.8~20170412/lib/provFsXfs.py 0000644 0001750 0001750 00000000200 13073467726 017333 0 ustar jkelbert jkelbert import provFs
class ProvisioningFs(provFs.ProvisioningFs):
    # command vectors consumed by the provFs.ProvisioningFs base class:
    # 'info' presumably probes for an existing xfs filesystem, 'mkfs'
    # formats one (force, quiet) -- confirm usage in provFs
    info = ['xfs_admin', '-l']
    mkfs = ['mkfs.xfs', '-f', '-q']
opensvc-1.8~20170412/lib/checkZfsUsageFreeBSD.py 0000777 0001750 0001750 00000000000 13073467726 025307 2checkZfsUsageSunOS.py ustar jkelbert jkelbert opensvc-1.8~20170412/lib/resFsAIX.py 0000644 0001750 0001750 00000010654 13073467726 017035 0 ustar jkelbert jkelbert import os
import rcMountsAIX as rcMounts
import resFs as Res
from rcUtilities import qcall, protected_mount, getmount
from rcGlobalEnv import rcEnv
import rcExceptions as ex
from stat import *
def try_umount(self):
    """
    Try to umount self.mount_point, killing the local processes that
    keep it busy when a first plain umount fails.
    Return the last umount exit code (0 on success).
    """
    cmd = ['umount', self.mount_point]
    (ret, out, err) = self.vcall(cmd, err_to_info=True)
    if ret == 0:
        return 0

    # don't try to kill process using the source of a
    # protected bind mount
    if protected_mount(self.mount_point):
        return 1

    # best effort kill of all processes that might block
    # the umount operation. The priority is given to mass
    # action reliability, ie don't contest operator's will
    cmd = ['sync']
    (ret, out, err) = self.vcall(cmd, err_to_info=True)

    for i in range(4):
        # fuser -k kills processes holding files under the mount point,
        # then the umount is retried
        cmd = ['fuser', '-k', '-x', '-c', self.mount_point]
        (ret, out, err) = self.vcall(cmd, err_to_info=True)
        self.log.info('umount %s'%self.mount_point)
        cmd = ['umount', self.mount_point]
        ret = qcall(cmd)
        if ret == 0:
            break
    return ret
class Mount(Res.Mount):
    """
    AIX fs resource driver.
    """
    def __init__(self,
                 rid,
                 mount_point,
                 device,
                 fs_type,
                 mount_options,
                 snap_size=None,
                 **kwargs):
        # cached rcMounts.Mounts() snapshot; None means "stale, re-read"
        self.mounts = None
        Res.Mount.__init__(self,
                           rid,
                           mount_point=mount_point,
                           device=device,
                           fs_type=fs_type,
                           mount_options=mount_options,
                           snap_size=snap_size,
                           **kwargs)
        # fsck invocation per supported filesystem type
        self.fsck_h = {
            'jfs': {
                'bin': 'fsck',
                'cmd': ['fsck', '-p', '-V', 'jfs', self.device]
            },
            'jfs2': {
                'bin': 'fsck',
                'cmd': ['fsck', '-p', '-V', 'jfs2', self.device]
            },
        }

    def is_up(self):
        # refresh the mount table snapshot, then test for our mount
        self.mounts = rcMounts.Mounts()
        return self.mounts.has_mount(self.device, self.mount_point)

    def realdev(self):
        """
        Return the block device backing self.device, resolving through
        the mount table when self.device is a plain path, or None when
        it can not be resolved.
        """
        try:
            mode = os.stat(self.device)[ST_MODE]
        except:
            self.log.debug("can not stat %s" % self.device)
            return None
        if S_ISBLK(mode):
            dev = self.device
        else:
            # self.device lives inside a mounted fs: look its mount up
            mnt = getmount(self.device)
            if self.mounts is None:
                self.mounts = rcMounts.Mounts()
            m = self.mounts.has_param("mnt", mnt)
            if m is None:
                self.log.debug("can't find dev %(dev)s mounted in %(mnt)s in mnttab"%dict(mnt=mnt, dev=self.device))
                return None
            dev = m.dev
        return dev

    def mplist(self):
        # multipath members of the backing device (empty on AIX)
        dev = self.realdev()
        if dev is None:
            return set([])
        return self._mplist([dev])

    def _mplist(self, devs):
        mps = set([])
        return mps

    def disklist(self):
        # NOTE(review): dev is resolved but never used and the method
        # always returns an empty set -- confirm this is intended
        dev = self.realdev()
        if dev is None:
            return set([])
        return set([])

    def can_check_writable(self):
        return True

    def start(self):
        """Fsck then mount the filesystem if not already mounted."""
        if self.mounts is None:
            self.mounts = rcMounts.Mounts()
        Res.Mount.start(self)
        if self.is_up() is True:
            self.log.info("%s is already mounted" % self.label)
            return 0
        self.fsck()
        if not os.path.exists(self.mount_point):
            os.makedirs(self.mount_point, 0o755)
        if self.fs_type != "":
            fstype = ['-v', self.fs_type]
        else:
            fstype = []
        if self.mount_options != "":
            mntopt = ['-o', self.mount_options]
        else:
            mntopt = []
        cmd = ['mount']+fstype+mntopt+[self.device, self.mount_point]
        (ret, out, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError
        # invalidate the mounts cache and mark the action rollbackable
        self.mounts = None
        self.can_rollback = True

    def stop(self):
        """Umount the filesystem, retrying the umount up to 3 times."""
        if self.mounts is None:
            self.mounts = rcMounts.Mounts()
        if self.is_up() is False:
            self.log.info("%s is already umounted" % self.label)
            return
        for i in range(3):
            ret = try_umount(self)
            if ret == 0: break
        if ret != 0:
            self.log.error('failed to umount %s'%self.mount_point)
            raise ex.excError
        self.mounts = None
if __name__ == "__main__":
for c in (Mount,) :
help(c)
opensvc-1.8~20170412/lib/svcBuilder.py 0000644 0001750 0001750 00000360527 13073467726 017522 0 ustar jkelbert jkelbert from __future__ import print_function
import os
import sys
import logging
import re
import socket
import glob
import ast
import operator as op
import platform
from rcGlobalEnv import rcEnv, Storage
from rcNode import discover_node
import rcLogger
import resSyncRsync
import rcExceptions as ex
import rcConfigParser
from rcUtilities import convert_bool, convert_size, cmdline2list, ximport, \
check_privs
# supported operators in arithmetic expressions
operators = {ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul,
ast.Div: op.truediv, ast.Pow: op.pow, ast.BitXor: op.xor,
ast.USub: op.neg, ast.FloorDiv: op.floordiv, ast.Mod: op.mod}
if 'PATH' not in os.environ:
os.environ['PATH'] = ""
os.environ['LANG'] = 'C'
os.environ['PATH'] += ':/usr/kerberos/sbin:/usr/kerberos/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin'
def eval_expr(expr):
    """
    Safely evaluate an arithmetic expression string via the ast module.
    Supports + - * / ** ^(xor) // % and unary minus on numeric literals.

    Raises TypeError for any other construct, so no arbitrary code can
    be executed.
    """
    # supported operators, mapped to their functions (kept local so the
    # function is self-contained)
    _operators = {
        ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul,
        ast.Div: op.truediv, ast.Pow: op.pow, ast.BitXor: op.xor,
        ast.USub: op.neg, ast.FloorDiv: op.floordiv, ast.Mod: op.mod,
    }

    def eval_(node):
        if isinstance(node, ast.BinOp):  # e.g. 1+2
            return _operators[type(node.op)](eval_(node.left), eval_(node.right))
        if isinstance(node, ast.UnaryOp):  # e.g. -1
            return _operators[type(node.op)](eval_(node.operand))
        # numeric literal: python >= 3.8 parses these as ast.Constant
        if isinstance(node, getattr(ast, "Constant", ())) and \
           isinstance(node.value, (int, float)) and \
           not isinstance(node.value, bool):
            return node.value
        # numeric literal on older pythons (ast.Num)
        if isinstance(node, getattr(ast, "Num", ())):
            return node.n
        raise TypeError(node)

    return eval_(ast.parse(expr, mode='eval').body)
def handle_reference(svc, conf, ref, scope=False, impersonate=None):
# hardcoded references
if ref == "nodename":
return rcEnv.nodename
if ref == "short_nodename":
return rcEnv.nodename.split(".")[0]
if ref == "svcname":
return svc.svcname
if ref == "short_svcname":
return svc.svcname.split(".")[0]
if ref == "svcmgr":
return rcEnv.svcmgr
if ref == "nodemgr":
return rcEnv.nodemgr
if "[" in ref and ref.endswith("]"):
i = ref.index("[")
index = ref[i+1:-1]
ref = ref[:i]
index = int(handle_references(svc, conf, index, scope=scope, impersonate=impersonate))
else:
index = None
# use DEFAULT as the implicit section
n_dots = ref.count(".")
if n_dots == 0:
_section = "DEFAULT"
_v = ref
elif n_dots == 1:
_section, _v = ref.split(".")
else:
raise ex.excError("%s: reference can have only one dot" % ref)
if len(_section) == 0:
raise ex.excError("%s: reference section can not be empty" % ref)
if len(_v) == 0:
raise ex.excError("%s: reference option can not be empty" % ref)
if _v[0] == "#":
return_length = True
_v = _v[1:]
else:
return_length = False
val = _handle_reference(svc, conf, ref, _section, _v, scope=scope, impersonate=impersonate)
if return_length:
return str(len(val.split()))
if not index is None:
return val.split()[index]
return val
def _handle_reference(svc, conf, ref, _section, _v, scope=False, impersonate=None):
    """
    Resolve a single, already split reference (_section, _v) to its
    configuration value. Raise excError if it can not be resolved.
    """
    # give os env precedence over the env cf section
    if _section == "env" and _v.upper() in os.environ:
        return os.environ[_v.upper()]
    if _section != "DEFAULT" and not conf.has_section(_section):
        raise ex.excError("%s: section %s does not exist" % (ref, _section))
    # fix: dropped the unreachable trailing 'unknown reference' raise --
    # this try either returns or raises
    try:
        return conf_get(svc, conf, _section, _v, "string", scope=scope, impersonate=impersonate)
    except ex.OptNotFound as e:
        raise ex.excError("%s: unresolved reference (%s)" % (ref, str(e)))
def _handle_references(svc, conf, s, scope=False, impersonate=None):
while True:
m = re.search(r'{\w*[\w#][\w\.\[\]]*}', s)
if m is None:
return s
ref = m.group(0).strip("{}")
val = handle_reference(svc, conf, ref, scope=scope, impersonate=impersonate)
s = s[:m.start()] + val + s[m.end():]
def _handle_expressions(s):
    """
    Substitute every $(...) arithmetic expression in s with its
    evaluated value, repeating until none remain.
    """
    pattern = re.compile(r'\$\((.+)\)')
    m = pattern.search(s)
    while m is not None:
        result = eval_expr(m.group(1))
        s = s[:m.start()] + str(result) + s[m.end():]
        m = pattern.search(s)
    return s
def handle_references(svc, conf, s, scope=False, impersonate=None):
key = (s, scope, impersonate)
if hasattr(svc, "ref_cache") and svc.ref_cache is not None and key in svc.ref_cache:
return svc.ref_cache[key]
try:
val = _handle_references(svc, conf, s, scope=scope, impersonate=impersonate)
val = _handle_expressions(val)
val = _handle_references(svc, conf, val, scope=scope, impersonate=impersonate)
except Exception as e:
raise ex.excError("%s: reference evaluation failed: %s" %(s, str(e)))
if hasattr(svc, "ref_cache") and svc.ref_cache is not None:
svc.ref_cache[key] = val
return val
def conf_get(svc, conf, s, o, t, scope=False, impersonate=None):
if not scope:
val = conf_get_val_unscoped(svc, conf, s, o)
else:
val = conf_get_val_scoped(svc, conf, s, o, impersonate=impersonate)
try:
val = handle_references(svc, conf, val, scope=scope, impersonate=impersonate)
except ex.excError:
if o.startswith("pre_") or o.startswith("post_") or o.startswith("blocking_"):
pass
else:
raise
if t == 'string':
pass
elif t == 'boolean':
val = convert_bool(val)
elif t == 'integer':
try:
val = int(val)
except:
val = convert_size(val)
else:
raise Exception("unknown keyword type: %s" % t)
return val
def conf_get_val_unscoped(svc, conf, s, o):
if conf.has_option(s, o):
return conf.get(s, o)
raise ex.OptNotFound("unscoped keyword %s.%s not found" % (s, o))
def conf_get_val_scoped(svc, conf, s, o, impersonate=None):
if impersonate is None:
nodename = rcEnv.nodename
else:
nodename = impersonate
if conf.has_option(s, o+"@"+nodename):
val = conf.get(s, o+"@"+nodename)
elif conf.has_option(s, o+"@nodes") and \
nodename in svc.nodes:
val = conf.get(s, o+"@nodes")
elif conf.has_option(s, o+"@drpnodes") and \
nodename in svc.drpnodes:
val = conf.get(s, o+"@drpnodes")
elif conf.has_option(s, o+"@encapnodes") and \
nodename in svc.encapnodes:
val = conf.get(s, o+"@encapnodes")
elif conf.has_option(s, o+"@flex_primary") and \
nodename == svc.flex_primary:
val = conf.get(s, o+"@flex_primary")
elif conf.has_option(s, o+"@drp_flex_primary") and \
nodename == svc.drp_flex_primary:
val = conf.get(s, o+"@drp_flex_primary")
elif conf.has_option(s, o):
try:
val = conf.get(s, o)
except Exception as e:
raise ex.excError("param %s.%s: %s"%(s, o, str(e)))
else:
raise ex.OptNotFound("scoped keyword %s.%s not found" % (s, o))
return val
def conf_get_string(svc, conf, s, o):
return conf_get(svc, conf, s, o, 'string', scope=False)
def conf_get_string_scope(svc, conf, s, o, impersonate=None):
return conf_get(svc, conf, s, o, 'string', scope=True, impersonate=impersonate)
def conf_get_boolean(svc, conf, s, o):
return conf_get(svc, conf, s, o, 'boolean', scope=False)
def conf_get_boolean_scope(svc, conf, s, o, impersonate=None):
return conf_get(svc, conf, s, o, 'boolean', scope=True, impersonate=impersonate)
def conf_get_int(svc, conf, s, o):
return conf_get(svc, conf, s, o, 'integer', scope=False)
def conf_get_int_scope(svc, conf, s, o, impersonate=None):
return conf_get(svc, conf, s, o, 'integer', scope=True, impersonate=impersonate)
def svcmode_mod_name(svcmode=''):
    """Returns (moduleName, serviceClassName) implementing the class for
    a given service mode. For example:
      hosted => ('svcHosted', 'SvcHosted')
    """
    known = {
        'hosted': ('svcHosted', 'SvcHosted'),
        'sg': ('svcSg', 'SvcSg'),
        'rhcs': ('svcRhcs', 'SvcRhcs'),
        'vcs': ('svcVcs', 'SvcVcs'),
    }
    if svcmode in known:
        return known[svcmode]
    raise ex.excError("unknown service mode: %s"%svcmode)
def get_tags(conf, section, svc):
    """Return the set of tags declared by a section (possibly empty)."""
    try:
        raw = conf_get_string_scope(svc, conf, section, 'tags')
    except ex.OptNotFound:
        raw = ""
    return set(raw.split())
def get_optional(conf, section, svc):
if not conf.has_section(section):
try:
return conf_get_boolean_scope(svc, conf, "DEFAULT", "optional")
except:
return False
# deprecated
if conf.has_option(section, 'optional_on'):
nodes = set([])
l = conf.get(section, "optional_on").split()
for i in l:
if i == 'nodes': nodes |= svc.nodes
elif i == 'drpnodes': nodes |= svc.drpnodes
else: nodes |= set([i.lower()])
if rcEnv.nodename in nodes:
return True
return False
try:
return conf_get_boolean_scope(svc, conf, section, "optional")
except:
return False
def get_monitor(conf, section, svc):
if not conf.has_section(section):
try:
return conf_get_boolean_scope(svc, conf, "DEFAULT", "monitor")
except:
return False
# deprecated
if conf.has_option(section, 'monitor_on'):
nodes = set([])
l = conf.get(section, "monitor_on").split()
for i in l:
if i == 'nodes': nodes |= svc.nodes
elif i == 'drpnodes': nodes |= svc.drpnodes
else: nodes |= set([i.lower()])
if rcEnv.nodename in nodes:
return True
return False
try:
return conf_get_boolean_scope(svc, conf, section, "monitor")
except:
return False
def get_rcmd(conf, section, svc):
if not conf.has_section(section):
return
try:
return conf_get_string_scope(svc, conf, section, 'rcmd').split()
except ex.OptNotFound:
return
def get_subset(conf, section, svc):
    """
    Return the 'subset' value of a resource section, or None if the
    section does not exist or does not define one.
    """
    # fix: dropped the duplicate unreachable trailing 'return'
    if not conf.has_section(section):
        return
    try:
        return conf_get_string_scope(svc, conf, section, 'subset')
    except ex.OptNotFound:
        return
def get_osvc_root_path(conf, section, svc):
if not conf.has_section(section):
return
try:
return conf_get_string_scope(svc, conf, section, 'osvc_root_path')
except ex.OptNotFound:
return
return
def get_restart(conf, section, svc):
if not conf.has_section(section):
if conf.has_option('DEFAULT', 'restart'):
try:
return conf_get_int_scope(svc, conf, section, 'restart')
except ex.OptNotFound:
return 0
else:
return 0
try:
return conf_get_int_scope(svc, conf, section, 'restart')
except ex.OptNotFound:
return 0
return 0
def get_disabled(conf, section, svc):
# service-level disable takes precedence over all resource-level disable method
if conf.has_option('DEFAULT', 'disable'):
svc_disable = conf.getboolean("DEFAULT", "disable")
else:
svc_disable = False
if svc_disable is True:
return True
if section == "":
return svc_disable
# unscopable enable_on option (takes precedence over disable and disable_on)
nodes = set([])
if conf.has_option(section, 'enable_on'):
l = conf_get_string_scope(svc, conf, section, "enable_on").split()
for i in l:
if i == 'nodes': nodes |= svc.nodes
elif i == 'drpnodes': nodes |= svc.drpnodes
else: nodes |= set([i.lower()])
if rcEnv.nodename in nodes:
return False
# scoped disable option
try:
r = conf_get_boolean_scope(svc, conf, section, 'disable')
except ex.OptNotFound:
r = False
except Exception as e:
print(e, "... consider section as disabled")
r = True
if r:
return r
# unscopable disable_on option
nodes = set([])
if conf.has_option(section, 'disable_on'):
l = conf.get(section, "disable_on").split()
for i in l:
if i == 'nodes': nodes |= svc.nodes
elif i == 'drpnodes': nodes |= svc.drpnodes
else: nodes |= set([i.lower()])
if rcEnv.nodename in nodes:
return True
return False
def need_scsireserv(svc, conf, section):
    """scsireserv = true can be set globally or in a specific
    resource section.

    Return True if the resource in <section> must be paired with a
    scsi persistent reservation resource.
    """
    try:
        return conf_get_boolean_scope(svc, conf, section, 'scsireserv')
    except ex.OptNotFound:
        pass
    defaults = conf.defaults()
    if 'scsireserv' in defaults:
        # fix: config values are strings and bool("false") is True;
        # use convert_bool for a correct parse of the DEFAULT value
        return convert_bool(defaults['scsireserv'])
    return False
def add_scsireserv(svc, resource, conf, section):
if not need_scsireserv(svc, conf, section):
return
try:
sr = __import__('resScsiReserv'+rcEnv.sysname)
except ImportError:
sr = __import__('resScsiReserv')
kwargs = {}
pr_rid = resource.rid+"pr"
try:
kwargs["prkey"] = conf_get_string_scope(svc, conf, resource.rid, 'prkey')
except ex.OptNotFound:
pass
try:
pa = conf_get_boolean_scope(svc, conf, resource.rid, 'no_preempt_abort')
except ex.OptNotFound:
pa = False
try:
kwargs['optional'] = get_optional(conf, pr_rid, svc)
except ex.OptNotFound:
kwargs['optional'] = resource.is_optional()
try:
kwargs['disabled'] = get_disabled(conf, pr_rid, svc)
except ex.OptNotFound:
kwargs['disabled'] = resource.is_disabled()
try:
kwargs['restart'] = get_restart(conf, pr_rid, svc)
except ex.OptNotFound:
kwargs['restart'] = resource.restart
try:
kwargs['monitor'] = get_monitor(conf, pr_rid, svc)
except ex.OptNotFound:
kwargs['monitor'] = resource.monitor
try:
kwargs['tags'] = get_tags(conf, pr_rid, svc)
except:
kwargs['tags'] = set([])
kwargs['rid'] = resource.rid
kwargs['tags'] |= resource.tags
kwargs['peer_resource'] = resource
kwargs['no_preempt_abort'] = pa
r = sr.ScsiReserv(**kwargs)
svc += r
def add_triggers(svc, resource, conf, section):
triggers = [
'pre_unprovision', 'post_unprovision',
'pre_provision', 'post_provision',
'pre_stop', 'pre_start',
'post_stop', 'post_start',
'pre_sync_nodes', 'pre_sync_drp',
'post_sync_nodes', 'post_sync_drp',
'post_sync_resync', 'pre_sync_resync',
'post_sync_update', 'pre_sync_update',
'post_run', 'pre_run',
]
compat_triggers = [
'pre_syncnodes', 'pre_syncdrp',
'post_syncnodes', 'post_syncdrp',
'post_syncresync', 'pre_syncresync',
'post_syncupdate', 'pre_syncupdate',
]
for trigger in triggers + compat_triggers:
for prefix in ("", "blocking_"):
try:
s = conf_get_string_scope(svc, conf, resource.rid, prefix+trigger)
except ex.OptNotFound:
continue
if trigger in compat_triggers:
trigger = trigger.replace("sync", "sync_")
setattr(resource, prefix+trigger, s)
def add_requires(svc, resource, conf, section):
    """
    Hydrate resource.<action>_requires attributes from the
    '<action>_requires' keywords of the resource section.
    """
    actions = [
        # fix: a missing comma concatenated 'provision' and 'stop' into
        # the single string 'provisionstop', silently disabling both
        # provision_requires and stop_requires
        'unprovision', 'provision',
        'stop', 'start',
        'sync_nodes', 'sync_drp', 'sync_resync', 'sync_break', 'sync_update',
        'run',
    ]
    for action in actions:
        try:
            s = conf_get_string_scope(svc, conf, section, action+'_requires')
        except ex.OptNotFound:
            continue
        # keep 'stdby <rid>' together as a single requirement element
        s = s.replace("stdby ", "stdby_")
        l = s.split(" ")
        l = list(map(lambda x: x.replace("stdby_", "stdby "), l))
        setattr(resource, action+'_requires', l)
def add_triggers_and_requires(svc, resource, conf, section):
add_triggers(svc, resource, conf, section)
add_requires(svc, resource, conf, section)
def always_on_nodes_set(svc, conf, section):
try:
always_on_opt = conf.get(section, "always_on").split()
except:
always_on_opt = []
always_on = set([])
if 'nodes' in always_on_opt:
always_on |= svc.nodes
if 'drpnodes' in always_on_opt:
always_on |= svc.drpnodes
always_on |= set(always_on_opt) - set(['nodes', 'drpnodes'])
return always_on
def get_sync_args(conf, s, svc):
kwargs = {}
defaults = conf.defaults()
if conf.has_option(s, 'sync_max_delay'):
kwargs['sync_max_delay'] = conf_get_int_scope(svc, conf, s, 'sync_max_delay')
elif 'sync_max_delay' in defaults:
kwargs['sync_max_delay'] = conf_get_int_scope(svc, conf, 'DEFAULT', 'sync_max_delay')
if conf.has_option(s, 'schedule'):
kwargs['schedule'] = conf_get_string_scope(svc, conf, s, 'schedule')
elif conf.has_option(s, 'period') or conf.has_option(s, 'sync_period'):
# old schedule syntax compatibility
from rcScheduler import Scheduler
kwargs['schedule'] = Scheduler().sched_convert_to_schedule(conf, s, prefix='sync_')
elif 'sync_schedule' in defaults:
kwargs['schedule'] = conf_get_string_scope(svc, conf, 'DEFAULT', 'sync_schedule')
elif 'sync_period' in defaults:
# old schedule syntax compatibility for internal sync
from rcScheduler import Scheduler
kwargs['schedule'] = Scheduler().sched_convert_to_schedule(conf, s, prefix='sync_')
return kwargs
def add_resources(restype, svc, conf):
if restype == "pool":
restype = "zpool"
match = "[z]{0,1}pool#"
else:
match = restype+"#"
for s in conf.sections():
if restype in ("disk", "vg", "zpool") and re.match(match+'.+pr', s, re.I) is not None:
# persistent reserv resource are declared by their peer resource:
# don't add them from here
continue
if s != 'app' and s != restype and re.match(match, s, re.I) is None:
continue
tags = get_tags(conf, s, svc)
if svc.encap and 'encap' not in tags:
continue
if not svc.encap and 'encap' in tags:
svc.has_encap_resources = True
try:
subset = conf_get_string_scope(svc, conf, s, 'subset')
except ex.OptNotFound:
subset = None
svc.encap_resources[s] = Storage({
"rid": s,
"tags": tags,
"subset": subset,
})
continue
if s in svc.resources_by_id:
continue
globals()['add_'+restype](svc, conf, s)
def add_ip_gce(svc, conf, s):
kwargs = {}
try:
rtype = conf_get_string_scope(svc, conf, s, 'type')
except ex.OptNotFound:
rtype = None
if rtype != "gce":
return
try:
kwargs['ipname'] = conf_get_string_scope(svc, conf, s, 'ipname')
except ex.OptNotFound:
svc.log.error("ipname must be defined in config file section %s" % s)
return
try:
kwargs['ipdev'] = conf_get_string_scope(svc, conf, s, 'ipdev')
except ex.OptNotFound:
svc.log.error("ipdev must be defined in config file section %s" % s)
return
try:
kwargs['eip'] = conf_get_string_scope(svc, conf, s, 'eip')
except ex.OptNotFound:
pass
try:
kwargs['routename'] = conf_get_string_scope(svc, conf, s, 'routename')
except ex.OptNotFound:
pass
try:
kwargs['gce_zone'] = conf_get_string_scope(svc, conf, s, 'gce_zone')
except ex.OptNotFound:
pass
ip = __import__('resIpGce')
kwargs['rid'] = s
kwargs['subset'] = get_subset(conf, s, svc)
kwargs['tags'] = get_tags(conf, s, svc)
kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
kwargs['disabled'] = get_disabled(conf, s, svc)
kwargs['optional'] = get_optional(conf, s, svc)
kwargs['monitor'] = get_monitor(conf, s, svc)
kwargs['restart'] = get_restart(conf, s, svc)
r = ip.Ip(**kwargs)
add_triggers_and_requires(svc, r, conf, s)
svc += r
def add_ip_amazon(svc, conf, s):
kwargs = {}
try:
rtype = conf_get_string_scope(svc, conf, s, 'type')
except ex.OptNotFound:
rtype = None
if rtype != "amazon":
return
try:
kwargs['ipname'] = conf_get_string_scope(svc, conf, s, 'ipname')
except ex.OptNotFound:
svc.log.error("nor ipname and ipname@%s defined in config file section %s"%(rcEnv.nodename, s))
return
try:
kwargs['ipdev'] = conf_get_string_scope(svc, conf, s, 'ipdev')
except ex.OptNotFound:
svc.log.error("ipdev must be defined in config file section %s" % s)
return
try:
kwargs['eip'] = conf_get_string_scope(svc, conf, s, 'eip')
except ex.OptNotFound:
pass
ip = __import__('resIpAmazon')
kwargs['rid'] = s
kwargs['subset'] = get_subset(conf, s, svc)
kwargs['tags'] = get_tags(conf, s, svc)
kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
kwargs['disabled'] = get_disabled(conf, s, svc)
kwargs['optional'] = get_optional(conf, s, svc)
kwargs['monitor'] = get_monitor(conf, s, svc)
kwargs['restart'] = get_restart(conf, s, svc)
r = ip.Ip(**kwargs)
add_triggers_and_requires(svc, r, conf, s)
svc += r
def add_ip(svc, conf, s):
"""Parse the configuration file and add an ip object for each [ip#n]
section. Ip objects are stored in a list in the service object.
"""
try:
rtype = conf_get_string_scope(svc, conf, s, 'type')
except ex.OptNotFound:
rtype = None
if rtype == "amazon":
return add_ip_amazon(svc, conf, s)
elif rtype == "gce":
return add_ip_gce(svc, conf, s)
kwargs = {}
try:
kwargs['ipname'] = conf_get_string_scope(svc, conf, s, 'ipname')
except ex.OptNotFound:
pass
try:
kwargs['ipdev'] = conf_get_string_scope(svc, conf, s, 'ipdev')
except ex.OptNotFound:
svc.log.error('ipdev not found in ip section %s'%s)
return
try:
kwargs['ipdevExt'] = conf_get_string_scope(svc, conf, s, 'ipdevext')
except ex.OptNotFound:
pass
try:
kwargs['mask'] = conf_get_string_scope(svc, conf, s, 'netmask')
except ex.OptNotFound:
pass
try:
kwargs['gateway'] = conf_get_string_scope(svc, conf, s, 'gateway')
except ex.OptNotFound:
pass
try:
kwargs['zone'] = conf_get_string_scope(svc, conf, s, 'zone')
except ex.OptNotFound:
pass
try:
kwargs['container_rid'] = conf_get_string_scope(svc, conf, s, 'container_rid')
except ex.OptNotFound:
pass
if rtype == "docker":
try:
kwargs['network'] = conf_get_string_scope(svc, conf, s, 'network')
except ex.OptNotFound:
pass
try:
kwargs['del_net_route'] = conf_get_boolean_scope(svc, conf, s, 'del_net_route')
except ex.OptNotFound:
pass
if rtype == "crossbow":
if 'zone' in kwargs:
svc.log.error("'zone' and 'type=crossbow' are incompatible in section %s"%s)
return
ip = __import__('resIpCrossbow')
elif 'zone' in kwargs:
ip = __import__('resIpZone')
elif rtype == "docker" or "container_rid" in kwargs:
ip = __import__('resIpDocker'+rcEnv.sysname)
else:
ip = __import__('resIp'+rcEnv.sysname)
kwargs['rid'] = s
kwargs['subset'] = get_subset(conf, s, svc)
kwargs['tags'] = get_tags(conf, s, svc)
kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
kwargs['disabled'] = get_disabled(conf, s, svc)
kwargs['optional'] = get_optional(conf, s, svc)
kwargs['monitor'] = get_monitor(conf, s, svc)
kwargs['restart'] = get_restart(conf, s, svc)
r = ip.Ip(**kwargs)
add_triggers_and_requires(svc, r, conf, s)
svc += r
def add_md(svc, conf, s):
kwargs = {}
try:
kwargs['uuid'] = conf_get_string_scope(svc, conf, s, 'uuid')
except ex.OptNotFound:
svc.log.error("uuid must be set in section %s"%s)
return
try:
kwargs['shared'] = conf_get_string_scope(svc, conf, s, 'shared')
except ex.OptNotFound:
if len(svc.nodes|svc.drpnodes) < 2:
kwargs['shared'] = False
svc.log.debug("md %s shared param defaults to %s due to single node configuration"%(s, kwargs['shared']))
else:
l = [ p for p in conf.options(s) if "@" in p ]
if len(l) > 0:
kwargs['shared'] = False
svc.log.debug("md %s shared param defaults to %s due to scoped configuration"%(s, kwargs['shared']))
else:
kwargs['shared'] = True
svc.log.debug("md %s shared param defaults to %s due to unscoped configuration"%(s, kwargs['shared']))
kwargs['rid'] = s
kwargs['subset'] = get_subset(conf, s, svc)
kwargs['tags'] = get_tags(conf, s, svc)
kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
kwargs['disabled'] = get_disabled(conf, s, svc)
kwargs['optional'] = get_optional(conf, s, svc)
kwargs['monitor'] = get_monitor(conf, s, svc)
kwargs['restart'] = get_restart(conf, s, svc)
m = __import__('resDiskMdLinux')
r = m.Disk(**kwargs)
add_triggers_and_requires(svc, r, conf, s)
svc += r
add_scsireserv(svc, r, conf, s)
def add_drbd(svc, conf, s):
"""Parse the configuration file and add a drbd object for each [drbd#n]
section. Drbd objects are stored in a list in the service object.
"""
kwargs = {}
try:
kwargs['res'] = conf_get_string(svc, conf, s, 'res')
except ex.OptNotFound:
svc.log.error("res must be set in section %s"%s)
return
kwargs['rid'] = s
kwargs['subset'] = get_subset(conf, s, svc)
kwargs['tags'] = get_tags(conf, s, svc)
kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
kwargs['disabled'] = get_disabled(conf, s, svc)
kwargs['optional'] = get_optional(conf, s, svc)
kwargs['monitor'] = get_monitor(conf, s, svc)
kwargs['restart'] = get_restart(conf, s, svc)
mod = __import__('resDiskDrbd')
r = mod.Drbd(**kwargs)
add_triggers_and_requires(svc, r, conf, s)
svc += r
def add_vdisk(svc, conf, s):
kwargs = {}
devpath = {}
for attr, val in conf.items(s):
if 'path@' in attr:
devpath[attr.replace('path@','')] = val
if len(devpath) == 0:
svc.log.error("path@node must be set in section %s"%s)
return
kwargs['devpath'] = devpath
kwargs['rid'] = s
kwargs['subset'] = get_subset(conf, s, svc)
kwargs['tags'] = get_tags(conf, s, svc)
kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
kwargs['disabled'] = get_disabled(conf, s, svc)
kwargs['optional'] = get_optional(conf, s, svc)
kwargs['monitor'] = get_monitor(conf, s, svc)
kwargs['restart'] = get_restart(conf, s, svc)
m = __import__('resDiskVdisk')
r = m.Disk(**kwargs)
add_triggers_and_requires(svc, r, conf, s)
svc += r
add_scsireserv(svc, r, conf, s)
def add_stonith(svc, conf, s):
    """Parse a stonith section and add the matching Stonith resource to
    the service. Stonith resources are never configured on DRP nodes.

    Fixes: the original compared the type with ``_type in ('Ilo')``,
    which is a substring test against the string 'Ilo' (the tuple comma
    was missing); it also ran an unguarded ``__import__`` before the
    guarded one, so a missing driver raised instead of being logged.
    """
    if rcEnv.nodename in svc.drpnodes:
        # no stonith on DRP nodes
        return
    kwargs = {}
    try:
        _type = conf_get_string(svc, conf, s, 'type')
        if len(_type) > 1:
            # normalize to the driver module capitalization (ilo -> Ilo)
            _type = _type[0].upper()+_type[1:].lower()
    except ex.OptNotFound:
        svc.log.error("type must be set in section %s"%s)
        return
    if _type == 'Ilo':
        # 'name' is accepted, but 'target' takes precedence when both
        # are set (it is read last and overwrites kwargs['name'])
        try:
            kwargs['name'] = conf_get_string_scope(svc, conf, s, 'name')
        except ex.OptNotFound:
            pass
        try:
            kwargs['name'] = conf_get_string_scope(svc, conf, s, 'target')
        except ex.OptNotFound:
            pass
        if 'name' not in kwargs:
            svc.log.error("target must be set in section %s"%s)
            return
    elif _type == 'Callout':
        try:
            kwargs['cmd'] = conf_get_string_scope(svc, conf, s, 'cmd')
        except ex.OptNotFound:
            pass
        if 'cmd' not in kwargs:
            svc.log.error("cmd must be set in section %s"%s)
            return
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    try:
        st = __import__('resStonith'+_type)
    except ImportError:
        svc.log.error("resStonith%s is not implemented"%_type)
        return
    r = st.Stonith(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
def add_hb(svc, conf, s):
    """Parse a heartbeat section and add the matching Hb resource to
    the service. Heartbeats are never configured on DRP nodes.
    """
    if rcEnv.nodename in svc.drpnodes:
        # no heartbeat on DRP nodes
        return
    kwargs = {}
    try:
        hbtype = conf_get_string(svc, conf, s, 'type').lower()
    except ex.OptNotFound:
        svc.log.error("type must be set in section %s"%s)
        return
    try:
        kwargs['name'] = conf_get_string(svc, conf, s, 'name')
    except ex.OptNotFound:
        pass
    # generic per-resource keywords shared by all resource parsers
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    # map the lowercased type to the driver module capitalization
    if hbtype == 'openha':
        hbtype = 'OpenHA'
    elif hbtype == 'linuxha':
        hbtype = 'LinuxHA'
    try:
        hb = __import__('resHb'+hbtype)
    except ImportError:
        svc.log.error("resHb%s is not implemented"%hbtype)
        return
    r = hb.Hb(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
def add_loop(svc, conf, s):
    """Add a loopback device resource for the [loop#n] section.

    The 'file' keyword is mandatory. The OS-specific resDiskLoop driver
    is loaded; unsupported platforms are logged and skipped.
    """
    try:
        loop_file = conf_get_string_scope(svc, conf, s, 'file')
    except ex.OptNotFound:
        svc.log.error("file must be set in section %s"%s)
        return
    # generic per-resource keywords shared by all resource parsers
    params = {
        'loopFile': loop_file,
        'rid': s,
        'subset': get_subset(conf, s, svc),
        'tags': get_tags(conf, s, svc),
        'always_on': always_on_nodes_set(svc, conf, s),
        'disabled': get_disabled(conf, s, svc),
        'optional': get_optional(conf, s, svc),
        'monitor': get_monitor(conf, s, svc),
        'restart': get_restart(conf, s, svc),
    }
    try:
        driver = __import__('resDiskLoop'+rcEnv.sysname)
    except ImportError:
        svc.log.error("resDiskLoop%s is not implemented"%rcEnv.sysname)
        return
    resource = driver.Disk(**params)
    add_triggers_and_requires(svc, resource, conf, s)
    svc += resource
def add_disk_disk(svc, conf, s):
    """Add a raw disk resource for the section, driven by the optional
    'disk_id' keyword and the OS-specific resDiskDisk driver.
    """
    params = {}
    try:
        params['disk_id'] = conf_get_string_scope(svc, conf, s, 'disk_id')
    except ex.OptNotFound:
        pass
    # generic per-resource keywords shared by all resource parsers
    params.update({
        'always_on': always_on_nodes_set(svc, conf, s),
        'rid': s,
        'subset': get_subset(conf, s, svc),
        'tags': get_tags(conf, s, svc),
        'disabled': get_disabled(conf, s, svc),
        'optional': get_optional(conf, s, svc),
        'monitor': get_monitor(conf, s, svc),
        'restart': get_restart(conf, s, svc),
    })
    driver = __import__('resDiskDisk'+rcEnv.sysname)
    resource = driver.Disk(**params)
    add_triggers_and_requires(svc, resource, conf, s)
    svc += resource
def add_disk_gce(svc, conf, s):
    """Parse a gce disk section and add the resource to the service.
    Mandatory keywords: names (whitespace-separated list), gce_zone.
    """
    kwargs = {}
    try:
        kwargs['names'] = conf_get_string_scope(svc, conf, s, 'names').split()
    except ex.OptNotFound:
        svc.log.error("names must be set in section %s"%s)
        return
    try:
        kwargs['gce_zone'] = conf_get_string_scope(svc, conf, s, 'gce_zone')
    except ex.OptNotFound:
        svc.log.error("gce_zone must be set in section %s"%s)
        return
    # generic per-resource keywords shared by all resource parsers
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs['monitor'] = get_monitor(conf, s, svc)
    kwargs['restart'] = get_restart(conf, s, svc)
    m = __import__('resDiskGce')
    r = m.Disk(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
def add_disk_amazon(svc, conf, s):
    """Add an Amazon EBS disk resource for the section. The 'volumes'
    keyword (whitespace-separated list) is mandatory.
    """
    try:
        volumes = conf_get_string_scope(svc, conf, s, 'volumes').split()
    except ex.OptNotFound:
        svc.log.error("volumes must be set in section %s"%s)
        return
    # generic per-resource keywords shared by all resource parsers
    params = {
        'volumes': volumes,
        'always_on': always_on_nodes_set(svc, conf, s),
        'rid': s,
        'subset': get_subset(conf, s, svc),
        'tags': get_tags(conf, s, svc),
        'disabled': get_disabled(conf, s, svc),
        'optional': get_optional(conf, s, svc),
        'monitor': get_monitor(conf, s, svc),
        'restart': get_restart(conf, s, svc),
    }
    driver = __import__('resDiskAmazon')
    resource = driver.Disk(**params)
    add_triggers_and_requires(svc, resource, conf, s)
    svc += resource
def add_rados(svc, conf, s):
    """Parse a rados disk section and add the resource to the service.

    When the 'lock' keyword is set, a second companion DiskLock
    resource is added, reusing the same kwargs with a derived rid.
    """
    kwargs = {}
    try:
        kwargs['images'] = conf_get_string_scope(svc, conf, s, 'images').split()
    except ex.OptNotFound:
        pass
    try:
        kwargs['keyring'] = conf_get_string_scope(svc, conf, s, 'keyring')
    except ex.OptNotFound:
        pass
    try:
        kwargs['client_id'] = conf_get_string_scope(svc, conf, s, 'client_id')
    except ex.OptNotFound:
        pass
    # lock keywords are kept aside: they only apply to the optional
    # DiskLock companion resource created at the end
    try:
        lock_shared_tag = conf_get_string_scope(svc, conf, s, 'lock_shared_tag')
    except ex.OptNotFound:
        lock_shared_tag = None
    try:
        lock = conf_get_string_scope(svc, conf, s, 'lock')
    except ex.OptNotFound:
        lock = None
    # generic per-resource keywords shared by all resource parsers
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs['monitor'] = get_monitor(conf, s, svc)
    kwargs['restart'] = get_restart(conf, s, svc)
    try:
        m = __import__('resDiskRados'+rcEnv.sysname)
    except ImportError:
        svc.log.error("disk type rados is not implemented")
        return
    r = m.Disk(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
    if not lock:
        return
    # rados locking resource
    kwargs["rid"] = kwargs["rid"]+"lock"
    kwargs["lock"] = lock
    kwargs["lock_shared_tag"] = lock_shared_tag
    r = m.DiskLock(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
def add_raw(svc, conf, s):
    """Parse a raw disk section and add the OS-specific Raw disk
    resource to the service. The 'devs' keyword is mandatory.
    """
    kwargs = {}
    disk_type = "Raw"+rcEnv.sysname
    try:
        zone = conf_get_string_scope(svc, conf, s, 'zone')
    # NOTE(review): bare except (not just ex.OptNotFound), consistent
    # with the zone handling in add_fs/add_fs_directory
    except:
        zone = None
    try:
        kwargs['user'] = conf_get_string_scope(svc, conf, s, 'user')
    except ex.OptNotFound:
        pass
    try:
        kwargs['group'] = conf_get_string_scope(svc, conf, s, 'group')
    except ex.OptNotFound:
        pass
    try:
        kwargs['perm'] = conf_get_string_scope(svc, conf, s, 'perm')
    except ex.OptNotFound:
        pass
    try:
        kwargs['create_char_devices'] = conf_get_boolean_scope(svc, conf, s, 'create_char_devices')
    except ex.OptNotFound:
        pass
    try:
        devs = conf_get_string_scope(svc, conf, s, 'devs')
        if zone is not None:
            # tag each dst part of the dev mappings with the zone, for
            # later zonepath substitution
            devs = devs.replace(":", ":<%s>" % zone)
        kwargs['devs'] = set(devs.split())
    except ex.OptNotFound:
        svc.log.error("devs must be set in section %s"%s)
        return
    # backward compat : the dummy keyword is deprecated in favor of
    # the standard "noaction" tag.
    try:
        dummy = conf_get_boolean_scope(svc, conf, s, 'dummy')
    except ex.OptNotFound:
        dummy = False
    # generic per-resource keywords shared by all resource parsers
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs['monitor'] = get_monitor(conf, s, svc)
    kwargs['restart'] = get_restart(conf, s, svc)
    try:
        m = __import__('resDisk'+disk_type)
    except ImportError:
        svc.log.error("disk type %s driver is not implemented"%disk_type)
        return
    r = m.Disk(**kwargs)
    if dummy:
        r.tags.add("noaction")
    if zone is not None:
        r.tags.add('zone')
        r.tags.add(zone)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
    add_scsireserv(svc, r, conf, s)
def add_gandi(svc, conf, s):
    """Parse a gandi disk section and add the resource to the service.

    Mandatory keywords: cloud_id, name. Optional: node, user, group,
    perm.

    Fix: the 'group' kwarg used to be read from the 'user' keyword.
    """
    disk_type = "Gandi"
    kwargs = {}
    try:
        kwargs['cloud_id'] = conf_get_string_scope(svc, conf, s, 'cloud_id')
    except ex.OptNotFound:
        svc.log.error("cloud_id must be set in section %s"%s)
        return
    try:
        kwargs['name'] = conf_get_string_scope(svc, conf, s, 'name')
    except ex.OptNotFound:
        svc.log.error("name must be set in section %s"%s)
        return
    try:
        kwargs['node'] = conf_get_string_scope(svc, conf, s, 'node')
    except ex.OptNotFound:
        pass
    try:
        kwargs['user'] = conf_get_string_scope(svc, conf, s, 'user')
    except ex.OptNotFound:
        pass
    try:
        # bugfix: this previously fetched the 'user' keyword
        kwargs['group'] = conf_get_string_scope(svc, conf, s, 'group')
    except ex.OptNotFound:
        pass
    try:
        kwargs['perm'] = conf_get_string_scope(svc, conf, s, 'perm')
    except ex.OptNotFound:
        pass
    # generic per-resource keywords shared by all resource parsers
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs['monitor'] = get_monitor(conf, s, svc)
    kwargs['restart'] = get_restart(conf, s, svc)
    try:
        m = __import__('resDisk'+disk_type)
    except ImportError:
        svc.log.error("disk type %s is not implemented"%disk_type)
        return
    r = m.Disk(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
    add_scsireserv(svc, r, conf, s)
def add_disk_compat(svc, conf, s):
    """Dispatch a legacy disk section to its type-specific handler.

    The type defaults to the section name prefix when the 'type'
    keyword is absent, and is normalized to the handler capitalization
    (e.g. 'drbd' -> 'Drbd'). Raises ex.OptNotFound when the type
    matches no known handler, letting the caller fall back to other
    parsers.
    """
    try:
        disk_type = conf_get_string_scope(svc, conf, s, 'type')
    except ex.OptNotFound:
        disk_type = s.split("#")[0]
    if len(disk_type) >= 2:
        disk_type = disk_type[0].upper() + disk_type[1:].lower()
    handlers = {
        'Drbd': add_drbd,
        'Vdisk': add_vdisk,
        'Vmdg': add_vmdg,
        'Pool': add_zpool,
        'Zpool': add_zpool,
        'Loop': add_loop,
        'Md': add_md,
        'Gce': add_disk_gce,
        'Disk': add_disk_disk,
        'Amazon': add_disk_amazon,
        'Rados': add_rados,
        'Raw': add_raw,
        'Gandi': add_gandi,
        'Veritas': add_veritas,
    }
    if disk_type not in handlers:
        raise ex.OptNotFound
    handlers[disk_type](svc, conf, s)
def add_veritas(svc, conf, s):
    """Parse a veritas disk group section and add the resource to the
    service. 'name' is mandatory; 'vgname' is its deprecated alias.
    """
    kwargs = {}
    try:
        # deprecated keyword 'vgname'
        kwargs['name'] = conf_get_string_scope(svc, conf, s, 'vgname')
    except ex.OptNotFound:
        pass
    try:
        # 'name' overrides the deprecated 'vgname' when both are set
        kwargs['name'] = conf_get_string_scope(svc, conf, s, 'name')
    except ex.OptNotFound:
        if "name" not in kwargs:
            svc.log.error("name must be set in section %s"%s)
            return
    # generic per-resource keywords shared by all resource parsers
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs['monitor'] = get_monitor(conf, s, svc)
    kwargs['restart'] = get_restart(conf, s, svc)
    try:
        m = __import__('resDiskVgVeritas')
    except ImportError:
        svc.log.error("disk type veritas is not implemented")
        return
    r = m.Disk(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
    add_scsireserv(svc, r, conf, s)
def add_vg(svc, conf, s):
    """Parse a volume group section and add the OS-specific Vg disk
    resource. First defers to add_disk_compat, which handles the
    section if its type maps to a more specific driver.
    """
    try:
        add_disk_compat(svc, conf, s)
        return
    except ex.OptNotFound:
        # not a legacy/specific disk type: handle as a native vg
        pass
    disk_type = rcEnv.sysname
    kwargs = {}
    try:
        # deprecated keyword 'vgname'
        kwargs['name'] = conf_get_string_scope(svc, conf, s, 'vgname')
    except ex.OptNotFound:
        pass
    try:
        # 'name' overrides the deprecated 'vgname' when both are set
        kwargs['name'] = conf_get_string_scope(svc, conf, s, 'name')
    except ex.OptNotFound:
        if "name" not in kwargs:
            svc.log.error("name must be set in section %s"%s)
            return
    try:
        kwargs['dsf'] = conf_get_boolean_scope(svc, conf, s, 'dsf')
    except ex.OptNotFound:
        pass
    # generic per-resource keywords shared by all resource parsers
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs['monitor'] = get_monitor(conf, s, svc)
    kwargs['restart'] = get_restart(conf, s, svc)
    try:
        m = __import__('resDiskVg'+disk_type)
    except ImportError:
        svc.log.error("disk type %s is not implemented"%disk_type)
        return
    r = m.Disk(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
    add_scsireserv(svc, r, conf, s)
def add_disk(svc, conf, s):
    """Parse the configuration file and add a disk object for each [disk#n]
    section. Disk objects are stored in a list in the service object.

    The type defaults to the section name prefix when the 'type'
    keyword is absent, and is normalized to the handler capitalization.
    Unknown types are silently ignored, matching the original
    fall-through behavior. (The unused 'kwargs' local was dropped and
    the long if-chain made table-driven, consistent with
    add_disk_compat.)
    """
    try:
        disk_type = conf_get_string_scope(svc, conf, s, 'type')
    except ex.OptNotFound:
        disk_type = s.split("#")[0]
    if len(disk_type) >= 2:
        disk_type = disk_type[0].upper() + disk_type[1:].lower()
    handlers = {
        'Drbd': add_drbd,
        'Vdisk': add_vdisk,
        'Vmdg': add_vmdg,
        'Pool': add_zpool,
        'Zpool': add_zpool,
        'Loop': add_loop,
        'Md': add_md,
        'Gce': add_disk_gce,
        'Disk': add_disk_disk,
        'Amazon': add_disk_amazon,
        'Rados': add_rados,
        'Raw': add_raw,
        'Gandi': add_gandi,
        'Veritas': add_veritas,
    }
    if disk_type in handlers:
        handlers[disk_type](svc, conf, s)
        return
    if disk_type == 'Lvm' or disk_type == 'Vg' or disk_type == rcEnv.sysname:
        add_vg(svc, conf, s)
        return
def add_vmdg(svc, conf, s):
    """Parse a vmdg disk section and add the container-backed disk
    group resource. 'container_id' must point to a valid container
    section; only ldom containers are supported here.
    """
    kwargs = {}
    try:
        kwargs['container_id'] = conf_get_string_scope(svc, conf, s, 'container_id')
    except ex.OptNotFound:
        svc.log.error("container_id must be set in section %s"%s)
        return
    if not conf.has_section(kwargs['container_id']):
        svc.log.error("%s.container_id points to an invalid section"%kwargs['container_id'])
        return
    try:
        container_type = conf_get_string_scope(svc, conf, kwargs['container_id'], 'type')
    except ex.OptNotFound:
        svc.log.error("type must be set in section %s"%kwargs['container_id'])
        return
    if container_type == 'ldom':
        m = __import__('resDiskLdom')
    else:
        # other container types have no vmdg driver: silently skip
        return
    # generic per-resource keywords shared by all resource parsers
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['name'] = s
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs['monitor'] = get_monitor(conf, s, svc)
    kwargs['restart'] = get_restart(conf, s, svc)
    r = m.Disk(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
    add_scsireserv(svc, r, conf, s)
def add_zpool(svc, conf, s):
    """Parse the configuration file and add a zpool object for each disk.zpool
    section. Pools objects are stored in a list in the service object.
    'name' is mandatory; 'poolname' is its deprecated alias.
    """
    kwargs = {}
    try:
        # deprecated keyword 'poolname'
        kwargs['name'] = conf_get_string_scope(svc, conf, s, 'poolname')
    except ex.OptNotFound:
        pass
    try:
        # 'name' overrides the deprecated 'poolname' when both are set
        kwargs['name'] = conf_get_string_scope(svc, conf, s, 'name')
    except ex.OptNotFound:
        pass
    if "name" not in kwargs:
        svc.log.error("name must be set in section %s"%s)
        return
    try:
        zone = conf_get_string_scope(svc, conf, s, 'zone')
    except ex.OptNotFound:
        zone = None
    m = __import__('resDiskZfs')
    # generic per-resource keywords shared by all resource parsers
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs['monitor'] = get_monitor(conf, s, svc)
    kwargs['restart'] = get_restart(conf, s, svc)
    r = m.Disk(**kwargs)
    if zone is not None:
        # tag the resource so zone-aware actions can filter on it
        r.tags.add('zone')
        r.tags.add(zone)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
    add_scsireserv(svc, r, conf, s)
def add_share(svc, conf, s):
    """Dispatch a [share#n] section to its add_share_<type> handler.

    Fix: the original fell through after logging "type not supported"
    and still called globals()[fname], raising KeyError.
    """
    try:
        _type = conf_get_string_scope(svc, conf, s, 'type')
    except ex.OptNotFound:
        svc.log.error("type must be set in section %s"%s)
        return
    fname = 'add_share_'+_type
    if fname not in globals():
        svc.log.error("type '%s' not supported in section %s"%(_type, s))
        return
    globals()[fname](svc, conf, s)
def add_share_nfs(svc, conf, s):
    """Add a NFS share resource for the section. The 'path' and 'opts'
    keywords are mandatory; the OS-specific resShareNfs driver is
    loaded, unsupported platforms are logged and skipped.
    """
    try:
        path = conf_get_string_scope(svc, conf, s, 'path')
    except ex.OptNotFound:
        svc.log.error("path must be set in section %s"%s)
        return
    try:
        opts = conf_get_string_scope(svc, conf, s, 'opts')
    except ex.OptNotFound:
        svc.log.error("opts must be set in section %s"%s)
        return
    try:
        driver = __import__('resShareNfs'+rcEnv.sysname)
    except ImportError:
        svc.log.error("resShareNfs%s is not implemented"%rcEnv.sysname)
        return
    # generic per-resource keywords shared by all resource parsers
    params = {
        'path': path,
        'opts': opts,
        'rid': s,
        'subset': get_subset(conf, s, svc),
        'tags': get_tags(conf, s, svc),
        'always_on': always_on_nodes_set(svc, conf, s),
        'disabled': get_disabled(conf, s, svc),
        'optional': get_optional(conf, s, svc),
        'monitor': get_monitor(conf, s, svc),
        'restart': get_restart(conf, s, svc),
    }
    resource = driver.Share(**params)
    add_triggers_and_requires(svc, resource, conf, s)
    svc += resource
def add_fs_directory(svc, conf, s):
    """Parse a fs section of type 'directory' and add a FsDir resource.
    'path' is mandatory; user/group/perm are optional ownership
    settings. When 'zone' is set, the path is rebased under the zone
    root.
    """
    kwargs = {}
    try:
        kwargs['path'] = conf_get_string_scope(svc, conf, s, 'path')
    except ex.OptNotFound:
        svc.log.error("path must be set in section %s"%s)
        return
    try:
        kwargs['user'] = conf_get_string_scope(svc, conf, s, 'user')
    except ex.OptNotFound:
        pass
    try:
        kwargs['group'] = conf_get_string_scope(svc, conf, s, 'group')
    except ex.OptNotFound:
        pass
    try:
        kwargs['perm'] = conf_get_string_scope(svc, conf, s, 'perm')
    except ex.OptNotFound:
        pass
    try:
        zone = conf_get_string_scope(svc, conf, s, 'zone')
    except:
        zone = None
    if zone is not None:
        # rebase the path under the zone root directory
        zp = None
        for r in svc.get_resources("container.zone", discard_disabled=False):
            if r.name == zone:
                try:
                    zp = r.get_zonepath()
                except:
                    # zonepath not resolvable yet: keep a <zone>
                    # placeholder and skip realpath below
                    zp = "<%s>" % zone
                break
        if zp is None:
            svc.log.error("zone %s, referenced in %s, not found"%(zone, s))
            raise ex.excError()
        kwargs['path'] = zp+'/root'+kwargs['path']
        if "<%s>" % zone != zp:
            kwargs['path'] = os.path.realpath(kwargs['path'])
    mod = __import__('resFsDir')
    # generic per-resource keywords shared by all resource parsers
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs['monitor'] = get_monitor(conf, s, svc)
    kwargs['restart'] = get_restart(conf, s, svc)
    r = mod.FsDir(**kwargs)
    if zone is not None:
        # tag the resource so zone-aware actions can filter on it
        r.tags.add(zone)
        r.tags.add('zone')
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
def add_fs(svc, conf, s):
    """Parse the configuration file and add a fs object for each [fs#n]
    section. Fs objects are stored in a list in the service object.

    'dev' and 'mnt' are mandatory. Sections of type 'directory' are
    delegated to add_fs_directory. When 'zone' is set, the mount point
    is rebased under the zone root.

    Fix: the trailing-slash strip condition was self-contradictory
    (``[-1] != "/" and [-1] == '/'``, always False), so the strip never
    ran; it now strips a trailing '/' from any mount point longer than
    one character.
    """
    kwargs = {}
    try:
        kwargs['fs_type'] = conf_get_string_scope(svc, conf, s, 'type')
    except ex.OptNotFound:
        kwargs['fs_type'] = ""
    if kwargs['fs_type'] == "directory":
        add_fs_directory(svc, conf, s)
        return
    try:
        kwargs['device'] = conf_get_string_scope(svc, conf, s, 'dev')
    except ex.OptNotFound:
        svc.log.error("dev must be set in section %s"%s)
        return
    try:
        kwargs['mount_point'] = conf_get_string_scope(svc, conf, s, 'mnt')
    except ex.OptNotFound:
        svc.log.error("mnt must be set in section %s"%s)
        return
    if len(kwargs['mount_point']) > 1 and kwargs['mount_point'][-1] == '/':
        # Remove trailing / to not risk losing rsync src trailing /
        # upon snap mountpoint substitution. The root mount point "/"
        # is kept as-is.
        kwargs['mount_point'] = kwargs['mount_point'][0:-1]
    try:
        kwargs['mount_options'] = conf_get_string_scope(svc, conf, s, 'mnt_opt')
    except ex.OptNotFound:
        kwargs['mount_options'] = ""
    try:
        kwargs['snap_size'] = conf_get_int_scope(svc, conf, s, 'snap_size')
    except ex.OptNotFound:
        pass
    try:
        zone = conf_get_string_scope(svc, conf, s, 'zone')
    except:
        zone = None
    if zone is not None:
        # rebase the mount point under the zone root directory
        zp = None
        for r in svc.get_resources("container.zone", discard_disabled=False):
            if r.name == zone:
                try:
                    zp = r.get_zonepath()
                except:
                    # zonepath not resolvable yet: keep a <zone>
                    # placeholder and skip realpath below
                    zp = "<%s>" % zone
                break
        if zp is None:
            svc.log.error("zone %s, referenced in %s, not found"%(zone, s))
            raise ex.excError()
        kwargs['mount_point'] = zp+'/root'+kwargs['mount_point']
        if "<%s>" % zone != zp:
            kwargs['mount_point'] = os.path.realpath(kwargs['mount_point'])
    try:
        mount = __import__('resFs'+rcEnv.sysname)
    except ImportError:
        svc.log.error("resFs%s is not implemented"%rcEnv.sysname)
        return
    # generic per-resource keywords shared by all resource parsers
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs['monitor'] = get_monitor(conf, s, svc)
    kwargs['restart'] = get_restart(conf, s, svc)
    r = mount.Mount(**kwargs)
    if zone is not None:
        # tag the resource so zone-aware actions can filter on it
        r.tags.add(zone)
        r.tags.add('zone')
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
    add_scsireserv(svc, r, conf, s)
def add_containers_esx(svc, conf, s):
    """Add an ESX virtual machine container resource for the section.
    The vm name defaults to the service name.
    """
    try:
        name = conf_get_string_scope(svc, conf, s, 'name')
    except ex.OptNotFound:
        name = svc.svcname
    params = {'name': name}
    try:
        params['guestos'] = conf_get_string_scope(svc, conf, s, 'guestos')
    except ex.OptNotFound:
        pass
    driver = __import__('resContainerEsx')
    # generic per-resource keywords shared by all resource parsers
    params.update({
        'rid': s,
        'subset': get_subset(conf, s, svc),
        'tags': get_tags(conf, s, svc),
        'always_on': always_on_nodes_set(svc, conf, s),
        'disabled': get_disabled(conf, s, svc),
        'optional': get_optional(conf, s, svc),
        'monitor': get_monitor(conf, s, svc),
        'restart': get_restart(conf, s, svc),
        'osvc_root_path': get_osvc_root_path(conf, s, svc),
    })
    resource = driver.Esx(**params)
    add_triggers_and_requires(svc, resource, conf, s)
    svc += resource
    add_scsireserv(svc, resource, conf, s)
def add_containers_hpvm(svc, conf, s):
    """Add a HP-UX vPar/Integrity VM container resource for the
    section. The vm name defaults to the service name.
    """
    kwargs = {}
    try:
        kwargs['name'] = conf_get_string_scope(svc, conf, s, 'name')
    except ex.OptNotFound:
        kwargs['name'] = svc.svcname
    try:
        kwargs['guestos'] = conf_get_string_scope(svc, conf, s, 'guestos')
    except ex.OptNotFound:
        pass
    m = __import__('resContainerHpVm')
    # generic per-resource keywords shared by all resource parsers
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs['monitor'] = get_monitor(conf, s, svc)
    kwargs['restart'] = get_restart(conf, s, svc)
    kwargs['osvc_root_path'] = get_osvc_root_path(conf, s, svc)
    r = m.HpVm(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
    add_scsireserv(svc, r, conf, s)
def add_containers_ldom(svc, conf, s):
    """Add a Solaris logical domain container resource for the section.
    The domain name defaults to the service name.
    """
    kwargs = {}
    try:
        kwargs['name'] = conf_get_string_scope(svc, conf, s, 'name')
    except ex.OptNotFound:
        kwargs['name'] = svc.svcname
    try:
        kwargs['guestos'] = conf_get_string_scope(svc, conf, s, 'guestos')
    except ex.OptNotFound:
        pass
    m = __import__('resContainerLdom')
    # generic per-resource keywords shared by all resource parsers
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs['monitor'] = get_monitor(conf, s, svc)
    kwargs['restart'] = get_restart(conf, s, svc)
    kwargs['osvc_root_path'] = get_osvc_root_path(conf, s, svc)
    r = m.Ldom(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
    add_scsireserv(svc, r, conf, s)
def add_containers_vbox(svc, conf, s):
    """Add a VirtualBox container resource for the section. The vm name
    defaults to the service name. Process group settings are attached
    to the resource.
    """
    kwargs = {}
    try:
        kwargs['name'] = conf_get_string_scope(svc, conf, s, 'name')
    except ex.OptNotFound:
        kwargs['name'] = svc.svcname
    try:
        kwargs['guestos'] = conf_get_string_scope(svc, conf, s, 'guestos')
    except ex.OptNotFound:
        pass
    m = __import__('resContainerVbox')
    # generic per-resource keywords shared by all resource parsers
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs['monitor'] = get_monitor(conf, s, svc)
    kwargs['restart'] = get_restart(conf, s, svc)
    kwargs['osvc_root_path'] = get_osvc_root_path(conf, s, svc)
    r = m.Vbox(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    r.pg_settings = get_pg_settings(svc, s)
    svc += r
    add_scsireserv(svc, r, conf, s)
def add_containers_xen(svc, conf, s):
    """Add a Xen domU container resource for the section. The domain
    name defaults to the service name. Process group settings are
    attached to the resource.
    """
    kwargs = {}
    try:
        kwargs['name'] = conf_get_string_scope(svc, conf, s, 'name')
    except ex.OptNotFound:
        kwargs['name'] = svc.svcname
    try:
        kwargs['guestos'] = conf_get_string_scope(svc, conf, s, 'guestos')
    except ex.OptNotFound:
        pass
    m = __import__('resContainerXen')
    # generic per-resource keywords shared by all resource parsers
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs['monitor'] = get_monitor(conf, s, svc)
    kwargs['restart'] = get_restart(conf, s, svc)
    kwargs['osvc_root_path'] = get_osvc_root_path(conf, s, svc)
    r = m.Xen(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    r.pg_settings = get_pg_settings(svc, s)
    svc += r
    add_scsireserv(svc, r, conf, s)
def add_containers_zone(svc, conf, s):
    """Add a Solaris zone container resource for the section. The zone
    name defaults to the service name. 'delete_on_stop' optionally
    requests zone deletion on resource stop.
    """
    kwargs = {}
    try:
        kwargs['name'] = conf_get_string_scope(svc, conf, s, 'name')
    except ex.OptNotFound:
        kwargs['name'] = svc.svcname
    try:
        kwargs['guestos'] = conf_get_string_scope(svc, conf, s, 'guestos')
    except ex.OptNotFound:
        pass
    try:
        kwargs['delete_on_stop'] = conf_get_boolean_scope(svc, conf, s, 'delete_on_stop')
    except ex.OptNotFound:
        pass
    m = __import__('resContainerZone')
    # generic per-resource keywords shared by all resource parsers
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs['monitor'] = get_monitor(conf, s, svc)
    kwargs['restart'] = get_restart(conf, s, svc)
    kwargs['osvc_root_path'] = get_osvc_root_path(conf, s, svc)
    r = m.Zone(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
    add_scsireserv(svc, r, conf, s)
def add_containers_vcloud(svc, conf, s):
    """Add a vCloud virtual machine container resource for the section.
    Mandatory keywords: cloud_id, vapp. The vm name defaults to the
    service name.
    """
    kwargs = {}
    try:
        kwargs['name'] = conf_get_string_scope(svc, conf, s, 'name')
    except ex.OptNotFound:
        kwargs['name'] = svc.svcname
    try:
        kwargs['guestos'] = conf_get_string_scope(svc, conf, s, 'guestos')
    except ex.OptNotFound:
        pass
    try:
        kwargs['cloud_id'] = conf_get_string_scope(svc, conf, s, 'cloud_id')
    except ex.OptNotFound:
        svc.log.error("cloud_id must be set in section %s"%s)
        return
    try:
        kwargs['vapp'] = conf_get_string_scope(svc, conf, s, 'vapp')
    except ex.OptNotFound:
        svc.log.error("vapp must be set in section %s"%s)
        return
    m = __import__('resContainerVcloud')
    # generic per-resource keywords shared by all resource parsers
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs['monitor'] = get_monitor(conf, s, svc)
    kwargs['restart'] = get_restart(conf, s, svc)
    kwargs['osvc_root_path'] = get_osvc_root_path(conf, s, svc)
    r = m.CloudVm(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
    add_scsireserv(svc, r, conf, s)
def add_containers_amazon(svc, conf, s):
    """Add an Amazon EC2 instance container resource for the section.
    'cloud_id' is mandatory; image_id/size/key_name/subnet are
    provisioning-only keywords.
    """
    kwargs = {}
    # mandatory keywords
    try:
        kwargs['name'] = conf_get_string_scope(svc, conf, s, 'name')
    except ex.OptNotFound:
        kwargs['name'] = svc.svcname
    try:
        kwargs['cloud_id'] = conf_get_string_scope(svc, conf, s, 'cloud_id')
    except ex.OptNotFound:
        svc.log.error("cloud_id must be set in section %s"%s)
        return
    # optional keywords
    try:
        kwargs['guestos'] = conf_get_string_scope(svc, conf, s, 'guestos')
    except ex.OptNotFound:
        pass
    # provisioning keywords
    try:
        kwargs['image_id'] = conf_get_string_scope(svc, conf, s, 'image_id')
    except ex.OptNotFound:
        pass
    try:
        kwargs['size'] = conf_get_string_scope(svc, conf, s, 'size')
    except ex.OptNotFound:
        pass
    try:
        kwargs['key_name'] = conf_get_string_scope(svc, conf, s, 'key_name')
    except ex.OptNotFound:
        pass
    try:
        kwargs['subnet'] = conf_get_string_scope(svc, conf, s, 'subnet')
    except ex.OptNotFound:
        pass
    m = __import__('resContainerAmazon')
    # generic per-resource keywords shared by all resource parsers
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs['monitor'] = get_monitor(conf, s, svc)
    kwargs['restart'] = get_restart(conf, s, svc)
    kwargs['osvc_root_path'] = get_osvc_root_path(conf, s, svc)
    r = m.CloudVm(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
    add_scsireserv(svc, r, conf, s)
def add_containers_openstack(svc, conf, s):
    """Add an OpenStack instance container resource for the section.
    Mandatory keywords: cloud_id, size, key_name. The vm name defaults
    to the service name.
    """
    kwargs = {}
    try:
        kwargs['name'] = conf_get_string_scope(svc, conf, s, 'name')
    except ex.OptNotFound:
        kwargs['name'] = svc.svcname
    try:
        kwargs['guestos'] = conf_get_string_scope(svc, conf, s, 'guestos')
    except ex.OptNotFound:
        pass
    try:
        kwargs['cloud_id'] = conf_get_string_scope(svc, conf, s, 'cloud_id')
    except ex.OptNotFound:
        svc.log.error("cloud_id must be set in section %s"%s)
        return
    try:
        kwargs['size'] = conf_get_string_scope(svc, conf, s, 'size')
    except ex.OptNotFound:
        svc.log.error("size must be set in section %s"%s)
        return
    try:
        kwargs['key_name'] = conf_get_string_scope(svc, conf, s, 'key_name')
    except ex.OptNotFound:
        svc.log.error("key_name must be set in section %s"%s)
        return
    try:
        kwargs['shared_ip_group'] = conf_get_string_scope(svc, conf, s, 'shared_ip_group')
    except ex.OptNotFound:
        pass
    m = __import__('resContainerOpenstack')
    # generic per-resource keywords shared by all resource parsers
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs['monitor'] = get_monitor(conf, s, svc)
    kwargs['restart'] = get_restart(conf, s, svc)
    kwargs['osvc_root_path'] = get_osvc_root_path(conf, s, svc)
    r = m.CloudVm(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
    add_scsireserv(svc, r, conf, s)
def add_containers_vz(svc, conf, s):
    """Add an OpenVZ container resource for the section. The container
    name defaults to the service name. Process group settings are
    attached to the resource.
    """
    kwargs = {}
    try:
        kwargs['name'] = conf_get_string_scope(svc, conf, s, 'name')
    except ex.OptNotFound:
        kwargs['name'] = svc.svcname
    try:
        kwargs['guestos'] = conf_get_string_scope(svc, conf, s, 'guestos')
    except ex.OptNotFound:
        pass
    m = __import__('resContainerVz')
    # generic per-resource keywords shared by all resource parsers
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs['monitor'] = get_monitor(conf, s, svc)
    kwargs['restart'] = get_restart(conf, s, svc)
    kwargs['osvc_root_path'] = get_osvc_root_path(conf, s, svc)
    r = m.Vz(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    r.pg_settings = get_pg_settings(svc, s)
    svc += r
    add_scsireserv(svc, r, conf, s)
def add_containers_kvm(svc, conf, s):
    """Add a KVM virtual machine container resource for the section.
    Unlike most container parsers, 'name' is mandatory here (no
    default to the service name). Process group settings are attached
    to the resource.
    """
    kwargs = {}
    try:
        kwargs['name'] = conf_get_string_scope(svc, conf, s, 'name')
    except ex.OptNotFound:
        svc.log.error("name must be set in section %s"%s)
        return
    try:
        kwargs['guestos'] = conf_get_string_scope(svc, conf, s, 'guestos')
    except ex.OptNotFound:
        pass
    m = __import__('resContainerKvm')
    # generic per-resource keywords shared by all resource parsers
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs['monitor'] = get_monitor(conf, s, svc)
    kwargs['restart'] = get_restart(conf, s, svc)
    kwargs['osvc_root_path'] = get_osvc_root_path(conf, s, svc)
    r = m.Kvm(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    r.pg_settings = get_pg_settings(svc, s)
    svc += r
    add_scsireserv(svc, r, conf, s)
def add_containers_srp(svc, conf, s):
    """Add a HP-UX SRP container resource for the section. The
    container name defaults to the service name.
    """
    kwargs = {}
    try:
        kwargs['name'] = conf_get_string_scope(svc, conf, s, 'name')
    except ex.OptNotFound:
        kwargs['name'] = svc.svcname
    try:
        kwargs['guestos'] = conf_get_string_scope(svc, conf, s, 'guestos')
    except ex.OptNotFound:
        pass
    m = __import__('resContainerSrp')
    # generic per-resource keywords shared by all resource parsers
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs['monitor'] = get_monitor(conf, s, svc)
    kwargs['restart'] = get_restart(conf, s, svc)
    kwargs['osvc_root_path'] = get_osvc_root_path(conf, s, svc)
    r = m.Srp(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
    add_scsireserv(svc, r, conf, s)
def add_containers_lxc(svc, conf, s):
    """Parse config section ``s`` and add a container.lxc resource to ``svc``.

    'name' defaults to the service name; 'guestos' and 'cf' (container
    config file path) are optional.
    """
    kwargs = {}
    try:
        kwargs['name'] = conf_get_string_scope(svc, conf, s, 'name')
    except ex.OptNotFound:
        kwargs['name'] = svc.svcname
    try:
        kwargs['guestos'] = conf_get_string_scope(svc, conf, s, 'guestos')
    except ex.OptNotFound:
        pass
    try:
        kwargs['cf'] = conf_get_string_scope(svc, conf, s, 'cf')
    except ex.OptNotFound:
        pass
    m = __import__('resContainerLxc')
    kwargs['rid'] = s
    kwargs['rcmd'] = get_rcmd(conf, s, svc)
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs['monitor'] = get_monitor(conf, s, svc)
    kwargs['restart'] = get_restart(conf, s, svc)
    kwargs['osvc_root_path'] = get_osvc_root_path(conf, s, svc)
    r = m.Lxc(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    r.pg_settings = get_pg_settings(svc, s)
    svc += r
    add_scsireserv(svc, r, conf, s)
def add_containers_docker(svc, conf, s):
    """Parse config section ``s`` and add a container.docker resource to ``svc``.

    'run_image' is mandatory; 'run_command', 'run_args' and
    'docker_service' are optional.
    """
    kwargs = {}
    try:
        kwargs['run_image'] = conf_get_string_scope(svc, conf, s, 'run_image')
    except ex.OptNotFound:
        svc.log.error("'run_image' parameter is mandatory in section %s"%s)
        return
    try:
        kwargs['run_command'] = conf_get_string_scope(svc, conf, s, 'run_command')
    except ex.OptNotFound:
        pass
    try:
        kwargs['run_args'] = conf_get_string_scope(svc, conf, s, 'run_args')
    except ex.OptNotFound:
        pass
    try:
        kwargs['docker_service'] = conf_get_boolean_scope(svc, conf, s, 'docker_service')
    except ex.OptNotFound:
        pass
    m = __import__('resContainerDocker')
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs['monitor'] = get_monitor(conf, s, svc)
    kwargs['restart'] = get_restart(conf, s, svc)
    kwargs['osvc_root_path'] = get_osvc_root_path(conf, s, svc)
    r = m.Docker(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    r.pg_settings = get_pg_settings(svc, s)
    svc += r
    add_scsireserv(svc, r, conf, s)
def add_containers_ovm(svc, conf, s):
    """Parse config section ``s`` and add a container.ovm resource to ``svc``.

    Both 'uuid' and 'name' are mandatory; a missing one logs an error
    and skips the resource.
    """
    kwargs = {}
    try:
        kwargs['uuid'] = conf_get_string_scope(svc, conf, s, 'uuid')
    except ex.OptNotFound:
        svc.log.error("uuid must be set in section %s"%s)
        return
    try:
        kwargs['name'] = conf_get_string_scope(svc, conf, s, 'name')
    except ex.OptNotFound:
        svc.log.error("name must be set in section %s"%s)
        return
    try:
        kwargs['guestos'] = conf_get_string_scope(svc, conf, s, 'guestos')
    except ex.OptNotFound:
        pass
    m = __import__('resContainerOvm')
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs['monitor'] = get_monitor(conf, s, svc)
    kwargs['restart'] = get_restart(conf, s, svc)
    kwargs['osvc_root_path'] = get_osvc_root_path(conf, s, svc)
    r = m.Ovm(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    r.pg_settings = get_pg_settings(svc, s)
    svc += r
    add_scsireserv(svc, r, conf, s)
def add_containers_jail(svc, conf, s):
    """Parse config section ``s`` and add a container.jail resource to ``svc``.

    'jailroot' and 'name' are mandatory; 'ips' and 'ip6s' are optional
    whitespace-separated address lists.
    """
    kwargs = {}
    try:
        kwargs['jailroot'] = conf_get_string_scope(svc, conf, s, 'jailroot')
    except ex.OptNotFound:
        svc.log.error("jailroot must be set in section %s"%s)
        return
    try:
        kwargs['name'] = conf_get_string_scope(svc, conf, s, 'name')
    except ex.OptNotFound:
        svc.log.error("name must be set in section %s"%s)
        return
    try:
        kwargs['ips'] = conf_get_string_scope(svc, conf, s, 'ips').split()
    except ex.OptNotFound:
        pass
    try:
        kwargs['ip6s'] = conf_get_string_scope(svc, conf, s, 'ip6s').split()
    except ex.OptNotFound:
        pass
    m = __import__('resContainerJail')
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs['monitor'] = get_monitor(conf, s, svc)
    kwargs['restart'] = get_restart(conf, s, svc)
    kwargs['osvc_root_path'] = get_osvc_root_path(conf, s, svc)
    r = m.Jail(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    # NOTE(review): sibling container parsers also set r.pg_settings here;
    # its absence for jail may be deliberate — confirm before changing.
    svc += r
    add_scsireserv(svc, r, conf, s)
def add_containers(svc, conf):
    """Scan conf for container resources, one pass per supported
    virtualization type."""
    for vtype in rcEnv.vt_supported:
        add_containers_resources(vtype, svc, conf)
def add_containers_resources(subtype, svc, conf):
    """Add all container resources of the given subtype found in conf."""
    add_sub_resources('container', subtype, svc, conf)
def add_mandatory_syncs(svc, conf):
    """Mandatory files to sync:
    1/ to all nodes: service definition
    2/ to drpnodes: system files to replace on the drpnode in case of startdrp
    """
    """1
    """
    def add_file(flist, fpath):
        # only include paths that exist on this node
        if not os.path.exists(fpath):
            return flist
        flist.append(fpath)
        return flist
    # a single-node service has nothing to replicate
    if len(svc.nodes|svc.drpnodes) > 1:
        kwargs = {}
        src = []
        src = add_file(src, os.path.join(rcEnv.pathetc, svc.svcname))
        src = add_file(src, os.path.join(rcEnv.pathetc, svc.svcname+'.conf'))
        src = add_file(src, os.path.join(rcEnv.pathetc, svc.svcname+'.d'))
        src = add_file(src, os.path.join(rcEnv.pathetc, svc.svcname+'.cluster'))
        src = add_file(src, os.path.join(rcEnv.pathetc, svc.svcname+'.dir'))
        dst = os.path.join("/")
        exclude = ['--exclude=*.core']
        targethash = {'nodes': svc.nodes, 'drpnodes': svc.drpnodes}
        # internal sync resources use the reserved sync#i<N> rid namespace
        kwargs['rid'] = "sync#i0"
        kwargs['src'] = src
        kwargs['dst'] = dst
        # '-R' is rsync relative mode: source paths are recreated under dst
        kwargs['options'] = ['-R']+exclude
        # user-provided extra options for this internal rid are appended
        if conf.has_option(kwargs['rid'], 'options'):
            kwargs['options'] += cmdline2list(conf.get(kwargs['rid'], 'options'))
        kwargs['target'] = targethash
        kwargs['internal'] = True
        kwargs['disabled'] = get_disabled(conf, kwargs['rid'], svc)
        kwargs['optional'] = get_optional(conf, kwargs['rid'], svc)
        kwargs.update(get_sync_args(conf, kwargs['rid'], svc))
        r = resSyncRsync.Rsync(**kwargs)
        svc += r
    """2
    """
    if len(svc.drpnodes) == 0:
        return
    targethash = {'drpnodes': svc.drpnodes}
    """ Reparent all PRD backed-up file in drp_path/node on the drpnode
    """
    dst = os.path.join(rcEnv.drp_path, rcEnv.nodename)
    i = 0
    for src, exclude in rcEnv.drp_sync_files:
        """'-R' triggers rsync relative mode
        """
        kwargs = {}
        src = [ s for s in src if os.path.exists(s) ]
        if len(src) == 0:
            continue
        i += 1
        kwargs['rid'] = "sync#i"+str(i)
        kwargs['src'] = src
        kwargs['dst'] = dst
        kwargs['options'] = ['-R']+exclude
        if conf.has_option(kwargs['rid'], 'options'):
            kwargs['options'] += cmdline2list(conf.get(kwargs['rid'], 'options'))
        kwargs['target'] = targethash
        kwargs['internal'] = True
        kwargs['disabled'] = get_disabled(conf, kwargs['rid'], svc)
        kwargs['optional'] = get_optional(conf, kwargs['rid'], svc)
        kwargs.update(get_sync_args(conf, kwargs['rid'], svc))
        r = resSyncRsync.Rsync(**kwargs)
        svc += r
def add_syncs_resources(subtype, svc, conf):
    """Add all sync resources of the given subtype; sections without an
    explicit 'type' default to rsync."""
    add_sub_resources('sync', subtype, svc, conf, default_subtype="rsync")
def add_sub_resources(restype, subtype, svc, conf, default_subtype=None):
    """Scan conf sections named '<restype>#...' and, for each whose
    'type' option matches subtype, call the matching
    add_<restype>s_<subtype>() parser.

    Encap filtering: in an encapsulated service only 'encap'-tagged
    sections are handled; in the host service encap-tagged sections are
    skipped but flagged so the service knows it has encap resources.
    """
    for s in conf.sections():
        if re.match(restype+'#', s, re.I) is None:
            continue
        if svc.encap and 'encap' not in get_tags(conf, s, svc):
            continue
        if not svc.encap and 'encap' in get_tags(conf, s, svc):
            svc.has_encap_resources = True
            continue
        try:
            res_subtype = conf_get_string_scope(svc, conf, s, "type")
        except ex.OptNotFound:
            res_subtype = default_subtype
        if subtype != res_subtype:
            continue
        # dispatch by naming convention to the module-level parser
        globals()['add_'+restype+'s_'+subtype](svc, conf, s)
def add_syncs(svc, conf):
    """Add every sync resource defined in conf to svc, then the
    mandatory internal syncs (service config replication, drp files)."""
    # one scan pass per supported sync subtype, in this fixed order
    subtypes = (
        'rsync',
        'netapp',
        'nexenta',
        'radossnap',
        'radosclone',
        'symclone',
        'symsnap',
        'symsrdfs',
        'hp3par',
        'hp3parsnap',
        'ibmdssnap',
        'evasnap',
        'necismsnap',
        'btrfssnap',
        'zfssnap',
        's3',
        'dcssnap',
        'dcsckpt',
        'dds',
        'zfs',
        'btrfs',
        'docker',
    )
    for subtype in subtypes:
        add_syncs_resources(subtype, svc, conf)
    add_mandatory_syncs(svc, conf)
def add_syncs_docker(svc, conf, s):
    """Parse config section ``s`` and add a sync.docker resource to ``svc``.

    Silently skips the section when 'target' is not set.
    """
    kwargs = {}
    try:
        kwargs['target'] = conf_get_string_scope(svc, conf, s, 'target').split(' ')
    except ex.OptNotFound:
        return
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs.update(get_sync_args(conf, s, svc))
    m = __import__('resSyncDocker')
    r = m.SyncDocker(**kwargs)
    svc += r
def add_syncs_btrfs(svc, conf, s):
    """Parse config section ``s`` and add a sync.btrfs resource to ``svc``.

    'src', 'dst' and 'target' are mandatory; 'recursive' is optional.
    """
    kwargs = {}
    try:
        kwargs['src'] = conf_get_string_scope(svc, conf, s, 'src')
    except ex.OptNotFound:
        svc.log.error("config file section %s must have src set" % s)
        return
    try:
        kwargs['dst'] = conf_get_string_scope(svc, conf, s, 'dst')
    except ex.OptNotFound:
        svc.log.error("config file section %s must have dst set" % s)
        return
    try:
        kwargs['target'] = conf_get_string_scope(svc, conf, s, 'target').split()
    except ex.OptNotFound:
        svc.log.error("config file section %s must have target set" % s)
        return
    try:
        kwargs['recursive'] = conf_get_boolean(svc, conf, s, 'recursive')
    except ex.OptNotFound:
        pass
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs.update(get_sync_args(conf, s, svc))
    btrfs = __import__('resSyncBtrfs')
    r = btrfs.SyncBtrfs(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
def add_syncs_zfs(svc, conf, s):
    """Parse config section ``s`` and add a sync.zfs resource to ``svc``.

    'src', 'dst' and 'target' are mandatory; 'recursive' is optional.
    """
    kwargs = {}
    try:
        kwargs['src'] = conf_get_string_scope(svc, conf, s, 'src')
    except ex.OptNotFound:
        svc.log.error("config file section %s must have src set" % s)
        return
    try:
        kwargs['dst'] = conf_get_string_scope(svc, conf, s, 'dst')
    except ex.OptNotFound:
        svc.log.error("config file section %s must have dst set" % s)
        return
    try:
        kwargs['target'] = conf_get_string_scope(svc, conf, s, 'target').split()
    except ex.OptNotFound:
        svc.log.error("config file section %s must have target set" % s)
        return
    try:
        kwargs['recursive'] = conf_get_boolean(svc, conf, s, 'recursive')
    except ex.OptNotFound:
        pass
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs.update(get_sync_args(conf, s, svc))
    zfs = __import__('resSyncZfs')
    r = zfs.SyncZfs(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
def add_syncs_dds(svc, conf, s):
    """Parse config section ``s`` and add a sync.dds resource to ``svc``.

    'src' and 'target' are mandatory. Per-node 'dst' values are resolved
    through scoping impersonation.
    """
    kwargs = {}
    try:
        kwargs['src'] = conf_get_string_scope(svc, conf, s, 'src')
    except ex.OptNotFound:
        svc.log.error("config file section %s must have src set" % s)
        return
    dsts = {}
    for node in svc.nodes | svc.drpnodes:
        # NOTE(review): not wrapped in try/except — an unset 'dst' would
        # raise ex.OptNotFound out of this function; confirm callers cope.
        dst = conf_get_string_scope(svc, conf, s, 'dst', impersonate=node)
        dsts[node] = dst
    # NOTE(review): unreachable unless the node sets are empty, since the
    # loop above fills dsts for every node.
    if len(dsts) == 0:
        for node in svc.nodes | svc.drpnodes:
            dsts[node] = kwargs['src']
    kwargs['dsts'] = dsts
    try:
        kwargs['target'] = conf_get_string_scope(svc, conf, s, 'target').split()
    except ex.OptNotFound:
        svc.log.error("config file section %s must have target set" % s)
        return
    try:
        kwargs['sender'] = conf_get_string(svc, conf, s, 'sender')
    except ex.OptNotFound:
        pass
    try:
        kwargs['snap_size'] = conf_get_int_scope(svc, conf, s, 'snap_size')
    except ex.OptNotFound:
        pass
    try:
        kwargs['delta_store'] = conf_get_string_scope(svc, conf, s, 'delta_store')
    except ex.OptNotFound:
        pass
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs.update(get_sync_args(conf, s, svc))
    dds = __import__('resSyncDds')
    r = dds.syncDds(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
def add_syncs_dcsckpt(svc, conf, s):
    """Parse config section ``s`` and add a sync.dcsckpt resource to ``svc``.

    'dcs' and 'manager' are mandatory whitespace-separated sets.
    'pairs' is a JSON list; an empty or unparseable list aborts the
    resource creation.
    """
    kwargs = {}
    try:
        kwargs['dcs'] = set(conf_get_string_scope(svc, conf, s, 'dcs').split())
    except ex.OptNotFound:
        svc.log.error("config file section %s must have 'dcs' set" % s)
        return
    try:
        kwargs['manager'] = set(conf_get_string_scope(svc, conf, s, 'manager').split())
    except ex.OptNotFound:
        svc.log.error("config file section %s must have 'manager' set" % s)
        return
    import json
    pairs = []
    if 'pairs' in conf.options(s):
        try:
            pairs = json.loads(conf.get(s, 'pairs'))
            if len(pairs) == 0:
                svc.log.error("config file section %s must have 'pairs' set" % s)
                return
        except:
            svc.log.error("json error parsing 'pairs' in section %s" % s)
            # fix: previously fell through and built the resource with
            # pairs=[]; abort like the empty-list case above
            return
    kwargs['pairs'] = pairs
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs.update(get_sync_args(conf, s, svc))
    # prefer the OS-specific implementation when available
    try:
        sc = __import__('resSyncDcsCkpt'+rcEnv.sysname)
    except:
        sc = __import__('resSyncDcsCkpt')
    r = sc.syncDcsCkpt(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
def add_syncs_dcssnap(svc, conf, s):
    """Parse config section ``s`` and add a sync.dcssnap resource to ``svc``.

    'dcs', 'manager' and 'snapname' are mandatory whitespace-separated
    sets.
    """
    kwargs = {}
    try:
        kwargs['dcs'] = set(conf_get_string(svc, conf, s, 'dcs').split())
    except ex.OptNotFound:
        svc.log.error("config file section %s must have 'dcs' set" % s)
        return
    try:
        kwargs['manager'] = set(conf_get_string(svc, conf, s, 'manager').split())
    except ex.OptNotFound:
        svc.log.error("config file section %s must have 'manager' set" % s)
        return
    try:
        kwargs['snapname'] = set(conf_get_string(svc, conf, s, 'snapname').split())
    except ex.OptNotFound:
        svc.log.error("config file section %s must have 'snapname' set" % s)
        return
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs.update(get_sync_args(conf, s, svc))
    # prefer the OS-specific implementation when available
    try:
        sc = __import__('resSyncDcsSnap'+rcEnv.sysname)
    except:
        sc = __import__('resSyncDcsSnap')
    r = sc.syncDcsSnap(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
def add_syncs_s3(svc, conf, s):
    """Parse config section ``s`` and add a sync.s3 resource to ``svc``.

    'bucket' and 'src' are mandatory; 'full_schedule', 'options' and
    'snar' are optional.
    """
    kwargs = {}
    try:
        kwargs['full_schedule'] = conf_get_string_scope(svc, conf, s, 'full_schedule')
    except ex.OptNotFound:
        pass
    try:
        kwargs['options'] = conf_get_string_scope(svc, conf, s, 'options').split()
    except ex.OptNotFound:
        pass
    try:
        kwargs['snar'] = conf_get_string_scope(svc, conf, s, 'snar')
    except ex.OptNotFound:
        pass
    try:
        kwargs['bucket'] = conf_get_string_scope(svc, conf, s, 'bucket')
    except ex.OptNotFound:
        svc.log.error("config file section %s must have bucket set" % s)
        return
    try:
        kwargs['src'] = conf_get_string_scope(svc, conf, s, 'src').split()
    except ex.OptNotFound:
        svc.log.error("config file section %s must have src set" % s)
        return
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs.update(get_sync_args(conf, s, svc))
    sc = __import__('resSyncS3')
    r = sc.syncS3(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
def add_syncs_zfssnap(svc, conf, s):
    """Parse config section ``s`` and add a sync.zfssnap resource to ``svc``.

    'dataset' is mandatory; 'name', 'keep' and 'recursive' are optional.
    """
    kwargs = {}
    try:
        kwargs['name'] = conf_get_string_scope(svc, conf, s, 'name')
    except ex.OptNotFound:
        pass
    try:
        kwargs['keep'] = conf_get_int_scope(svc, conf, s, 'keep')
    except ex.OptNotFound:
        pass
    try:
        kwargs['recursive'] = conf_get_boolean_scope(svc, conf, s, 'recursive')
    except ex.OptNotFound:
        pass
    try:
        kwargs['dataset'] = conf_get_string_scope(svc, conf, s, 'dataset').split()
    except ex.OptNotFound:
        svc.log.error("config file section %s must have dataset set" % s)
        return
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs.update(get_sync_args(conf, s, svc))
    sc = __import__('resSyncZfsSnap')
    r = sc.syncZfsSnap(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
def add_syncs_btrfssnap(svc, conf, s):
    """Parse config section ``s`` and add a sync.btrfssnap resource to ``svc``.

    'subvol' is mandatory; 'name' and 'keep' are optional.
    """
    kwargs = {}
    try:
        kwargs['name'] = conf_get_string_scope(svc, conf, s, 'name')
    except ex.OptNotFound:
        pass
    try:
        kwargs['keep'] = conf_get_int_scope(svc, conf, s, 'keep')
    except ex.OptNotFound:
        pass
    try:
        kwargs['subvol'] = conf_get_string_scope(svc, conf, s, 'subvol').split()
    except ex.OptNotFound:
        svc.log.error("config file section %s must have subvol set" % s)
        return
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs.update(get_sync_args(conf, s, svc))
    sc = __import__('resSyncBtrfsSnap')
    r = sc.syncBtrfsSnap(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
def add_syncs_necismsnap(svc, conf, s):
    """Parse config section ``s`` and add a sync.necismsnap resource to ``svc``.

    'devs' is mandatory; 'array' is optional.
    """
    kwargs = {}
    try:
        kwargs['array'] = conf_get_string_scope(svc, conf, s, 'array')
    except ex.OptNotFound:
        pass
    try:
        kwargs['devs'] = conf_get_string_scope(svc, conf, s, 'devs')
    except ex.OptNotFound:
        svc.log.error("config file section %s must have devs set" % s)
        return
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs.update(get_sync_args(conf, s, svc))
    # prefer the OS-specific implementation when available
    try:
        sc = __import__('resSyncNecIsmSnap'+rcEnv.sysname)
    except:
        sc = __import__('resSyncNecIsmSnap')
    r = sc.syncNecIsmSnap(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
def add_syncs_evasnap(svc, conf, s):
    """Parse config section ``s`` and add a sync.evasnap resource to ``svc``.

    'eva_name' is mandatory; 'snap_name' defaults to the service name.
    'pairs' is a JSON list.
    """
    kwargs = {}
    try:
        kwargs['eva_name'] = conf_get_string(svc, conf, s, 'eva_name')
    except ex.OptNotFound:
        svc.log.error("config file section %s must have eva_name set" % s)
        return
    try:
        kwargs['snap_name'] = conf_get_string(svc, conf, s, 'snap_name')
    except ex.OptNotFound:
        kwargs['snap_name'] = svc.svcname
    import json
    pairs = []
    if 'pairs' in conf.options(s):
        pairs = json.loads(conf.get(s, 'pairs'))
    # NOTE(review): when 'pairs' is absent from the section, kwargs['pairs']
    # is never set; confirm the resource class tolerates that.
    if len(pairs) == 0:
        svc.log.error("config file section %s must have pairs set" % s)
        return
    else:
        kwargs['pairs'] = pairs
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs.update(get_sync_args(conf, s, svc))
    # prefer the OS-specific implementation when available
    try:
        sc = __import__('resSyncEvasnap'+rcEnv.sysname)
    except:
        sc = __import__('resSyncEvasnap')
    r = sc.syncEvasnap(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
def add_syncs_hp3parsnap(svc, conf, s):
    """Parse config section ``s`` and add a sync.hp3parsnap resource to ``svc``.

    'array' and a non-empty 'vv_names' list are mandatory.
    """
    kwargs = {}
    try:
        kwargs['array'] = conf_get_string_scope(svc, conf, s, 'array')
    except ex.OptNotFound:
        svc.log.error("config file section %s must have array set" % s)
        return
    try:
        vv_names = conf_get_string_scope(svc, conf, s, 'vv_names').split()
    except ex.OptNotFound:
        svc.log.error("config file section %s must have vv_names set" % s)
        return
    if len(vv_names) == 0:
        svc.log.error("config file section %s must have at least one vv_name set" % s)
        return
    kwargs['vv_names'] = vv_names
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs.update(get_sync_args(conf, s, svc))
    # prefer the OS-specific implementation when available
    try:
        sc = __import__('resSyncHp3parSnap'+rcEnv.sysname)
    except:
        sc = __import__('resSyncHp3parSnap')
    r = sc.syncHp3parSnap(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
def add_syncs_hp3par(svc, conf, s):
    """Parse config section ``s`` and add a sync.hp3par resource to ``svc``.

    'mode' and 'array' are mandatory. The remote copy group name is
    resolved per node (array -> rcg mapping via scoping impersonation).
    """
    kwargs = {}
    try:
        kwargs['mode'] = conf_get_string_scope(svc, conf, s, 'mode')
    except ex.OptNotFound:
        svc.log.error("config file section %s must have mode set" % s)
        return
    try:
        kwargs['array'] = conf_get_string_scope(svc, conf, s, 'array')
    except ex.OptNotFound:
        svc.log.error("config file section %s must have array set" % s)
        return
    rcg_names = {}
    for node in svc.nodes | svc.drpnodes:
        # NOTE(review): not wrapped in try/except — an unset 'rcg' for a
        # node would raise ex.OptNotFound out of this function.
        array = conf_get_string_scope(svc, conf, s, 'array', impersonate=node)
        rcg = conf_get_string_scope(svc, conf, s, 'rcg', impersonate=node)
        rcg_names[array] = rcg
    if len(rcg_names) == 0:
        svc.log.error("config file section %s must have rcg set" % s)
        return
    kwargs['rcg_names'] = rcg_names
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs.update(get_sync_args(conf, s, svc))
    # prefer the OS-specific implementation when available
    try:
        sc = __import__('resSyncHp3par'+rcEnv.sysname)
    except:
        sc = __import__('resSyncHp3par')
    r = sc.syncHp3par(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
def add_syncs_symsrdfs(svc, conf, s):
    """Parse config section ``s`` and add a sync.symsrdfs resource to ``svc``.

    'symdg' and the integer 'rdfg' group number are mandatory.
    """
    kwargs = {}
    try:
        kwargs['symdg'] = conf_get_string(svc, conf, s, 'symdg')
    except ex.OptNotFound:
        svc.log.error("config file section %s must have symdg set" % s)
        return
    try:
        kwargs['rdfg'] = conf_get_int(svc, conf, s, 'rdfg')
    except ex.OptNotFound:
        svc.log.error("config file section %s must have rdfg number set" % s)
        return
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs.update(get_sync_args(conf, s, svc))
    # prefer the OS-specific implementation when available
    try:
        sc = __import__('resSyncSymSrdfS'+rcEnv.sysname)
    except:
        sc = __import__('resSyncSymSrdfS')
    r = sc.syncSymSrdfS(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
def add_syncs_radosclone(svc, conf, s):
    """Parse config section ``s`` and add a sync.radosclone resource to ``svc``.

    'pairs' is mandatory; 'client_id' and 'keyring' are optional.
    """
    kwargs = {}
    try:
        kwargs['client_id'] = conf_get_string_scope(svc, conf, s, 'client_id')
    except ex.OptNotFound:
        pass
    try:
        kwargs['keyring'] = conf_get_string_scope(svc, conf, s, 'keyring')
    except ex.OptNotFound:
        pass
    try:
        kwargs['pairs'] = conf_get_string_scope(svc, conf, s, 'pairs').split()
    except ex.OptNotFound:
        svc.log.error("config file section %s must have pairs set" % s)
        return
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs.update(get_sync_args(conf, s, svc))
    # prefer the OS-specific implementation when available
    try:
        sc = __import__('resSyncRados'+rcEnv.sysname)
    except:
        sc = __import__('resSyncRados')
    r = sc.syncRadosClone(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
def add_syncs_radossnap(svc, conf, s):
    """Parse config section ``s`` and add a sync.radossnap resource to ``svc``.

    'images' is mandatory; 'client_id' and 'keyring' are optional.
    """
    kwargs = {}
    try:
        kwargs['client_id'] = conf_get_string_scope(svc, conf, s, 'client_id')
    except ex.OptNotFound:
        pass
    try:
        kwargs['keyring'] = conf_get_string_scope(svc, conf, s, 'keyring')
    except ex.OptNotFound:
        pass
    try:
        kwargs['images'] = conf_get_string_scope(svc, conf, s, 'images').split()
    except ex.OptNotFound:
        svc.log.error("config file section %s must have images set" % s)
        return
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs.update(get_sync_args(conf, s, svc))
    # prefer the OS-specific implementation when available
    try:
        sc = __import__('resSyncRados'+rcEnv.sysname)
    except:
        sc = __import__('resSyncRados')
    r = sc.syncRadosSnap(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
def add_syncs_symsnap(svc, conf, s):
    """Add a sync.symsnap resource (shared symclone parser)."""
    _add_syncs_symclone(svc, conf, s, "sync.symsnap")
def add_syncs_symclone(svc, conf, s):
    """Add a sync.symclone resource (shared symclone parser)."""
    _add_syncs_symclone(svc, conf, s, "sync.symclone")
def _add_syncs_symclone(svc, conf, s, t):
    """Shared parser for sync.symclone and sync.symsnap sections.

    t is the resource type string forwarded to the resource class.
    'pairs' and 'symid' are mandatory; 'recreate_timeout', 'consistent'
    and 'precopy' are optional.
    """
    kwargs = {}
    kwargs['type'] = t
    try:
        kwargs['pairs'] = conf_get_string(svc, conf, s, 'pairs').split()
    except ex.OptNotFound:
        svc.log.error("config file section %s must have pairs set" % s)
        return
    try:
        kwargs['symid'] = conf_get_string_scope(svc, conf, s, 'symid')
    except ex.OptNotFound:
        # fix: message used to say "sid" although the option read is "symid"
        svc.log.error("config file section %s must have symid set" % s)
        return
    try:
        kwargs['recreate_timeout'] = conf_get_int(svc, conf, s, 'recreate_timeout')
    except ex.OptNotFound:
        pass
    try:
        kwargs['consistent'] = conf_get_boolean(svc, conf, s, 'consistent')
    except ex.OptNotFound:
        pass
    try:
        kwargs['precopy'] = conf_get_boolean(svc, conf, s, 'precopy')
    except ex.OptNotFound:
        pass
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs.update(get_sync_args(conf, s, svc))
    # prefer the OS-specific implementation when available
    try:
        sc = __import__('resSyncSymclone'+rcEnv.sysname)
    except:
        sc = __import__('resSyncSymclone')
    r = sc.syncSymclone(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
def add_syncs_ibmdssnap(svc, conf, s):
    """Parse config section ``s`` and add a sync.ibmdssnap resource to ``svc``.

    'pairs', 'array', 'bgcopy' and 'recording' are all mandatory.
    """
    kwargs = {}
    try:
        kwargs['pairs'] = conf_get_string(svc, conf, s, 'pairs').split()
    except ex.OptNotFound:
        svc.log.error("config file section %s must have pairs set" % s)
        return
    try:
        kwargs['array'] = conf_get_string(svc, conf, s, 'array')
    except ex.OptNotFound:
        svc.log.error("config file section %s must have array set" % s)
        return
    try:
        kwargs['bgcopy'] = conf_get_boolean(svc, conf, s, 'bgcopy')
    except ex.OptNotFound:
        svc.log.error("config file section %s must have bgcopy set" % s)
        return
    try:
        kwargs['recording'] = conf_get_boolean(svc, conf, s, 'recording')
    except ex.OptNotFound:
        svc.log.error("config file section %s must have recording set" % s)
        return
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs.update(get_sync_args(conf, s, svc))
    # prefer the OS-specific implementation when available
    try:
        m = __import__('resSyncIbmdsSnap'+rcEnv.sysname)
    except:
        m = __import__('resSyncIbmdsSnap')
    r = m.syncIbmdsSnap(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
def add_syncs_nexenta(svc, conf, s):
    """Parse config section ``s`` and add a sync.nexenta resource to ``svc``.

    'name' and 'path' are mandatory. The per-node filer map is built
    from 'filer', 'filer@nodes', 'filer@drpnodes' and 'filer@<node>'
    options, most specific last.
    """
    kwargs = {}
    try:
        kwargs['name'] = conf_get_string(svc, conf, s, 'name')
    except ex.OptNotFound:
        svc.log.error("config file section %s must have 'name' set" % s)
        return
    try:
        kwargs['path'] = conf_get_string_scope(svc, conf, s, 'path')
    except ex.OptNotFound:
        svc.log.error("config file section %s must have path set" % s)
        return
    try:
        kwargs['reversible'] = conf_get_boolean_scope(svc, conf, s, "reversible")
    except:
        pass
    filers = {}
    if 'filer' in conf.options(s):
        for n in svc.nodes | svc.drpnodes:
            filers[n] = conf.get(s, 'filer')
    if 'filer@nodes' in conf.options(s):
        for n in svc.nodes:
            filers[n] = conf.get(s, 'filer@nodes')
    if 'filer@drpnodes' in conf.options(s):
        # fix: was iterating svc.nodes, assigning the drpnodes filer to
        # production nodes instead of drp nodes (copy-paste bug)
        for n in svc.drpnodes:
            filers[n] = conf.get(s, 'filer@drpnodes')
    # per-node overrides take precedence over group-scoped values
    for o in conf.options(s):
        if 'filer@' not in o:
            continue
        (filer, node) = o.split('@')
        if node in ('nodes', 'drpnodes'):
            continue
        filers[node] = conf.get(s, o)
    if rcEnv.nodename not in filers:
        # NOTE(review): logs but does not return, unlike other mandatory
        # option checks in this file — confirm this is intentional.
        svc.log.error("config file section %s must have filer@%s set" %(s, rcEnv.nodename))
    kwargs['filers'] = filers
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs.update(get_sync_args(conf, s, svc))
    import resSyncNexenta
    r = resSyncNexenta.syncNexenta(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
def add_syncs_netapp(svc, conf, s):
    """Parse config section ``s`` and add a sync.netapp resource to ``svc``.

    'path' and 'user' are mandatory. The per-node filer map is built
    from 'filer', 'filer@nodes', 'filer@drpnodes' and 'filer@<node>'
    options, most specific last.
    """
    kwargs = {}
    try:
        kwargs['path'] = conf_get_string_scope(svc, conf, s, 'path')
    except ex.OptNotFound:
        svc.log.error("config file section %s must have path set" % s)
        return
    try:
        kwargs['user'] = conf_get_string_scope(svc, conf, s, 'user')
    except ex.OptNotFound:
        svc.log.error("config file section %s must have user set" % s)
        return
    filers = {}
    if 'filer' in conf.options(s):
        for n in svc.nodes | svc.drpnodes:
            filers[n] = conf.get(s, 'filer')
    if 'filer@nodes' in conf.options(s):
        for n in svc.nodes:
            filers[n] = conf.get(s, 'filer@nodes')
    if 'filer@drpnodes' in conf.options(s):
        # fix: was iterating svc.nodes, assigning the drpnodes filer to
        # production nodes instead of drp nodes (copy-paste bug)
        for n in svc.drpnodes:
            filers[n] = conf.get(s, 'filer@drpnodes')
    # per-node overrides take precedence over group-scoped values
    for o in conf.options(s):
        if 'filer@' not in o:
            continue
        (filer, node) = o.split('@')
        if node in ('nodes', 'drpnodes'):
            continue
        filers[node] = conf.get(s, o)
    if rcEnv.nodename not in filers:
        # NOTE(review): logs but does not return, unlike other mandatory
        # option checks in this file — confirm this is intentional.
        svc.log.error("config file section %s must have filer@%s set" %(s, rcEnv.nodename))
    kwargs['filers'] = filers
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs.update(get_sync_args(conf, s, svc))
    import resSyncNetapp
    r = resSyncNetapp.syncNetapp(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
def add_syncs_rsync(svc, conf, s):
    """Parse config section ``s`` and add a sync.rsync resource to ``svc``.

    'src' (glob-expanded) and 'dst' are mandatory. 'target' selects
    which node groups (nodes/drpnodes) receive the sync.
    """
    if s.startswith("sync#i"):
        # internal syncs have their own dedicated add function
        return
    options = []
    kwargs = {}
    kwargs['src'] = []
    try:
        _s = conf_get_string_scope(svc, conf, s, 'src')
    except ex.OptNotFound:
        svc.log.error("config file section %s must have src set" % s)
        return
    # each whitespace-separated src entry may be a glob pattern
    for src in _s.split():
        kwargs['src'] += glob.glob(src)
    try:
        kwargs['dst'] = conf_get_string_scope(svc, conf, s, 'dst')
    except ex.OptNotFound:
        svc.log.error("config file section %s must have dst set" % s)
        return
    try:
        kwargs['dstfs'] = conf_get_string_scope(svc, conf, s, 'dstfs')
    except ex.OptNotFound:
        pass
    try:
        _s = conf_get_string_scope(svc, conf, s, 'options')
        options += _s.split()
    except ex.OptNotFound:
        pass
    try:
        # for backward compat (use options keyword now)
        _s = conf_get_string_scope(svc, conf, s, 'exclude')
        options += _s.split()
    except ex.OptNotFound:
        pass
    kwargs['options'] = options
    try:
        kwargs['snap'] = conf_get_boolean_scope(svc, conf, s, 'snap')
    except ex.OptNotFound:
        pass
    try:
        _s = conf_get_string_scope(svc, conf, s, 'target')
        target = _s.split()
    except ex.OptNotFound:
        target = []
    try:
        kwargs['bwlimit'] = conf_get_int_scope(svc, conf, s, 'bwlimit')
    except ex.OptNotFound:
        pass
    # map the target keywords to the actual node sets
    targethash = {}
    if 'nodes' in target: targethash['nodes'] = svc.nodes
    if 'drpnodes' in target: targethash['drpnodes'] = svc.drpnodes
    kwargs['target'] = targethash
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs.update(get_sync_args(conf, s, svc))
    r = resSyncRsync.Rsync(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    svc += r
def add_task(svc, conf, s):
    """Parse config section ``s`` and add a task resource to ``svc``.

    'command' is mandatory; 'on_error', 'user' and 'confirmation' are
    optional.
    """
    kwargs = {}
    try:
        kwargs['command'] = conf_get_string_scope(svc, conf, s, 'command')
    except ex.OptNotFound:
        svc.log.error("'command' is not defined in config file section %s"%s)
        return
    try:
        kwargs['on_error'] = conf_get_string_scope(svc, conf, s, 'on_error')
    except ex.OptNotFound:
        pass
    try:
        kwargs['user'] = conf_get_string_scope(svc, conf, s, 'user')
    except ex.OptNotFound:
        pass
    try:
        kwargs['confirmation'] = conf_get_boolean_scope(svc, conf, s, 'confirmation')
    except ex.OptNotFound:
        pass
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs['monitor'] = get_monitor(conf, s, svc)
    kwargs['restart'] = get_restart(conf, s, svc)
    import resTask
    r = resTask.Task(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    r.pg_settings = get_pg_settings(svc, s)
    svc += r
def add_app(svc, conf, s):
    """Parse config section ``s`` and add an app resource to ``svc``.

    'script' is mandatory. 'start', 'stop', 'check' and 'info' are
    optional integer sequencing values; a non-integer value is an error
    that aborts the resource creation.
    """
    resApp = ximport('resApp')
    kwargs = {}
    try:
        kwargs['script'] = conf_get_string_scope(svc, conf, s, 'script')
    except ex.OptNotFound:
        svc.log.error("'script' is not defined in config file section %s"%s)
        return
    try:
        kwargs['start'] = conf_get_int_scope(svc, conf, s, 'start')
    except ex.OptNotFound:
        pass
    except:
        # present but not an integer
        svc.log.error("config file section %s param %s must be an integer" % (s, 'start'))
        return
    try:
        kwargs['stop'] = conf_get_int_scope(svc, conf, s, 'stop')
    except ex.OptNotFound:
        pass
    except:
        svc.log.error("config file section %s param %s must be an integer" % (s, 'stop'))
        return
    try:
        kwargs['check'] = conf_get_int_scope(svc, conf, s, 'check')
    except ex.OptNotFound:
        pass
    except:
        svc.log.error("config file section %s param %s must be an integer" % (s, 'check'))
        return
    try:
        kwargs['info'] = conf_get_int_scope(svc, conf, s, 'info')
    except ex.OptNotFound:
        pass
    except:
        svc.log.error("config file section %s param %s must be an integer" % (s, 'info'))
        return
    try:
        kwargs['timeout'] = conf_get_int_scope(svc, conf, s, 'timeout')
    except ex.OptNotFound:
        pass
    kwargs['rid'] = s
    kwargs['subset'] = get_subset(conf, s, svc)
    kwargs['tags'] = get_tags(conf, s, svc)
    kwargs['always_on'] = always_on_nodes_set(svc, conf, s)
    kwargs['disabled'] = get_disabled(conf, s, svc)
    kwargs['optional'] = get_optional(conf, s, svc)
    kwargs['monitor'] = get_monitor(conf, s, svc)
    kwargs['restart'] = get_restart(conf, s, svc)
    r = resApp.App(**kwargs)
    add_triggers_and_requires(svc, r, conf, s)
    r.pg_settings = get_pg_settings(svc, s)
    svc += r
def get_pg_settings(svc, s):
    """Return the process group (cgroup) settings dict for section *s*.

    Each pg_<name> option found in the section maps to a <name> key in
    the returned dict. Returns an empty dict when the service has no
    config file.
    """
    d = {}
    if not os.path.exists(svc.paths.cf):
        return d
    if s != "DEFAULT":
        # parse a fresh copy of the config and strip the DEFAULT options
        # so section-level lookups are not polluted by defaults
        conf = rcConfigParser.RawConfigParser()
        import codecs
        with codecs.open(svc.paths.cf, "r", "utf8") as f:
            if sys.version_info[0] >= 3:
                conf.read_file(f)
            else:
                conf.readfp(f)
        import copy
        for o in copy.copy(conf.defaults()):
            conf.remove_option("DEFAULT", o)
    else:
        conf = svc.config
    # all pg options follow the same lookup pattern: fold the nine
    # copy-pasted try/except blocks into a single loop
    for name in ("cpus", "cpu_shares", "cpu_quota", "mems",
                 "mem_oom_control", "mem_limit", "mem_swappiness",
                 "vmem_limit", "blkio_weight"):
        try:
            d[name] = conf_get_string_scope(svc, conf, s, "pg_" + name)
        except ex.OptNotFound:
            pass
    return d
def setup_logging(svcnames):
    """Setup logging to stream + logfile, and logfile rotation
    class Logger instance name: 'log'
    """
    global log
    # align the logging stream output on the longest service name
    names = list(svcnames)
    rcLogger.max_svcname_len = max([len(n) for n in names]) if names else 0
    log = rcLogger.initLogger('init')
def build(name, minimal=False, svcconf=None):
    """build(name) is in charge of Svc creation
    it return None if service Name is not managed by local node
    else it return new Svc instance

    Raises ex.excInitError on invalid configuration (bad flex settings,
    service env not allowed on this node, ...).
    """
    if svcconf is None:
        svcconf = os.path.join(rcEnv.pathetc, name) + '.conf'
    svcinitd = os.path.join(rcEnv.pathetc, name) + '.d'

    #
    # node discovery is hidden in a separate module to
    # keep it separate from the framework stuff
    #
    discover_node()

    #
    # parse service configuration file
    # class RawConfigParser instance name: 'conf'
    #
    svcmode = "hosted"
    conf = None
    # fixed: initialize defaults so the no-conf-file path cannot
    # NameError on the 'in defaults' tests below
    defaults = {}
    kwargs = {'svcname': name}
    if os.path.isfile(svcconf):
        conf = rcConfigParser.RawConfigParser()
        import codecs
        with codecs.open(svcconf, "r", "utf8") as f:
            if sys.version_info[0] >= 3:
                conf.read_file(f)
            else:
                conf.readfp(f)
        defaults = conf.defaults()
    if "mode" in defaults:
        svcmode = conf_get_string_scope({}, conf, 'DEFAULT', "mode")

    # gather the node-related DEFAULT options in a Storage, used as the
    # scoping context for the conf_get_*_scope lookups below
    d_nodes = Storage()
    d_nodes.svcname = name
    try:
        encapnodes = [n.lower() for n in conf_get_string_scope(d_nodes, conf, 'DEFAULT', "encapnodes").split() if n != ""]
    except ex.OptNotFound:
        encapnodes = []
    d_nodes['encapnodes'] = set(encapnodes)
    try:
        nodes = [n.lower() for n in conf_get_string_scope(d_nodes, conf, 'DEFAULT', "nodes").split() if n != ""]
    except ex.OptNotFound:
        nodes = [rcEnv.nodename]
    d_nodes['nodes'] = set(nodes)
    try:
        drpnodes = [n.lower() for n in conf_get_string_scope(d_nodes, conf, 'DEFAULT', "drpnodes").split() if n != ""]
    except ex.OptNotFound:
        drpnodes = []
    try:
        # the single 'drpnode' is folded into the drpnodes list
        drpnode = conf_get_string_scope(d_nodes, conf, 'DEFAULT', "drpnode").lower()
        if drpnode not in drpnodes and drpnode != "":
            drpnodes.append(drpnode)
    except ex.OptNotFound:
        drpnode = ''
    d_nodes['drpnodes'] = set(drpnodes)
    try:
        flex_primary = conf_get_string_scope(d_nodes, conf, 'DEFAULT', "flex_primary").lower()
    except ex.OptNotFound:
        # default flex primary: first node of the nodes list
        if len(nodes) > 0:
            flex_primary = nodes[0]
        else:
            flex_primary = ''
    d_nodes['flex_primary'] = flex_primary
    try:
        drp_flex_primary = conf_get_string_scope(d_nodes, conf, 'DEFAULT', "drp_flex_primary").lower()
    except ex.OptNotFound:
        if len(drpnodes) > 0:
            drp_flex_primary = drpnodes[0]
        else:
            drp_flex_primary = ''
    d_nodes['drp_flex_primary'] = drp_flex_primary

    if "pkg_name" in defaults:
        # cluster-package based modes carry the package name
        if svcmode in ["sg", "rhcs", "vcs"]:
            kwargs['pkg_name'] = conf_get_string_scope(d_nodes, conf, 'DEFAULT', "pkg_name")

    #
    # dynamically import the module matching the service mode
    # and instanciate a service
    #
    mod, svc_class_name = svcmode_mod_name(svcmode)
    svcMod = __import__(mod)
    svc = getattr(svcMod, svc_class_name)(**kwargs)

    #
    # Store useful properties
    #
    svc.svcmode = svcmode
    svc.initd = svcinitd
    svc.config = conf

    if hasattr(svc, "builder"):
        # mode-specific builders may set some properties themselves;
        # builder_props lists those we must not overwrite below
        builder_props = svc.builder_props
        svc.builder()
    else:
        builder_props = []

    #
    # Store and validate the service type
    #
    if "env" in defaults:
        svc.svc_env = conf_get_string_scope(d_nodes, conf, 'DEFAULT', "env")
    elif "service_type" in defaults:
        # 'service_type' is the deprecated name of 'env'
        svc.svc_env = conf_get_string_scope(d_nodes, conf, 'DEFAULT', "service_type")

    #
    # Setup service properties from config file content
    #
    if "nodes" not in builder_props:
        svc.nodes = set(nodes)
    if "drpnodes" not in builder_props:
        svc.drpnodes = set(drpnodes)
    if "drpnode" not in builder_props:
        svc.drpnode = drpnode
    if "encapnodes" not in builder_props:
        svc.encapnodes = set(encapnodes)
    if "flex_primary" not in builder_props:
        svc.flex_primary = flex_primary
    if "drp_flex_primary" not in builder_props:
        svc.drp_flex_primary = drp_flex_primary

    try:
        svc.lock_timeout = conf_get_int_scope(svc, conf, 'DEFAULT', 'lock_timeout')
    except ex.OptNotFound:
        pass

    if conf.has_option('DEFAULT', 'disable'):
        svc.disabled = conf.getboolean("DEFAULT", "disable")

    try:
        svc.presnap_trigger = conf_get_string_scope(svc, conf, 'DEFAULT', 'presnap_trigger').split()
    except ex.OptNotFound:
        pass
    try:
        svc.postsnap_trigger = conf_get_string_scope(svc, conf, 'DEFAULT', 'postsnap_trigger').split()
    except ex.OptNotFound:
        pass
    try:
        svc.disable_rollback = not conf_get_boolean_scope(svc, conf, 'DEFAULT', "rollback")
    except ex.OptNotFound:
        pass

    # encap flag: true when the local node hosts the encapsulated side
    if rcEnv.nodename in svc.encapnodes:
        svc.encap = True
    else:
        svc.encap = False

    #
    # amazon options
    #
    try:
        svc.aws = conf_get_string_scope(svc, conf, "DEFAULT", 'aws')
    except ex.OptNotFound:
        pass
    try:
        svc.aws_profile = conf_get_string_scope(svc, conf, "DEFAULT", 'aws_profile')
    except ex.OptNotFound:
        pass

    #
    # process group options
    #
    try:
        svc.create_pg = conf_get_boolean_scope(svc, conf, "DEFAULT", 'create_pg')
    except ex.OptNotFound:
        pass
    svc.pg_settings = get_pg_settings(svc, "DEFAULT")

    try:
        svc.autostart_node = conf_get_string_scope(svc, conf, 'DEFAULT', 'autostart_node').split()
    except ex.OptNotFound:
        pass

    try:
        # fixed: the value was fetched twice in the original; one lookup
        # is enough
        svc.anti_affinity = set(conf_get_string_scope(svc, conf, 'DEFAULT', 'anti_affinity').split())
    except ex.OptNotFound:
        pass

    try:
        svc.clustertype = conf_get_string_scope(svc, conf, 'DEFAULT', 'cluster_type')
    except ex.OptNotFound:
        pass

    if 'flex' in svc.clustertype:
        svc.ha = True
        try:
            svc.flex_min_nodes = conf_get_int_scope(svc, conf, 'DEFAULT', 'flex_min_nodes')
        except ex.OptNotFound:
            svc.flex_min_nodes = 1
        if svc.flex_min_nodes < 0:
            raise ex.excInitError("invalid flex_min_nodes '%d' (<0)."%svc.flex_min_nodes)
        nb_nodes = len(svc.autostart_node)
        if nb_nodes == 0:
            nb_nodes = 1
        if nb_nodes > 0 and svc.flex_min_nodes > nb_nodes:
            raise ex.excInitError("invalid flex_min_nodes '%d' (>%d nb of nodes)."%(svc.flex_min_nodes, nb_nodes))
        try:
            svc.flex_max_nodes = conf_get_int_scope(svc, conf, 'DEFAULT', 'flex_max_nodes')
        except ex.OptNotFound:
            svc.flex_max_nodes = nb_nodes
        if svc.flex_max_nodes < 0:
            raise ex.excInitError("invalid flex_max_nodes '%d' (<0)."%svc.flex_max_nodes)
        if svc.flex_max_nodes < svc.flex_min_nodes:
            raise ex.excInitError("invalid flex_max_nodes '%d' (<flex_min_nodes)."%svc.flex_max_nodes)
        # NOTE(review): the flex_cpu_low_threshold block below was
        # reconstructed from the symmetric flex_cpu_high_threshold
        # block; the source text was corrupted at this point.
        try:
            svc.flex_cpu_low_threshold = conf_get_int_scope(svc, conf, 'DEFAULT', 'flex_cpu_low_threshold')
        except ex.OptNotFound:
            svc.flex_cpu_low_threshold = 10
        if svc.flex_cpu_low_threshold < 0:
            raise ex.excInitError("invalid flex_cpu_low_threshold '%d' (<0)."%svc.flex_cpu_low_threshold)
        if svc.flex_cpu_low_threshold > 100:
            raise ex.excInitError("invalid flex_cpu_low_threshold '%d' (>100)."%svc.flex_cpu_low_threshold)
        try:
            svc.flex_cpu_high_threshold = conf_get_int_scope(svc, conf, 'DEFAULT', 'flex_cpu_high_threshold')
        except ex.OptNotFound:
            svc.flex_cpu_high_threshold = 90
        if svc.flex_cpu_high_threshold < 0:
            raise ex.excInitError("invalid flex_cpu_high_threshold '%d' (<0)."%svc.flex_cpu_high_threshold)
        if svc.flex_cpu_high_threshold > 100:
            raise ex.excInitError("invalid flex_cpu_high_threshold '%d' (>100)."%svc.flex_cpu_high_threshold)

    try:
        svc.show_disabled = conf_get_boolean_scope(svc, conf, 'DEFAULT', 'show_disabled')
    except ex.OptNotFound:
        svc.show_disabled = True

    """ prune service whose service type does not match host mode
    """
    if svc.svc_env != 'PRD' and rcEnv.node_env == 'PRD':
        raise ex.excInitError('not allowed to run on this node (svc env=%s node env=%s)' % (svc.svc_env, rcEnv.node_env))

    try:
        svc.drp_type = conf_get_string_scope(svc, conf, 'DEFAULT', 'drp_type')
    except ex.OptNotFound:
        pass
    try:
        svc.comment = conf_get_string_scope(svc, conf, 'DEFAULT', 'comment')
    except ex.OptNotFound:
        pass
    try:
        svc.monitor_action = conf_get_string_scope(svc, conf, 'DEFAULT', "monitor_action")
    except ex.OptNotFound:
        pass
    try:
        svc.app = conf_get_string_scope(svc, conf, 'DEFAULT', "app")
    except ex.OptNotFound:
        pass
    try:
        svc.drnoaction = conf_get_boolean_scope(svc, conf, 'DEFAULT', "drnoaction")
    except ex.OptNotFound:
        pass
    try:
        svc.bwlimit = conf_get_int_scope(svc, conf, 'DEFAULT', "bwlimit")
    except ex.OptNotFound:
        svc.bwlimit = None
    try:
        svc.clustername = conf_get_string_scope(svc, conf, 'DEFAULT', "cluster")
    except ex.OptNotFound:
        pass

    if minimal:
        # caller only needs the service properties, not the resources
        return svc

    #
    # instanciate resources
    #
    add_containers(svc, conf)
    add_resources('hb', svc, conf)
    add_resources('stonith', svc, conf)
    add_resources('ip', svc, conf)
    add_resources('disk', svc, conf)
    add_resources('fs', svc, conf)
    add_resources('share', svc, conf)
    add_resources('app', svc, conf)
    add_resources('task', svc, conf)

    # deprecated, folded into "disk"
    add_resources('vdisk', svc, conf)
    add_resources('vmdg', svc, conf)
    add_resources('loop', svc, conf)
    add_resources('drbd', svc, conf)
    add_resources('vg', svc, conf)
    add_resources('pool', svc, conf)

    add_syncs(svc, conf)

    svc.post_build()
    return svc
def is_service(f):
    """Tell whether path *f* is a service launcher: a link resolving to
    svcmgr, with a matching .conf file. Always true on Windows."""
    if os.name == 'nt':
        return True
    if os.path.realpath(f) != os.path.realpath(rcEnv.svcmgr):
        return False
    return os.path.exists(f + '.conf')
def list_services():
    """Return the names of services having a .conf file in pathetc,
    creating the directory on first use."""
    if not os.path.exists(rcEnv.pathetc):
        print("create dir %s"%rcEnv.pathetc)
        os.makedirs(rcEnv.pathetc)
    s = glob.glob(os.path.join(rcEnv.pathetc, '*.conf'))
    s = list(map(lambda x: os.path.basename(x)[:-5], s))
    l = []
    for name in s:
        # fixed: the original tested len(s) (the whole list), which can
        # never be 0 inside the loop; the intent is to skip empty names
        if len(name) == 0:
            continue
        if not is_service(os.path.join(rcEnv.pathetc, name)):
            continue
        l.append(name)
    return l
def build_services(status=None, svcnames=None, create_instance=False,
                   onlyprimary=False, onlysecondary=False, minimal=False):
    """returns a list of all services of status matching the specified status.
    If no status is specified, returns all services
    """
    # fixed: alias the module so the build() results below do not
    # shadow the 'svc' module name
    import svc as svc_mod

    if svcnames is None:
        svcnames = []

    check_privs()

    errors = []
    services = {}

    if type(svcnames) == str:
        svcnames = [svcnames]

    if len(svcnames) == 0:
        svcnames = list_services()
        missing_svcnames = []
    else:
        all_svcnames = list_services()
        missing_svcnames = sorted(list(set(svcnames) - set(all_svcnames)))
        for m in missing_svcnames:
            if create_instance:
                services[m] = svc_mod.Svc(m)
            else:
                errors.append("%s: service does not exist" % m)
        svcnames = list(set(svcnames) & set(all_svcnames))

    setup_logging(svcnames)

    for name in svcnames:
        try:
            svcobj = build(name, minimal=minimal)
        except (ex.excError, ex.excInitError) as e:
            errors.append("%s: %s" % (name, str(e)))
            svclog = rcLogger.initLogger(name, handlers=["file", "syslog"])
            svclog.error(str(e))
            continue
        except ex.excAbortAction:
            continue
        except:
            import traceback
            traceback.print_exc()
            continue
        # apply the status / primary / secondary filters
        if status is not None and svcobj.status() not in status:
            continue
        if onlyprimary and rcEnv.nodename not in svcobj.autostart_node:
            continue
        if onlysecondary and rcEnv.nodename in svcobj.autostart_node:
            continue
        services[svcobj.svcname] = svcobj
    return [s for n, s in sorted(services.items())], errors
def create(svcname, resources=[], interactive=False, provision=False):
    """Create a new service configuration file.

    svcname must be a single-element list. Each item of *resources* is a
    json-encoded dict: 'rid' selects an explicit section, 'rtype' an
    auto-numbered one, anything else merges into DEFAULT.
    Returns {'ret': 0, 'rid': [...]} on success, {'ret': 1} on error.
    (resources is never mutated, so the mutable default is safe here.)
    """
    if not isinstance(svcname, list):
        print("ouch, svcname should be a list object", file=sys.stderr)
        return {"ret": 1}
    if len(svcname) != 1:
        print("you must specify a single service name with the 'create' action", file=sys.stderr)
        return {"ret": 1}
    svcname = svcname[0]
    if len(svcname) == 0:
        print("service name must not be empty", file=sys.stderr)
        return {"ret": 1}
    if svcname in list_services():
        print("service", svcname, "already exists", file=sys.stderr)
        return {"ret": 1}
    cf = os.path.join(rcEnv.pathetc, svcname+'.conf')
    if os.path.exists(cf):
        # keep a backup of the pre-existing file
        import shutil
        print(cf, "already exists. save as "+svcname+".conf.bak", file=sys.stderr)
        shutil.move(cf, os.path.join(rcEnv.paths.pathtmp, svcname+".conf.bak"))
    try:
        f = open(cf, 'w')
    except:
        print("failed to open", cf, "for writing", file=sys.stderr)
        return {"ret": 1}
    defaults = {}
    sections = {}
    rtypes = {}
    import json
    for r in resources:
        try:
            d = json.loads(r)
        except:
            print("can not parse resource:", r, file=sys.stderr)
            return {"ret": 1}
        if 'rid' in d:
            # explicit section name, must be 'rtype#n'
            section = d['rid']
            if '#' not in section:
                print(section, "must be formatted as 'rtype#n'", file=sys.stderr)
                return {"ret": 1}
            l = section.split('#')
            if len(l) != 2:
                print(section, "must be formatted as 'rtype#n'", file=sys.stderr)
                return {"ret": 1}
            # fixed: the resource type is the part left of '#' (l[0]);
            # the original indexed l[1], which is the section number
            rtype = l[0]
            if rtype in rtypes:
                rtypes[rtype] += 1
            else:
                rtypes[rtype] = 0
            del(d['rid'])
            if section in sections:
                sections[section].update(d)
            else:
                sections[section] = d
        elif 'rtype' in d and d["rtype"] == "env":
            # the 'env' section is singular and unnumbered
            del(d["rtype"])
            if "env" in sections:
                sections["env"].update(d)
            else:
                sections["env"] = d
        elif 'rtype' in d and d["rtype"] != "DEFAULT":
            # auto-number the section from the per-rtype counter
            if 'rid' in d:
                del(d['rid'])
            rtype = d['rtype']
            if rtype in rtypes:
                section = '%s#%d'%(rtype, rtypes[rtype])
                rtypes[rtype] += 1
            else:
                section = '%s#0'%rtype
                rtypes[rtype] = 1
            if section in sections:
                sections[section].update(d)
            else:
                sections[section] = d
        else:
            # no rid and no (or DEFAULT) rtype: merge into DEFAULT
            if "rtype" in d:
                del(d["rtype"])
            defaults.update(d)

    from svcDict import KeyDict, MissKeyNoDefault, KeyInvalidValue
    try:
        keys = KeyDict(provision=provision)
        defaults.update(keys.update('DEFAULT', defaults))
        for section, d in sections.items():
            sections[section].update(keys.update(section, d))
    except (MissKeyNoDefault, KeyInvalidValue):
        if not interactive:
            return {"ret": 1}

    try:
        if interactive:
            defaults, sections = keys.form(defaults, sections)
    except KeyboardInterrupt:
        sys.stderr.write("Abort\n")
        return {"ret": 1}

    conf = rcConfigParser.RawConfigParser(defaults)
    for section, d in sections.items():
        conf.add_section(section)
        for key, val in d.items():
            if key == 'rtype':
                continue
            conf.set(section, key, val)

    conf.write(f)
    # fixed: close the handle so content is flushed to disk
    f.close()

    initdir = svcname+'.dir'
    svcinitdir = os.path.join(rcEnv.pathetc, initdir)
    if not os.path.exists(svcinitdir):
        os.makedirs(svcinitdir)
    fix_app_link(svcname)
    fix_exe_link(rcEnv.svcmgr, svcname)
    return {"ret": 0, "rid": sections.keys()}
def fix_app_link(svcname):
    """Ensure pathetc/<svcname>.d is a symlink to <svcname>.dir (posix only)."""
    # fixed: test the platform before the os.chdir side effect,
    # consistent with fix_exe_link
    if os.name != 'posix':
        return
    os.chdir(rcEnv.pathetc)
    src = svcname+'.d'
    dst = svcname+'.dir'
    try:
        os.readlink(src)
    except OSError:
        # src is not a symlink (or does not exist): create the target
        # directory and the link
        if not os.path.exists(dst):
            os.makedirs(dst)
        os.symlink(dst, src)
def fix_exe_link(dst, src):
    """Ensure pathetc/<src> is a symlink pointing at <dst> (posix only)."""
    if os.name != 'posix':
        return
    os.chdir(rcEnv.pathetc)
    try:
        target = os.readlink(src)
    except:
        # not a symlink yet: create it
        os.symlink(dst, src)
        target = dst
    if target != dst:
        # relink when pointing elsewhere
        os.unlink(src)
        os.symlink(dst, src)
opensvc-1.8~20170412/lib/rcStatsDarwin.py 0000644 0001750 0001750 00000011132 13073467726 020171 0 ustar jkelbert jkelbert import os
from rcUtilities import call, which
from rcGlobalEnv import rcEnv
import rcStats
class StatsProvider(rcStats.StatsProvider):
    """Darwin (OSX) statistics provider: extracts historical metrics
    from sar archives and custom collector files under var/stats.
    Each collector returns (cols, lines) where cols names the fields
    and lines is a list of value lists.
    """

    def customfile(self, metric, day):
        # path of a custom (non-sar) collector file for <metric><day>,
        # or None when it does not exist
        f = os.path.join(rcEnv.pathvar, 'stats', metric+day)
        if os.path.exists(f):
            return f
        return None

    def cpu(self, d, day, start, end):
        """Cpu usage metrics from 'sar -u' for day *day*, between
        *start* and *end*. *d* is the date prefix for timestamps."""
        cols = ['date',
                'cpu',
                'usr',
                'nice',
                'sys',
                'idle',
                'nodename']
        f = self.sarfile(day)
        lines = []
        if f is None:
            return cols, lines
        cmd = ['sar', '-u', '-f', f, '-s', start, '-e', end]
        (ret, buff, err) = call(cmd, errlog=False)
        for line in buff.split('\n'):
            l = line.split()
            # expect: time %usr %nice %sys %idle -- skip headers and
            # the trailing Average line
            if len(l) != 5:
                continue
            if l[1] == '%usr':
                continue
            if l[0] == 'Average:':
                continue
            (time, usr, nice, sys, idle) = l
            l = ['%s %s'%(d, time), 'all', usr, nice, sys, idle, self.nodename]
            lines.append(l)
        return cols, lines

    def mem_u(self, d, day, start, end):
        """Memory usage metrics from the custom 'mem_u' collector file.
        NOTE(review): the file fields are (free, inactive, active,
        speculative, wired); verify their mapping onto the kb* columns
        against the collector that writes this file."""
        cols = ['date',
                'kbmemfree',
                'kbmemused',
                'kbbuffers',
                'kbcached',
                'kbmemsys',
                'nodename']
        fname = self.customfile('mem_u', day)
        lines = []
        if fname is None:
            return cols, lines
        try:
            f = open(fname, 'r')
            buff = f.read()
            f.close()
        except:
            return cols, lines
        for line in buff.split('\n'):
            l = line.split()
            if len(l) != 6:
                continue
            (time, free, inactive, active, speculative, wired) = l
            l = ['%s %s'%(d, time), free, active, speculative, inactive, wired, self.nodename]
            lines.append(l)
        return cols, lines

    def blockdev(self, d, day, start, end):
        """Block device throughput metrics from 'sar -d'."""
        cols = ['date',
                'dev',
                'tps',
                'rsecps',
                'nodename']
        f = self.sarfile(day)
        lines = []
        if f is None:
            return cols, lines
        cmd = ['sar', '-d', '-f', f, '-s', start, '-e', end]
        (ret, buff, err) = call(cmd, errlog=False)
        for line in buff.split('\n'):
            l = line.split()
            # skip headers ('device', 'Disk:') and the Average line
            if len(l) != 4:
                continue
            if l[1] == 'device':
                continue
            if l[1] == 'Disk:':
                continue
            if l[0] == 'Average:':
                continue
            l.append(self.nodename)
            l[0] = '%s %s'%(d, l[0])
            lines.append(l)
        return cols, lines

    def netdev(self, d, day, start, end):
        """Network device traffic metrics from 'sar -n DEV'."""
        cols = ['date',
                'dev',
                'rxpckps',
                'rxkBps',
                'txpckps',
                'txkBps',
                'nodename']
        f = self.sarfile(day)
        lines = []
        if f is None:
            return cols, lines
        cmd = ['sar', '-n', 'DEV', '-f', f, '-s', start, '-e', end]
        (ret, buff, err) = call(cmd, errlog=False)
        for line in buff.split('\n'):
            l = line.split()
            if len(l) != 6:
                continue
            # skip the header, loopback and virtual interfaces
            if l[1] in ['IFACE', 'lo0'] :
                continue
            if 'dummy' in l[1] or 'vnet' in l[1] or 'veth' in l[1] or \
               'gif' in l[1] or 'stf' in l[1]:
                continue
            if l[0] == 'Average:':
                continue
            l.append(self.nodename)
            l[0] = '%s %s'%(d, l[0])
            lines.append(l)
        return cols, lines

    def netdev_err(self, d, day, start, end):
        """Network device error metrics from 'sar -n EDEV'."""
        cols = ['date',
                'dev',
                'rxerrps',
                'txerrps',
                'collps',
                'rxdropps',
                'nodename']
        f = self.sarfile(day)
        lines = []
        if f is None:
            return cols, lines
        cmd = ['sar', '-n', 'EDEV', '-f', f, '-s', start, '-e', end]
        (ret, buff, err) = call(cmd, errlog=False)
        for line in buff.split('\n'):
            l = line.split()
            if len(l) != 6:
                continue
            # skip the header, loopback and virtual interfaces
            if l[1] in ['IFACE', 'lo0'] :
                continue
            if 'dummy' in l[1] or 'vnet' in l[1] or 'veth' in l[1] or \
               'gif' in l[1] or 'stf' in l[1]:
                continue
            if l[0] == 'Average:':
                continue
            l.append(self.nodename)
            l[0] = '%s %s'%(d, l[0])
            lines.append(l)
        return cols, lines
opensvc-1.8~20170412/lib/checkRaidSas2SunOS.py 0000777 0001750 0001750 00000000000 13073467726 023716 2checkRaidSas2.py ustar jkelbert jkelbert opensvc-1.8~20170412/lib/rcCloudGandi.py 0000644 0001750 0001750 00000002510 13073467726 017737 0 ustar jkelbert jkelbert import rcCloud
import rcExceptions as ex
import socket
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
import libcloud.security
except ImportError:
raise ex.excInitError("apache-libcloud module must be installed")
class Cloud(rcCloud.Cloud):
    """Gandi cloud driver, backed by apache-libcloud."""
    mode = 'gandi'

    def __init__(self, s, auth):
        rcCloud.Cloud.__init__(self, s, auth)
        if 'key' not in auth:
            raise ex.excInitError("option 'key' is mandatory in gandi section")
        gandi = get_driver(Provider.GANDI)
        try:
            self.driver = gandi(auth['key'])
        except Exception as e:
            raise ex.excInitError("error login to gandi cloud %s: %s"%(s, str(e)))

    def app_id(self):
        return ''

    def cloud_id(self):
        # fixed: 'mode' is a class attribute, must be referenced
        # through self (bare 'mode' raised NameError)
        return self.mode

    def app_cloud_id(self):
        # fixed: same NameError as cloud_id
        return self.mode

    def list_svcnames(self):
        """Return (vm name, service name) pairs for all vms of all vapps."""
        l = []
        _id = self.app_cloud_id()
        try:
            vapps = self.driver.list_nodes()
        except socket.error as e:
            # fixed: 's' was undefined in this scope; report the mode
            raise ex.excExecError("error connecting to %s cloud manager"%self.mode)
        for vapp in vapps:
            __id = '.'.join((vapp.name, _id))
            for vm in vapp.extra['vms']:
                svcname = '.'.join((vm['name'], __id))
                l.append((vm['name'], svcname))
        return l
opensvc-1.8~20170412/lib/resDiskDrbd.py 0000644 0001750 0001750 00000014465 13073467726 017615 0 ustar jkelbert jkelbert import os
import resources as Res
import rcStatus
import rcExceptions as ex
from rcUtilities import which, justcall
from rcGlobalEnv import rcEnv
class Drbd(Res.Resource):
    """ Drbd device resource

        The tricky part is that drbd devices can be used as PV
        and LV can be used as drbd base devices. Beware of the
        the ordering deduced from rids and subsets.

        Start 'ups' and promotes the drbd devices to primary.
        Stop 'downs' the drbd devices.
    """
    def __init__(self,
                 rid=None,
                 res=None,
                 **kwargs):
        Res.Resource.__init__(self,
                              rid,
                              "disk.drbd",
                              **kwargs)
        self.res = res
        self.label = "drbd "+res
        self.drbdadm = None         # lazily resolved command name
        self.disks = set()          # disklist() cache

    def __str__(self):
        return "%s resource=%s" % (Res.Resource.__str__(self),\
                                   self.res)

    def files_to_sync(self):
        # the drbd resource definition must be replicated to peers
        cf = os.path.join(os.sep, 'etc', 'drbd.d', self.res+'.res')
        if os.path.exists(cf):
            return [cf]
        self.log.error("%s does not exist"%cf)
        return []

    def drbdadm_cmd(self, cmd):
        """Return the drbdadm argv for *cmd* applied to this resource."""
        if self.drbdadm is None:
            if which('drbdadm'):
                self.drbdadm = 'drbdadm'
            else:
                # fixed: self.log is a logger object, not callable, and
                # 'exc' was an undefined name (must be 'ex')
                self.log.error("drbdadm command not found")
                raise ex.excError
        return [self.drbdadm] + cmd.split() + [self.res]

    def devlist(self):
        """Set of device paths this resource exposes on the local node,
        parsed from 'drbdadm dump-xml'."""
        devps = set()
        (ret, out, err) = self.call(self.drbdadm_cmd('dump-xml'))
        if ret != 0:
            raise ex.excError
        from xml.etree.ElementTree import XML, fromstring
        tree = fromstring(out)
        for res in tree.getiterator('resource'):
            if res.attrib['name'] != self.res:
                continue
            for host in res.getiterator('host'):
                if host.attrib['name'] != rcEnv.nodename:
                    continue
                # drbd >= 8.4 nests the device under a volume element
                d = host.find('device')
                if d is None:
                    d = host.find('volume/device')
                if d is None:
                    continue
                devps |= set([d.text])
        return devps

    def disklist(self):
        """Set of underlying disks on the local node (cached)."""
        if self.disks != set():
            return self.disks
        self.disks = set()
        devps = set()
        (ret, out, err) = self.call(self.drbdadm_cmd('dump-xml'))
        if ret != 0:
            raise ex.excError
        from xml.etree.ElementTree import XML, fromstring
        tree = fromstring(out)
        for res in tree.getiterator('resource'):
            if res.attrib['name'] != self.res:
                continue
            for host in res.getiterator('host'):
                if host.attrib['name'] != rcEnv.nodename:
                    continue
                d = host.find('disk')
                if d is None:
                    d = host.find('volume/disk')
                if d is None:
                    continue
                devps |= set([d.text])
        try:
            # resolve base devices to disks with the os-specific helper
            u = __import__('rcUtilities'+rcEnv.sysname)
            self.disks = u.devs_to_disks(self, devps)
        except:
            self.disks = devps
        return self.disks

    def drbdadm_down(self):
        (ret, out, err) = self.vcall(self.drbdadm_cmd('down'))
        if ret != 0:
            raise ex.excError

    def drbdadm_up(self):
        (ret, out, err) = self.vcall(self.drbdadm_cmd('up'))
        if ret != 0:
            raise ex.excError

    def get_cstate(self):
        """Return the drbd connection state string for this resource."""
        self.prereq()
        (out, err, ret) = justcall(self.drbdadm_cmd('cstate'))
        if ret != 0:
            if "Device minor not allocated" in err:
                return "Unattached"
            else:
                raise ex.excError
        return out.strip()

    def prereq(self):
        # load the drbd kernel module on first use
        if not os.path.exists("/proc/drbd"):
            (ret, out, err) = self.vcall(['modprobe', 'drbd'])
            if ret != 0: raise ex.excError

    def start_connection(self):
        """Bring the drbd connection up, handling the possible states."""
        cstate = self.get_cstate()
        if cstate == "Connected":
            self.log.info("drbd resource %s is already up"%self.res)
        elif cstate == "StandAlone":
            # re-cycle the device to leave standalone mode
            self.drbdadm_down()
            self.drbdadm_up()
        elif cstate == "WFConnection":
            self.log.info("drbd resource %s peer node is not listening"%self.res)
            pass
        else:
            self.drbdadm_up()

    def get_roles(self):
        """Return [local_role, peer_role] from 'drbdadm role'."""
        out, err, ret = justcall(self.drbdadm_cmd('role'))
        if ret != 0:
            raise ex.excError(err)
        out = out.strip().split('/')
        if len(out) != 2:
            raise ex.excError(out)
        return out

    def start_role(self, role):
        roles = self.get_roles()
        if roles[0] != role:
            (ret, out, err) = self.vcall(self.drbdadm_cmd(role.lower()))
            if ret != 0:
                raise ex.excError
        else:
            self.log.info("drbd resource %s is already %s"%(self.res, role))

    def startstandby(self):
        self.start_connection()
        roles = self.get_roles()
        if roles[0] == "Primary":
            return
        self.start_role('Secondary')
        self.can_rollback = True

    def stopstandby(self):
        self.start_connection()
        roles = self.get_roles()
        if roles[0] == "Secondary":
            return
        self.start_role('Secondary')

    def start(self):
        self.start_connection()
        self.start_role('Primary')
        self.can_rollback = True

    def stop(self):
        self.drbdadm_down()

    def _status(self, verbose=False):
        """UP when both sides are UpToDate, DOWN when unconfigured,
        WARN otherwise."""
        try:
            roles = self.get_roles()
        except Exception as e:
            self.status_log(str(e))
            return rcStatus.WARN
        self.status_log(str(roles[0]), "info")
        (ret, out, err) = self.call(self.drbdadm_cmd('dstate'))
        if ret != 0:
            self.status_log("drbdadm dstate %s failed"%self.res)
            return rcStatus.WARN
        out = out.strip()
        if out == "UpToDate/UpToDate":
            return self.status_stdby(rcStatus.UP)
        elif out == "Unconfigured":
            return self.status_stdby(rcStatus.DOWN)
        self.status_log("unexpected drbd resource %s state: %s"%(self.res, out))
        return rcStatus.WARN
if __name__ == "__main__":
    # ad-hoc manual test: print the class help and a sample resource
    help(Drbd)

    v = Drbd(res='test')
    print(v)
opensvc-1.8~20170412/lib/rcAssetLinux.py 0000644 0001750 0001750 00000041601 13073467726 020031 0 ustar jkelbert jkelbert import os
import datetime
from rcUtilities import justcall, which, bdecode
import rcAsset
def is_container():
    """Tell whether we run inside an lxc container, from the
    'container=lxc' marker in pid 1's environment."""
    environ = '/proc/1/environ'
    if not os.path.exists(environ):
        return False
    with open(environ, 'r') as f:
        data = f.read()
    return "container=lxc" in bdecode(data)
class Asset(rcAsset.Asset):
def __init__(self, node):
    # detect the execution context first: containers get no dmidecode
    # data, and xen dom0/guest detection drives memory probing
    rcAsset.Asset.__init__(self, node)
    self.container = is_container()
    self.detect_xen()
    if self.container:
        self.dmidecode = []
    else:
        # cache the dmidecode output as a list of lines for the
        # various _get_* parsers
        out, err, ret = justcall(['dmidecode'])
        if ret != 0:
            self.dmidecode = []
        else:
            self.dmidecode = out.split('\n')
def _get_mem_bytes_esx(self):
    """Total memory (as a string, in MB) from the ESX vmware-cmd
    tool, '0' on any failure."""
    cmd = ['vmware-cmd', '-s', 'getresource', 'system.mem.totalMem']
    out, err, ret = justcall(cmd)
    if ret != 0:
        return '0'
    l = out.split(' = ')
    if len(l) < 2:
        return '0'
    try:
        # fixed: use integer division -- '/' under python3 yields a
        # float and the result became e.g. '1024.0'
        size = str(int(l[-1])//1024)
    except:
        size = '0'
    return size
def _get_mem_bytes_hv(self):
    """Total memory when running as a xen hypervisor: prefer virsh,
    fall back to xm, else report '0'."""
    if which('virsh'):
        return self._get_mem_bytes_virsh()
    if which('xm'):
        return self._get_mem_bytes_xm()
    return '0'
def _get_mem_bytes_virsh(self):
    # parse 'virsh nodeinfo' for the 'Memory size' line; the value is
    # the second-to-last field (the last field is the unit)
    cmd = ['virsh', 'nodeinfo']
    out, err, ret = justcall(cmd)
    if ret != 0:
        return '0'
    lines = out.split('\n')
    for line in lines:
        if 'Memory size' not in line:
            continue
        l = line.split()
        if len(l) < 2:
            continue
        return l[-2]
    return '0'
def _get_mem_bytes_xm(self):
    # parse 'xm info' for the 'total_mem' line; the value is the last
    # ':'-separated field
    cmd = ['xm', 'info']
    out, err, ret = justcall(cmd)
    if ret != 0:
        return '0'
    lines = out.split('\n')
    for line in lines:
        if 'total_mem' not in line:
            continue
        l = line.split(':')
        if len(l) < 2:
            continue
        return l[-1]
    return '0'
def _get_mem_bytes_phy(self):
    # physical memory from 'free -m': second line, second column is
    # the total in MB
    cmd = ['free', '-m']
    out, err, ret = justcall(cmd)
    if ret != 0:
        return '0'
    lines = out.split('\n')
    if len(lines) < 2:
        return '0'
    line = lines[1].split()
    if len(line) < 2:
        return '0'
    return line[1]
def detect_xen(self):
    # /sys/hypervisor/uuid exists on xen kernels: the all-zero uuid
    # identifies the hypervisor (dom0), any other uuid a guest
    c = os.path.join(os.sep, 'sys', 'hypervisor', 'uuid')
    self.xenguest = False
    self.xenhv = False
    if not os.path.exists(c):
        return
    with open(c, 'r') as f:
        if '00000000-0000-0000-0000-000000000000' in f.read():
            self.xenhv = True
        else:
            self.xenguest = True
def is_esx_hv(self):
    # truthy (the tool path) when vmware-cmd is installed, i.e. when
    # running on an ESX hypervisor console
    return which('vmware-cmd')
def _get_mem_bytes(self):
    """Total memory (string, MB), dispatching on the detected
    execution context (xen dom0, esx console, bare metal)."""
    if self.xenhv:
        return self._get_mem_bytes_hv()
    elif self.is_esx_hv():
        s = self._get_mem_bytes_esx()
        if s == '0':
            return self._get_mem_bytes_phy()
        # fixed: the esx value was computed but never returned, making
        # this branch fall through and return None
        return s
    else:
        return self._get_mem_bytes_phy()
def _get_mem_banks(self):
    # count populated 'Memory Device' stanzas in the dmidecode output:
    # a slot is considered populated when its 'Size:' value parses as
    # an integer (empty slots report a non-numeric size)
    if self.container:
        return 'n/a'
    banks = 0
    inBlock = False
    # NOTE(review): inBlock is never reset to False, so all 'Size:'
    # lines after the first 'Memory Device' stanza are examined --
    # confirm this matches the dmidecode layout expectations
    for l in self.dmidecode:
        if not inBlock and l == "Memory Device":
            inBlock = True
        if inBlock and "Size:" in l:
            e = l.split()
            if len(e) == 3:
                try:
                    # the int() parse is the populated-slot filter;
                    # the value itself is unused
                    size = int(e[1])
                    banks += 1
                except:
                    pass
    return str(banks)
def _get_mem_slots(self):
    """Number of memory device slots reported by dmidecode, '0' when
    the information is unavailable."""
    if self.container:
        return 'n/a'
    for line in self.dmidecode:
        if 'Number Of Devices:' in line:
            return line.split()[-1]
    return '0'
def _get_os_vendor(self):
    # identify the distribution vendor by probing the usual release
    # files, most specific first
    if os.path.exists('/etc/lsb-release'):
        with open('/etc/lsb-release') as f:
            for line in f.readlines():
                if 'DISTRIB_ID' in line:
                    return line.split('=')[-1].replace('\n','').strip('"')
    if os.path.exists('/etc/debian_version'):
        return 'Debian'
    if os.path.exists('/etc/SuSE-release'):
        return 'SuSE'
    if os.path.exists('/etc/vmware-release'):
        return 'VMware'
    if os.path.exists('/etc/oracle-release'):
        return 'Oracle'
    if os.path.exists('/etc/redhat-release'):
        # redhat-release is shared by several derivatives: inspect the
        # content to discriminate
        with open('/etc/redhat-release', 'r') as f:
            buff = f.read()
            if 'CentOS' in buff:
                return 'CentOS'
            elif 'Oracle' in buff:
                return 'Oracle'
            else:
                return 'Red Hat'
    return 'Unknown'
def _get_os_release_lsb(self):
    # release string from /etc/lsb-release: prefer DISTRIB_RELEASE,
    # fall back to DISTRIB_DESCRIPTION; the vendor name is stripped
    # from the value. Returns None when nothing usable is found.
    if not os.path.exists('/etc/lsb-release'):
        return
    with open('/etc/lsb-release') as f:
        for line in f.readlines():
            if 'DISTRIB_RELEASE' in line:
                r = line.split('=')[-1].replace('\n','').strip('"')
                r = r.replace(self._get_os_vendor(), '').strip()
                if r == "":
                    continue
                return r
            if 'DISTRIB_DESCRIPTION' in line:
                r = line.split('=')[-1].replace('\n','').strip('"')
                r = r.replace(self._get_os_vendor(), '').strip()
                if r == "":
                    continue
                return r
    return
def _get_os_release_debian_version(self):
    """Content of /etc/debian_version, or None when the file is
    absent or empty."""
    path = '/etc/debian_version'
    if not os.path.exists(path):
        return
    with open(path) as f:
        release = f.read().strip()
    return release if release != "" else None
def _get_os_release(self):
    # distribution release string, probing sources in order of
    # reliability: SuSE-release, lsb-release, debian_version, then the
    # first line of the remaining vendor release files
    files = ['/etc/debian_version',
             '/etc/vmware-release',
             '/etc/oracle-release',
             '/etc/redhat-release']
    if os.path.exists('/etc/SuSE-release'):
        # SuSE: join VERSION and PATCHLEVEL as 'X.Y'
        v = []
        with open('/etc/SuSE-release') as f:
            for line in f.readlines():
                if 'VERSION' in line:
                    v += [line.split('=')[-1].replace('\n','').strip('" ')]
                if 'PATCHLEVEL' in line:
                    v += [line.split('=')[-1].replace('\n','').strip('" ')]
        return '.'.join(v)
    r = self._get_os_release_lsb()
    if r:
        return r
    r = self._get_os_release_debian_version()
    if r:
        return r
    if os.path.exists('/etc/oracle-release') and \
       os.path.exists('/etc/redhat-release'):
        # Oracle VM: prefer the redhat-release content
        with open('/etc/oracle-release') as f1:
            if " VM " in f1.read():
                with open('/etc/redhat-release') as f2:
                    return f2.read().split('\n')[0].replace(self._get_os_vendor(), '').strip()
    for f in files:
        if os.path.exists(f):
            (out, err, ret) = justcall(['cat', f])
            if ret != 0:
                return 'Unknown'
            return out.split('\n')[0].replace(self._get_os_vendor(), '').strip()
    return 'Unknown'
def _get_os_kernel(self):
    """Kernel release string from 'uname -r'."""
    out, err, ret = justcall(['uname', '-r'])
    if ret != 0:
        return 'Unknown'
    return out.split('\n')[0]
def _get_os_arch(self):
    """Hardware architecture, via arch(1) when installed, else
    'uname -m'."""
    cmd = ['arch'] if which('arch') is not None else ['uname', '-m']
    out, err, ret = justcall(cmd)
    if ret != 0:
        return 'Unknown'
    return out.split('\n')[0]
def _get_cpu_freq(self):
    """First 'cpu MHz' value from /proc/cpuinfo, truncated to whole
    MHz; 'Unknown' when unavailable."""
    path = '/proc/cpuinfo'
    if not os.path.exists(path):
        return 'Unknown'
    with open(path, 'r') as f:
        for line in f.readlines():
            if 'cpu MHz' not in line:
                continue
            return line.split(':')[1].strip().split('.')[0]
    return 'Unknown'
def _get_cpu_cores(self):
    # count distinct (physical id, core id) pairs in /proc/cpuinfo;
    # falls back to the die count when no core ids are exposed
    if self.container:
        return 'n/a'
    try:
        with open('/proc/cpuinfo') as f:
            lines = f.readlines()
    except:
        return '0'
    phy = {}
    # NOTE(review): assumes each 'physical id' line precedes its
    # 'core id' lines -- 'id' would be unbound otherwise; confirm
    # against the /proc/cpuinfo layout
    for line in lines:
        if 'physical id' in line:
            id = line.split(":")[-1].strip()
            if id not in phy:
                phy[id] = []
        elif 'core id' in line:
            coreid = line.split(":")[-1].strip()
            if coreid not in phy[id]:
                phy[id].append(coreid)
    n_cores = 0
    for id, coreids in phy.items():
        n_cores += len(coreids)
    if n_cores == 0:
        return self._get_cpu_dies()
    return str(n_cores)
def _get_cpu_dies_dmi(self):
    """Socket count from 'Processor Information' stanzas in the
    dmidecode output."""
    if self.container:
        return 'n/a'
    count = len([line for line in self.dmidecode
                 if 'Processor Information' in line])
    return str(count)
def _get_cpu_dies_cpuinfo(self):
    # socket count from /proc/cpuinfo: number of distinct
    # 'physical id' lines, falling back to counting 'processor'
    # lines when physical ids are not exposed
    if self.container:
        return 'n/a'
    try:
        with open('/proc/cpuinfo') as f:
            lines = f.readlines()
    except:
        return '0'
    _lines = set([l for l in lines if 'physical id' in l])
    n_dies = len(_lines)
    if n_dies > 0:
        return str(n_dies)
    # vmware do not show processor physical id
    _lines = [l for l in lines if 'processor' in l]
    n_dies = len(_lines)
    return str(n_dies)
def _get_cpu_threads(self):
    """Hardware thread count: number of 'physical id' lines in
    /proc/cpuinfo, falling back to the die count when absent."""
    if self.container:
        return 'n/a'
    try:
        with open('/proc/cpuinfo') as f:
            cpuinfo = f.readlines()
    except:
        return '0'
    thread_lines = [line for line in cpuinfo if 'physical id' in line]
    if len(thread_lines) == 0:
        return self._get_cpu_dies()
    return str(len(thread_lines))
def _get_cpu_dies(self):
    """Socket count: prefer /proc/cpuinfo, fall back to dmidecode."""
    count = self._get_cpu_dies_cpuinfo()
    if count == '0':
        count = self._get_cpu_dies_dmi()
    return count
def _get_cpu_model(self):
    """CPU model string, from the first 'model name' line of
    /proc/cpuinfo."""
    out, err, ret = justcall(['grep', 'model name', '/proc/cpuinfo'])
    if ret != 0:
        return 'Unknown'
    first_line = out.split('\n')[0]
    return first_line.split(':')[1].strip()
def _get_serial_1(self):
    """Serial number from the dmidecode 'System Information' stanza,
    'Unknown' when absent."""
    try:
        start = self.dmidecode.index('System Information')
    except ValueError:
        return 'Unknown'
    for line in self.dmidecode[start+1:]:
        if 'Serial Number:' in line:
            return line.split(':')[-1].strip()
    return 'Unknown'
def _get_serial_2(self):
    """ Serial number from the dmidecode 'Chassis Information' stanza.
        Dell poweredge 2500 are known to be in this case
        (no serial in the System Information stanza).
    """
    try:
        i = self.dmidecode.index('Chassis Information')
    except ValueError:
        return 'Unknown'
    for l in self.dmidecode[i+1:]:
        if 'Serial Number:' in l:
            return l.split(':')[-1].strip()
    return 'Unknown'
def _get_serial(self):
    """Return the node serial number, or 'n/a' inside a container.

    Tries the System Information dmi section first, then the
    Chassis Information section when the first yields nothing usable.
    """
    if self.container:
        return 'n/a'
    serial = self._get_serial_1()
    if serial not in ('Unknown', 'Not Specified'):
        return serial
    return self._get_serial_2()
def _get_bios_version(self):
if self.container:
return 'n/a'
v = ""
rev = ""
try:
i = self.dmidecode.index('BIOS Information')
except ValueError:
return ''
for l in self.dmidecode[i+1:]:
if 'Version:' in l:
v = l.split(':')[-1].strip()
break
for l in self.dmidecode[i+1:]:
if 'BIOS Revision:' in l:
rev = l.split(':')[-1].strip()
break
if len(rev) > 1 and not v.startswith(rev):
return v+" "+rev
return v
def _get_sp_version(self):
    """Return the service processor firmware version, '' when unknown."""
    if self.container:
        return 'n/a'
    version = self._get_sp_version_ipmi()
    return version if version else ''
def _get_sp_version_ipmi(self):
    """Fetch the service processor firmware version via ipmitool.

    Returns the 'Firmware Revision' value from 'ipmitool mc info',
    or None when ipmitool is absent, the command fails, or no
    revision line is present in the output.
    """
    if which("ipmitool") is None:
        return
    cmd = ["ipmitool", "mc", "info"]
    out, err, ret = justcall(cmd)
    if ret != 0:
        return
    for l in out.splitlines():
        if 'Firmware Revision' in l:
            v = l.split(' : ')[-1].strip()
            return v
    # bugfix: the original returned the local 'v' here, which is
    # unbound when no 'Firmware Revision' line was found, raising
    # UnboundLocalError. Callers test truthiness, so return None.
    return None
def _get_enclosure(self):
if self.container:
return 'n/a'
for l in self.dmidecode:
if 'Enclosure Name:' in l:
return l.split(':')[-1].strip()
return 'Unknown'
def _get_model(self):
if self.container:
return 'container'
elif self.xenguest and len(self.dmidecode) < 5:
return "Xen Virtual Machine (PVM)"
for l in self.dmidecode:
if 'Product Name:' in l:
return l.split(':')[-1].strip()
return 'Unknown'
def get_iscsi_hba_id(self):
    """Return the iscsi initiator name, or None when not configured."""
    cf = os.path.join(os.sep, 'etc', 'iscsi', 'initiatorname.iscsi')
    if not os.path.exists(cf):
        return None
    with open(cf, 'r') as f:
        return f.read().split('=')[-1].strip()
def __get_hba(self):
    """Return a list of (hba_id, hba_type, host) tuples for all
    initiators found on the node: fc/fcoe ports from sysfs, qla2xxx
    ports from procfs on legacy kernels, the iscsi initiator and the
    gce pseudo hba.
    """
    # fc / fcoe
    l = []
    import glob
    paths = glob.glob('/sys/class/fc_host/host*/port_name')
    for path in paths:
        # resolve the host device link: fcoe hosts sit behind an eth device
        host_link = '/'.join(path.split('/')[0:5])
        if '/eth' in os.path.realpath(host_link):
            hba_type = 'fcoe'
        else:
            hba_type = 'fc'
        with open(path, 'r') as f:
            hba_id = f.read().strip('0x').strip('\n')
        # extract the scsi host number from the sysfs path
        host = path.replace('/sys/class/fc_host/host', '')
        host = host[0:host.index('/')]
        l.append((hba_id, hba_type, host))
    # redhat 4 qla driver does not export hba portname in sysfs
    paths = glob.glob("/proc/scsi/qla2xxx/*")
    for path in paths:
        with open(path, 'r') as f:
            buff = f.read()
        for line in buff.split("\n"):
            if "adapter-port" not in line:
                continue
            _l = line.split("=")
            if len(_l) != 2:
                continue
            host = os.path.basename(path)
            e = (_l[1].rstrip(";"), "fc", host)
            if e not in l:
                l.append(e)
    # iscsi
    path = os.path.join(os.sep, 'etc', 'iscsi', 'initiatorname.iscsi')
    hba_type = 'iscsi'
    hba_id = self.get_iscsi_hba_id()
    if hba_id is not None:
        l.append((hba_id, hba_type, ''))
    # gce: the nodename stands in for a hardware initiator id
    if self._get_model() == "Google":
        from rcGlobalEnv import rcEnv
        l.append((rcEnv.nodename, "virtual", ''))
    return l
def _get_hba(self):
return map(lambda x: (x[0], x[1]), self.__get_hba())
def _get_targets(self):
    """Return a list of (hba_id, tgt_id) tuples describing the
    initiator-to-target relations, for fc/fcoe (sysfs), iscsi
    (iscsiadm sessions) and gce (gcloud regions).
    """
    import glob
    # fc / fcoe
    l = []
    hbas = self.__get_hba()
    for hba_id, hba_type, host in hbas:
        if not hba_type.startswith('fc'):
            continue
        # targets seen by this host, both transport and remote_ports views
        targets = glob.glob('/sys/class/fc_transport/target%s:*/port_name'%host)
        targets += glob.glob('/sys/class/fc_remote_ports/rport-%s:*/port_name'%host)
        for target in targets:
            with open(target, 'r') as f:
                tgt_id = f.read().strip('0x').strip('\n')
            if (hba_id, tgt_id) not in l:
                l.append((hba_id, tgt_id))
    # iscsi
    hba_id = self.get_iscsi_hba_id()
    if hba_id is not None:
        cmd = ['iscsiadm', '-m', 'session']
        out, err, ret = justcall(cmd)
        if ret == 0:
            """
            tcp: [1] 192.168.231.141:3260,1 iqn.2000-08.com.datacore:sds1-1
            tcp: [2] 192.168.231.142:3260,1 iqn.2000-08.com.datacore:sds2-1 (non-flash)
            """
            for line in out.split('\n'):
                if len(line) == 0:
                    continue
                # the last field is the target iqn
                line = line.replace(" (non-flash)", "")
                l.append((hba_id, line.split()[-1]))
    # gce: expose the project/region paths as pseudo targets
    if self._get_model() == "Google":
        try:
            cmd = ["gcloud", "compute", "regions", "list", "-q", "--format", "json"]
            out, err, ret = justcall(cmd)
            import json
            from rcGlobalEnv import rcEnv
            data = json.loads(out)
            hba_id = rcEnv.nodename
            for region in data:
                i = region["selfLink"].index("/projects")
                tgt_id = region["selfLink"][i:].replace("/projects", "").replace("/regions", "")
                l.append((hba_id, tgt_id))
        except:
            # best effort: gcloud may be absent or unauthenticated
            pass
    return l
if __name__ == "__main__":
    # standalone mode: dump the node asset inventory as json
    from rcGlobalEnv import rcEnv
    import json
    print(json.dumps(Asset(rcEnv.nodename).get_asset_dict(), indent=4))
opensvc-1.8~20170412/lib/rcIfconfigWindows.py 0000644 0001750 0001750 00000002466 13073467726 021037 0 ustar jkelbert jkelbert from subprocess import *
import rcIfconfig
import wmi
class ifconfig(rcIfconfig.ifconfig):
    """Windows implementation of the ifconfig parser, fed by WMI
    network adapter and adapter-configuration objects.
    """
    def parse(self, intf, intf_cf):
        """Build an interface object from a WMI adapter/config pair
        and append it to self.intf. Adapters with no address are
        silently skipped.
        """
        if intf_cf.IPAddress is None:
            return
        itf = rcIfconfig.interface(intf.NetConnectionID)
        self.intf.append(itf)
        # defaults
        itf.link_encap = ''
        itf.scope = ''
        itf.bcast = ''
        itf.mask = []
        itf.ipaddr = []
        itf.ip6addr = []
        itf.ip6mask = []
        itf.mtu = intf_cf.MTU
        itf.hwaddr = intf_cf.MACAddress
        try:
            itf.flag_up = intf.NetEnabled
        except:
            itf.flag_up = False
        itf.flag_broadcast = False
        itf.flag_running = False
        itf.flag_multicast = False
        itf.flag_loopback = False
        # split the address list into v4 and v6 families
        for pos, addr in enumerate(intf_cf.IPAddress):
            if ":" in addr:
                itf.ip6addr.append(addr)
                itf.ip6mask.append(intf_cf.IPsubnet[pos])
            else:
                itf.ipaddr.append(addr)
                itf.mask.append(intf_cf.IPsubnet[pos])

    def __init__(self, mcast=False):
        self.wmi = wmi.WMI()
        self.intf = []
        self.mcast_data = {}
        # NOTE(review): assumes Win32_NetworkAdapter and
        # Win32_NetworkAdapterConfiguration enumerate in matching
        # order -- confirm against the wmi module guarantees
        adapters = self.wmi.Win32_NetworkAdapter()
        configs = self.wmi.Win32_NetworkAdapterConfiguration()
        for adapter, config in zip(adapters, configs):
            self.parse(adapter, config)
if __name__ == "__main__" :
    # standalone mode: print the parsed interface table
    o = ifconfig()
    print(o)
opensvc-1.8~20170412/lib/resFs.py 0000644 0001750 0001750 00000015470 13073467726 016474 0 ustar jkelbert jkelbert import os
import resources as Res
import rcExceptions as ex
import rcStatus
from rcGlobalEnv import rcEnv
from rcUtilities import which, mimport
class Mount(Res.Resource):
    """Generic filesystem mount resource.

    Holds the device, mount point, filesystem type and options, and
    implements the start/status/provisioning logic shared by the
    os-specific fs drivers, which populate self.fsck_h and implement
    is_up()/can_check_writable().
    """
    def __init__(self,
                 rid=None,
                 mount_point=None,
                 device=None,
                 fs_type=None,
                 mount_options=None,
                 snap_size=None,
                 **kwargs):
        Res.Resource.__init__(self,
                              rid=rid,
                              type="fs",
                              **kwargs)
        self.mount_point = mount_point
        self.device = device
        self.fs_type = fs_type
        self.mount_options = mount_options
        self.snap_size = snap_size
        # human readable label, ex: "ext4 /dev/sda1@/mnt"
        self.label = device + '@' + mount_point
        if self.fs_type != "none":
            self.label = self.fs_type + " " + self.label
        # per-fs-type fsck command table, filled by os-specific drivers
        self.fsck_h = {}
        # file written to test the fs is writable
        self.testfile = os.path.join(mount_point, '.opensvc')
        # network filesystems: no local device to validate or fsck
        self.netfs = ['nfs', 'nfs4', 'cifs', 'smbfs', '9pfs', 'gpfs', 'afs', 'ncpfs']

    def info(self):
        """Return the key/value pairs pushed to the collector."""
        data = [
          ["dev", self.device],
          ["mnt", self.mount_point],
          ["mnt_opt", self.mount_options if self.mount_options else ""],
        ]
        return self.fmt_info(data)

    def start(self):
        """Pre-mount checks: device existence and mount point creation.

        The actual mount is implemented by the os-specific child class.
        """
        self.validate_dev()
        self.create_mntpt()

    def validate_dev(self):
        """Raise excError when the configured device does not exist.

        Skipped for pooled/network filesystems, pseudo filesystems and
        UUID=/LABEL= style device references.
        """
        if self.fs_type in ["zfs", "advfs"] + self.netfs:
            return
        if self.device == "none":
            # pseudo fs have no dev
            return
        if self.device.startswith("UUID=") or self.device.startswith("LABEL="):
            return
        if not os.path.exists(self.device):
            raise ex.excError("device does not exist %s" % self.device)

    def create_mntpt(self):
        """Create the mount point directory if missing (best effort)."""
        if self.fs_type in ["zfs", "advfs"]:
            return
        if os.path.exists(self.mount_point):
            return
        try:
            os.makedirs(self.mount_point)
            self.log.info("create missing mountpoint %s" % self.mount_point)
        except:
            self.log.warning("failed to create missing mountpoint %s" % self.mount_point)

    def fsck(self):
        """Run the filesystem check configured in fsck_h for this fs type.

        Optionally runs a report command first and only fscks when the
        report exit code is not in the 'clean' set. Raises excError
        when the fsck exit code is not in the allowed set.
        """
        if self.fs_type in ("", "none") or os.path.isdir(self.device):
            # bind mounts are in this case
            return
        if self.fs_type not in self.fsck_h:
            self.log.debug("no fsck method for %s"%self.fs_type)
            return
        bin = self.fsck_h[self.fs_type]['bin']
        if which(bin) is None:
            self.log.warning("%s not found. bypass."%self.fs_type)
            return
        if 'reportcmd' in self.fsck_h[self.fs_type]:
            cmd = self.fsck_h[self.fs_type]['reportcmd']
            (ret, out, err) = self.vcall(cmd, err_to_info=True)
            if ret not in self.fsck_h[self.fs_type]['reportclean']:
                return
        cmd = self.fsck_h[self.fs_type]['cmd']
        (ret, out, err) = self.vcall(cmd)
        if 'allowed_ret' in self.fsck_h[self.fs_type]:
            allowed_ret = self.fsck_h[self.fs_type]['allowed_ret']
        else:
            allowed_ret = [0]
        if ret not in allowed_ret:
            raise ex.excError

    def need_check_writable(self):
        """Writable test only makes sense for local, read-write mounts."""
        if 'ro' in self.mount_options.split(','):
            return False
        if self.fs_type in self.netfs:
            return False
        return True

    def can_check_writable(self):
        """Overload in child classes to check os-specific conditions
        when a write test might hang (solaris lockfs, linux multipath
        with queueing on and no active path).
        """
        return True

    @staticmethod
    def alarm_handler(signum, frame):
        # convert SIGALRM into an exception to break out of hung syscalls
        raise ex.excSignal

    def check_stat(self):
        """Return False when stat(1) on the device hangs more than 5s.

        A hung stat usually means the underlying storage is gone.
        """
        if which("stat") is None:
            return True
        import signal, subprocess
        signal.signal(signal.SIGALRM, self.alarm_handler)
        signal.alarm(5)
        try:
            proc = subprocess.Popen('stat '+self.device, shell=True,
                                    stderr=subprocess.PIPE,
                                    stdout=subprocess.PIPE)
            out, err = proc.communicate()
            signal.alarm(0)
        except ex.excSignal:
            return False
        return True

    def check_writable(self):
        """Return True when a test file can be written in the fs.

        A full filesystem (ENOSPC) is not considered a failure.
        """
        if not self.can_check_writable():
            return False
        try:
            f = open(self.testfile, 'w')
            f.write(' ')
            f.close()
        except IOError as e:
            if e.errno == 28:
                self.log.error('No space left on device. Invalidate writable test.')
                return True
            return False
        except:
            return False
        return True

    def _status(self, verbose=False):
        """Compute resource status: up + responsive + writable checks."""
        if self.is_up():
            if not self.check_stat():
                self.status_log("fs is not responding to stat")
                return rcStatus.WARN
            if self.need_check_writable() and not self.check_writable():
                self.status_log("fs is not writable")
                return rcStatus.WARN
            return self.status_stdby(rcStatus.UP)
        else:
            return self.status_stdby(rcStatus.DOWN)

    def devlist(self):
        """Return the set of devices this resource holds.

        Empty for pseudo/network filesystems, and when the device is
        itself provided by another resource of the service.
        """
        pseudofs = [
          'lofs',
          'none',
          'proc',
          'sysfs',
        ]
        if self.fs_type in pseudofs + self.netfs:
            return set([])
        for res in self.svc.get_resources():
            if hasattr(res, "is_child_dev") and res.is_child_dev(self.device):
                # don't account fs device if the parent resource is driven by the service
                return set([])
        return set([self.device])

    def __str__(self):
        return "%s mnt=%s dev=%s fs_type=%s mount_options=%s" % (Res.Resource.__str__(self),\
                self.mount_point, self.device, self.fs_type, self.mount_options)

    def __lt__(self, other):
        """
        Order so that deepest mountpoint can be umount first.
        """
        return self.mount_point < other.mount_point

    def provision(self):
        """Create the filesystem through the matching prov* module."""
        m = mimport("prov", "fs", self.fs_type, fallback=True)
        if not hasattr(m, "ProvisioningFs"):
            raise ex.excError("missing ProvisioningFs class in module %s" % str(m))
        prov = getattr(m, "ProvisioningFs")(self)
        prov.provisioner()

    def unprovision(self):
        """Destroy the filesystem through the matching prov* module."""
        m = mimport("prov", "fs", self.fs_type, fallback=True)
        if not hasattr(m, "ProvisioningFs"):
            raise ex.excError("missing ProvisioningFs class in module %s" % str(m))
        prov = getattr(m, "ProvisioningFs")(self)
        if hasattr(prov, "unprovisioner"):
            prov.unprovisioner()
if __name__ == "__main__":
    # developer helper: print class docs and a sample instantiation
    for c in (Mount,) :
        help(c)
    print(""" m=Mount("/mnt1","/dev/sda1","ext3","rw") """)
    m=Mount("/mnt1","/dev/sda1","ext3","rw")
    print("show m", m)
opensvc-1.8~20170412/lib/resDiskMdLinux.py 0000644 0001750 0001750 00000017412 13073467726 020315 0 ustar jkelbert jkelbert import re
import os
import glob
import time
import rcExceptions as ex
import resDisk
import rcStatus
from rcGlobalEnv import rcEnv
from rcUtilitiesLinux import major, get_blockdev_sd_slaves, \
devs_to_disks
from rcUtilities import which, justcall
class Disk(resDisk.Disk):
    """Linux software raid (mdadm) disk resource.

    The array is identified by its md uuid. For 'shared' arrays the
    member disk aliases are exported to a config file replicated to
    peer nodes, so a node can alert on missing members even when the
    array is not assembled locally.
    """
    # seconds to wait for the md devpath to appear after assembly
    startup_timeout = 10

    def __init__(self,
                 rid=None,
                 uuid=None,
                 shared=False,
                 **kwargs):
        self.label = "md " + uuid
        self.uuid = uuid
        self.shared = shared
        self.mdadm = "/sbin/mdadm"
        resDisk.Disk.__init__(self,
                              rid=rid,
                              name=uuid,
                              type='disk.md',
                              **kwargs)

    def info(self):
        """Return the key/value pairs pushed to the collector."""
        data = [
          ["uuid", self.uuid],
          ["shared", str(self.shared).lower()],
        ]
        return self.fmt_info(data)

    def md_config_file_name(self):
        """Path of the file caching the member disk aliases."""
        return os.path.join(rcEnv.pathvar, 'md_' + self.md_devname() + '.disklist')

    def md_config_import(self):
        """Load the cached member disk aliases; empty set without cache."""
        p = self.md_config_file_name()
        if not os.path.exists(p):
            return set()
        with open(p, "r") as f:
            buff = f.read()
        disks = set(buff.split("\n"))
        disks -= set([""])
        return disks

    def md_config_export(self):
        """Write the current member disk aliases to the cache file."""
        from rcDevTreeLinux import DevTree
        dt = DevTree()
        dt.load()
        disks = self.devlist()
        disk_ids = set()
        for disk in disks:
            treedev = dt.get_dev_by_devpath(disk)
            if not treedev:
                continue
            disk_ids.add(treedev.alias)
        with open(self.md_config_file_name(), "w") as f:
            f.write("\n".join(disk_ids))

    def postsync(self):
        # nothing to refresh after receiving peer files
        pass

    def down_state_alerts(self):
        """Log a status alert for cached members not present on this node."""
        if not self.shared:
            return
        devnames = self.md_config_import()
        # md members that are themselves md devices are not tracked
        devnames = set([d for d in devnames if not d.startswith("md")])
        if len(devnames) == 0:
            return
        from rcDevTreeLinux import DevTree
        dt = DevTree()
        dt.load()
        aliases = set([d.alias for d in dt.dev.values()])
        not_found = devnames - aliases
        if len(not_found) > 0:
            self.status_log("md member missing: %s" % ", ".join(sorted(list(not_found))))

    def presync(self):
        """Refresh the member cache before sync, when the service is up."""
        if not self.shared:
            return
        s = self.svc.group_status(excluded_groups=set(["app", "sync", "hb"]))
        if self.svc.options.force or s['overall'].status == rcStatus.UP:
            self.md_config_export()

    def files_to_sync(self):
        """Files to replicate to peers: the member cache, when shared."""
        if not self.shared:
            return []
        return [self.md_config_file_name()]

    def md_devname(self):
        #devname = self.svc.svcname+"."+self.rid.replace("#", "")
        return self.uuid

    def md_devpath(self):
        """Resolve an existing devpath for the md; excError when none."""
        devname = self.md_devname()
        l = glob.glob("/dev/disk/by-id/md-uuid-"+self.uuid) + glob.glob("/dev/md/"+devname)
        if len(l) == 0:
            raise ex.excError("unable to find a devpath for md")
        return l[0]

    def devpath(self):
        """Expected static devpath of the array."""
        return "/dev/md/"+self.uuid

    def assemble(self):
        """Assemble the array and wait for its devpath to appear."""
        cmd = [self.mdadm, "--assemble", self.devpath(), "-u", self.uuid]
        ret, out, err = self.vcall(cmd, warn_to_info=True)
        if ret == 2:
            # mdadm exit code 2: no changes needed
            self.log.info("no changes were made to the array")
        elif ret != 0:
            raise ex.excError
        else:
            self.wait_for_fn(self.has_it, self.startup_timeout, 1, errmsg="waited too long for devpath creation")

    def manage_stop(self):
        """Stop the assembled array."""
        cmd = [self.mdadm, "--manage", self.md_devpath(), "--stop"]
        ret, out, err = self.vcall(cmd, warn_to_info=True)
        if ret != 0:
            raise ex.excError

    def detail(self):
        """Return 'mdadm --detail' output, or a synthetic 'State : ...'
        line when the devpath is missing or the array inactive.
        """
        try:
            devpath = self.md_devpath()
        except ex.excError as e:
            return "State : " + str(e)
        if not os.path.exists(devpath):
            return "State : devpath does not exist"
        cmd = [self.mdadm, "--detail", devpath]
        out, err, ret = justcall(cmd)
        if "cannot open /dev" in err:
            return "State : devpath does not exist"
        if ret != 0:
            if "does not appear to be active" in err:
                return "State : md does not appear to be active"
            raise ex.excError(err)
        return out

    def detail_status(self):
        """Extract the 'State :' value from the detail output."""
        buff = self.detail()
        for line in buff.split("\n"):
            line = line.strip()
            if line.startswith("State :"):
                return line.split(" : ")[-1]
        return "unknown"

    def has_it(self):
        """True when the array is assembled (clean or active)."""
        state = self.detail_status().split(", ")[0]
        states = (
          "clean",
          "active",
        )
        if state in states:
            return True
        return False

    def is_up(self):
        """True when assembled; degraded states are pushed to status_log."""
        if not self.has_it():
            return False
        state = self.detail_status()
        if state in ("clean", "active"):
            return True
        if state != "devpath does not exist":
            self.status_log(state)
        return True

    def _status(self, verbose=False):
        """Delegate to the base class, adding down-state member alerts."""
        s = resDisk.Disk._status(self, verbose=verbose)
        if s in (rcStatus.STDBY_DOWN, rcStatus.DOWN):
            self.down_state_alerts()
        return s

    def do_start(self):
        """Assemble the array and create its static name, idempotently."""
        if self.has_it():
            self.log.info("md %s is already assembled" % self.uuid)
            return 0
        self.can_rollback = True
        self.assemble()
        self._create_static_name()

    def do_stop(self):
        """Stop the array, idempotently."""
        if not self.has_it():
            self.log.info("md %s is already down" % self.uuid)
            return
        self.manage_stop()

    def _create_static_name(self):
        self.create_static_name(self.md_devpath())

    def devlist(self):
        """Return (and cache) the set of member device paths."""
        if self.devs != set():
            return self.devs
        try:
            devpath = self.md_devpath()
        except ex.excError as e:
            return self.devlist_inactive()
        if os.path.exists(devpath):
            self.devs = self.devlist_active()
        else:
            self.devs = self.devlist_inactive()
        return self.devs

    def devlist_inactive(self):
        """Find members of a stopped array via 'mdadm -E --scan -v'."""
        devs = set()
        cmd = [self.mdadm, "-E", "--scan", "-v"]
        out, err, ret = justcall(cmd)
        if ret != 0:
            return devs
        lines = out.split("\n")
        if len(lines) < 2:
            return set()
        inblock = False
        for line in lines:
            # the devices= line belonging to our uuid block lists members
            if "UUID="+self.uuid in line:
                inblock = True
                continue
            if inblock and "devices=" in line:
                l = line.split("devices=")[-1].split(",")
                l = map(lambda x: os.path.realpath(x), l)
                devs |= set(l)
                break
        self.log.debug("found devs %s held by md %s" % (devs, self.uuid))
        return devs

    def devlist_active(self):
        """Find members of an assembled array via 'mdadm --detail'."""
        devs = set()
        try:
            lines = self.detail().split("\n")
        except ex.excError as e:
            return set()
        if len(lines) < 2:
            return set()
        for line in lines[1:]:
            if "/dev/" not in line:
                continue
            devpath = line.split()[-1]
            devpath = os.path.realpath(devpath)
            devs.add(devpath)
        self.log.debug("found devs %s held by md %s" % (devs, self.uuid))
        return devs

    def disklist(self):
        """Return (and cache) the set of underlying physical disks."""
        if self.disks != set():
            return self.disks
        self.disks = set()
        members = self.devlist()
        self.disks = devs_to_disks(self, members)
        self.log.debug("found disks %s held by md %s" % (self.disks, self.uuid))
        return self.disks

    def provision(self):
        """Create the md array through the provisioning module."""
        m = __import__("provDiskMdLinux")
        prov = getattr(m, "ProvisioningDisk")(self)
        prov.provisioner()
opensvc-1.8~20170412/lib/checkMpathSunOS.py 0000644 0001750 0001750 00000005077 13073467726 020413 0 ustar jkelbert jkelbert import checks
from rcUtilities import justcall
class check(checks.check):
    """Multipath path-count checker for SunOS, parsing 'mpathadm list LU'.

    Sample output:

    # mpathadm list LU
            /dev/rdsk/c6t600507680280809AB0000000000000E7d0s2
                    Total Path Count: 4
                    Operational Path Count: 4
            /scsi_vhci/disk@g60050768018085d7e0000000000004e5
                    Total Path Count: 1
                    Operational Path Count: 1
            /dev/rdsk/c6t60050768018085D7E0000000000004E4d0s2
                    Total Path Count: 1
                    Operational Path Count: 1
            /dev/rdsk/c6t60050768018085D7E00000000000056Bd0s2
                    Total Path Count: 4
                    Operational Path Count: 4
    """
    chk_type = "mpath"
    # per-service disklist cache, filled lazily by find_svc
    svcdevs = {}

    def find_svc(self, dev):
        """Return the svcname of the service holding dev, or ''."""
        for svc in self.svcs:
            if svc not in self.svcdevs:
                try:
                    devs = svc.disklist()
                except Exception as e:
                    # best effort: a failing service yields an empty list
                    devs = []
                self.svcdevs[svc] = devs
            if dev in self.svcdevs[svc]:
                return svc.svcname
        return ''

    def do_check(self):
        """Parse 'mpathadm list LU' output into one check entry per LU,
        keyed by wwid, valued with the operational path count.
        """
        cmd = ['mpathadm', 'list', 'LU']
        out, err, ret = justcall(cmd)
        if ret != 0:
            return self.undef
        lines = out.split('\n')
        if len(lines) < 4:
            return self.undef
        r = []
        dev = None
        wwid = ""
        for line in lines:
            if "/dev/" in line:
                # new mpath
                # - remember current dev
                # - remember current wwid
                # - reset path counter
                dev = line.strip()
                wwid = line[line.index('t')+1:line.rindex('d')]
                n = 0
            elif '/disk@g' in line:
                # unmapped dev
                # - remember current dev
                # - remember current wwid
                # - reset path counter
                dev = line.strip()
                wwid = '_'+line[line.index('@g')+2:]
                n = 0
            if "Total Path Count:" in line:
                continue
            if "Operational Path Count:" in line:
                # - store current dev if valid
                # - then:
                #   - reset path counter
                #   - reset dev
                n = int(line.split(':')[-1].strip())
                if dev is not None:
                    r.append({'chk_instance': wwid,
                              'chk_value': str(n),
                              'chk_svcname': self.find_svc(dev),
                             })
                dev = None
                n = 0
        return r
opensvc-1.8~20170412/lib/snap.py 0000644 0001750 0001750 00000010344 13073467726 016346 0 ustar jkelbert jkelbert import os
import resources as Res
import rcExceptions as ex
def find_mount(rs, dir):
    """Return the deepest enabled mount of resourceset rs whose
    mount_point is a substring of dir, or None when no mount matches.
    Mounts sort deepest-first thanks to Mount.__lt__ on mount_point.
    """
    candidates = sorted(rs.resources, reverse=True)
    for mount in candidates:
        if mount.is_disabled():
            continue
        if mount.mount_point in dir:
            return mount
    return None
def find_mounts(self, mounts_h):
    """Map each source path of a sync resource to the fs resource
    holding it, accumulating into mounts_h. Raises syncNotSnapable
    when a source path is not covered by any fs resource.
    """
    resourceset = self.svc.get_resourcesets("fs")[0]
    if resourceset is None:
        self.log.error("can not find fs resources encapsulating %s to snap (no fs resources)"%self.src)
        raise ex.syncNotSnapable
    for src in self.src:
        mount = find_mount(resourceset, src)
        if mount is None:
            self.log.error("can not find fs resources encapsulating %s to snap"%src)
            raise ex.syncNotSnapable
        mounts_h[src] = mount
    return mounts_h
class Snap(Res.Resource):
    """Base snapshot resource for sync drivers.

    Child classes implement snapcreate/snapdestroykey. self.snaps
    maps a mount point to snapshot info, including the 'snap_mnt'
    path substituted into the sync sources.
    """
    def __init__(self, rid, optional=False, disabled=False, tags=set([])):
        # mount_point -> snapshot info dict, filled by snapcreate
        self.snaps = {}
        Res.Resource.__init__(self, rid, "sync.snap", optional=optional,\
                              disabled=disabled, tags=tags)

    def try_snap(self, rset, action, rid=None):
        """Snapshot the filesystems backing the sync resources of rset,
        then point each resource's alt_src list at the snapshots.
        On snapshot failure, destroy any snapshot already taken.
        """
        # normalize legacy action names
        if action == "nodes":
            action = "sync_nodes"
        if action == "drpnodes":
            action = "sync_drp"
        mounts_h = {}
        for r in rset.resources:
            """ if rid is set, snap only the specified resource.
                Used by resources tagged 'delay_snap' on sync()
                if rid is not set, don't snap resources tagged 'delay_snap'
                (pre_action() code path)
            """
            if rid is None:
                if "delay_snap" in r.tags:
                    continue
            elif rid != r.rid:
                continue
            if r.is_disabled():
                continue
            # r.snap must be a strict boolean
            if r.snap is not True and r.snap is not False:
                self.log.error("service configuration error: 'snap' must be 'true' or 'false'. default is 'false'")
                raise ex.syncConfigSyntaxError
            if not r.snap:
                continue
            # only snap resources targeting the pushed node class
            if (action == "sync_nodes" and not 'nodes' in r.target) or \
               (action == "sync_drp" and not 'drpnodes' in r.target):
                self.log.debug("action %s but resource target is %s"%(action, r.target))
                continue
            mounts_h = find_mounts(r, mounts_h)
        mounts = set(mounts_h.values())
        for m in mounts:
            try:
                self.snapcreate(m)
            except ex.syncNotSnapable:
                self.log.error("Resource not snapable: "+m.__str__())
                continue
            except (ex.syncNotSnapable, ex.syncSnapExists, ex.syncSnapMountError,
                    ex.syncSnapCreateError, ex.syncSnapDestroyError):
                """Clean up the mess
                """
                self.snap_cleanup(rset)
                raise ex.excError
            except:
                raise
        """Update src dirs of every sync resource to point to an
           existing snap
        """
        for i, r in enumerate(rset.resources):
            r.alt_src = list(r.src)
            for j, src in enumerate(r.alt_src):
                if src not in mounts_h:
                    continue
                mnt = mounts_h[src].mount_point
                if mnt not in self.snaps:
                    continue
                snap_mnt = self.snaps[mnt]['snap_mnt']
                # rebase the source path onto the snapshot mount
                rset.resources[i].alt_src[j] = src.replace(os.path.join(mnt), os.path.join(snap_mnt), 1)

    def snap_cleanup(self, rset=None):
        """Destroy all recorded snapshots and drop alt_src overrides."""
        if not hasattr(self, 'snaps'):
            return
        if len(self.snaps) == 0 :
            return
        for s in self.snaps.keys():
            self.snapdestroykey(s)
        if rset is None:
            return
        for i, r in enumerate(rset.resources):
            if hasattr(rset.resources[i], 'alt_src'):
                delattr(rset.resources[i], 'alt_src')

    def snapcreate(self, m):
        """ create a snapshot for m
            add self.snaps[m] with
                dict(snapinfo key val)
        """
        raise ex.MissImpl

    def snapdestroykey(self, snaps_key):
        """ destroy a snapshot for a snap key
        """
        raise ex.MissImpl
opensvc-1.8~20170412/lib/provFs.py 0000644 0001750 0001750 00000006667 13073467726 016701 0 ustar jkelbert jkelbert from provisioning import Provisioning
from rcUtilities import justcall, which, protected_dir
from rcGlobalEnv import rcEnv
import os
import rcExceptions as ex
import shutil
from svcBuilder import conf_get_string_scope
class ProvisioningFs(Provisioning):
    """Generic filesystem provisioner.

    Child classes provide the mkfs/info command prefixes, ex:
      mkfs = ['mkfs.ext4', '-F']
      info = ['tune2fs', '-l']
    """
    def __init__(self, r):
        Provisioning.__init__(self, r)

    def check_fs(self):
        """Return True when the device already carries a filesystem,
        probed with the driver 'info' command. Always True when the
        driver has no probe command.
        """
        if not hasattr(self, "info"):
            return True
        cmd = self.info + [self.mkfs_dev]
        out, err, ret = justcall(cmd)
        if ret == 0:
            return True
        self.r.log.info("%s is not formatted"%self.mkfs_dev)
        return False

    def provision_dev(self):
        """Provision the backing logical volume, os-dependent."""
        if rcEnv.sysname == 'Linux':
            p = __import__("provDiskLvLinux")
        elif rcEnv.sysname == 'HP-UX':
            p = __import__("provDiskLvHP-UX")
        else:
            return
        p.ProvisioningDisk(self.r).provisioner()

    def unprovision_dev(self):
        """Unprovision the backing logical volume, os-dependent."""
        if rcEnv.sysname == 'Linux':
            p = __import__("provDiskLvLinux")
        else:
            return
        p.ProvisioningDisk(self.r).unprovisioner()

    def provisioner_fs(self):
        """Create mount point, backing device and filesystem as needed."""
        self.dev = conf_get_string_scope(self.r.svc, self.r.svc.config, self.r.rid, "dev")
        self.mnt = conf_get_string_scope(self.r.svc, self.r.svc.config, self.r.rid, "mnt")
        if not os.path.exists(self.mnt):
            os.makedirs(self.mnt)
            self.r.log.info("%s mount point created"%self.mnt)
        if not os.path.exists(self.dev) and self.r.fs_type not in self.r.netfs:
            self.provision_dev()
        self.mkfs_dev = self.dev
        if rcEnv.sysname == 'HP-UX':
            # mkfs on hp-ux wants the raw device node (rlvol)
            l = self.dev.split('/')
            l[-1] = 'r'+l[-1]
            self.mkfs_dev = '/'.join(l)
            if not os.path.exists(self.mkfs_dev):
                self.r.log.error("%s raw device does not exists"%self.mkfs_dev)
                return
        if not os.path.exists(self.mkfs_dev):
            raise ex.excError("abort fs provisioning: %s does not exist" % self.mkfs_dev)
        if self.check_fs():
            self.r.log.info("already provisioned")
            return
        if hasattr(self, "do_mkfs"):
            self.do_mkfs()
        elif hasattr(self, "mkfs"):
            try:
                opts = conf_get_string_scope(self.r.svc, self.r.svc.config, self.r.rid, "mkfs_opt").split()
            except:
                opts = []
            cmd = self.mkfs + opts + [self.mkfs_dev]
            (ret, out, err) = self.r.vcall(cmd)
            if ret != 0:
                self.r.log.error('Failed to format %s'%self.mkfs_dev)
                raise ex.excError
        else:
            raise ex.excError("no mkfs method implemented")
        self.r.log.info("provisioned")

    def provisioner(self):
        """Provision then start the fs resource. Bind mounts need nothing."""
        if "bind" in self.r.mount_options:
            return
        self.provisioner_fs()
        self.r.start()

    def purge_mountpoint(self):
        """Remove the mount point directory, refusing protected paths."""
        if os.path.exists(self.r.mount_point) and not protected_dir(self.r.mount_point):
            self.r.log.info("rm -rf %s" % self.r.mount_point)
            try:
                shutil.rmtree(self.r.mount_point)
            except Exception as e:
                raise ex.excError(str(e))

    def unprovisioner_fs(self):
        # hook for child classes needing fs-specific teardown
        pass

    def unprovisioner(self):
        """Stop the resource, then tear down fs, mount point and device."""
        self.r.stop()
        self.unprovisioner_fs()
        self.purge_mountpoint()
        self.unprovision_dev()
opensvc-1.8~20170412/lib/xmlrpcClient.py 0000644 0001750 0001750 00000153505 13073467726 020060 0 ustar jkelbert jkelbert from __future__ import print_function
import socket
import sys
socket.setdefaulttimeout(180)
kwargs = {}
try:
import ssl
kwargs["context"] = ssl._create_unverified_context()
except:
pass
try:
import xmlrpclib
except ImportError:
import xmlrpc.client as xmlrpclib
try:
import httplib
except ImportError:
import http.client as httplib
def get_proxy(uri):
    """Return a ServerProxy for uri.

    Tries with the module-level kwargs (unverified ssl context); when
    this interpreter's ServerProxy __init__ rejects the kwarg, falls
    back to a plain proxy. Other constructor errors are swallowed and
    yield None, matching the historical behavior.
    """
    try:
        return xmlrpclib.ServerProxy(uri, **kwargs)
    except Exception as exc:
        if "__init__" in str(exc):
            # no 'context' support in this ServerProxy
            return xmlrpclib.ServerProxy(uri)
from datetime import datetime, timedelta
import time
import random
import os
import sys
from rcGlobalEnv import rcEnv
import rcStatus
import rcExceptions as ex
# resolve the os-specific hostid implementation (hostidLinux, ...)
hostId = __import__('hostid'+rcEnv.sysname)
hostid = hostId.hostid()
# module-level flag: warn only once about unregistered nodes
rcEnv.warned = False
import logging
import logging.handlers
logfile = os.path.join(rcEnv.pathlog, 'xmlrpc.log')
log = logging.getLogger("xmlrpc")
log.setLevel(logging.INFO)
try:
    fileformatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    filehandler = logging.handlers.RotatingFileHandler(os.path.join(logfile),
                                                       maxBytes=5242880,
                                                       backupCount=5)
    filehandler.setFormatter(fileformatter)
    log.addHandler(filehandler)
except Exception as e:
    # best effort: file logging is optional (read-only fs, perms, ...)
    pass
try:
    if sys.version_info[0] >= 3:
        from multiprocessing import queue as Queue, Process
    else:
        from multiprocessing import Queue, Process
        from Queue import Empty
    if rcEnv.sysname == 'Windows':
        from multiprocessing import set_executable
        set_executable(os.path.join(sys.exec_prefix, 'pythonw.exe'))
    mp = True
except:
    # multiprocessing unavailable: only synchronous collector calls
    mp = False
def do_call(fn, args, kwargs, log, proxy, mode="synchronous"):
tries = 5
for i in range(tries):
try:
return _do_call(fn, args, kwargs, log, proxy, mode=mode)
except Exception as e:
s = str(e)
if "retry" in s:
# db table changed. retry immediately
max_wait = 0
elif "restart" in s or "Gateway" in s:
# collector overload issues, retry after a random delay
max_wait = 3
else:
# no need to retry at all there, unknown cause
raise
if max_wait > 0:
time.sleep(random.random()*max_wait)
log.warning("retry call %s on error %s" % (fn, str(e)))
log.error("failed to call %s after %d tries" % (fn, tries))
def _do_call(fn, args, kwargs, log, proxy, mode="synchronous"):
log.info("call remote function %s in %s mode"%(fn, mode))
try:
_b = datetime.now()
buff = getattr(proxy, fn)(*args, **kwargs)
_e = datetime.now()
_d = _e - _b
log.info("call %s done in %d.%03d seconds"%(fn, _d.seconds, _d.microseconds//1000))
return buff
except Exception as e:
_e = datetime.now()
_d = _e - _b
log.exception("call %s error after %d.%03d seconds"%(fn, _d.seconds, _d.microseconds//1000))
print("a stack has been saved to the rpc log", file=sys.stderr)
def call_worker(q, node):
    """Worker process entry point: consume queued collector calls
    until a None poison pill is received, then log the shutdown.
    A deliberate signal interruption is logged and swallowed.
    """
    o = Collector(worker=True, node=node)
    o.init()
    try:
        while True:
            item = q.get()
            if item is None:
                break
            fn, args, kwargs = item
            do_call(fn, args, kwargs, o.log, o.proxy, mode="asynchronous")
        o.log.info("shutdown")
    except ex.excSignal:
        o.log.info("interrupted on signal")
class Collector(object):
def submit(self, fn, *args, **kwargs):
self.init_worker()
self.log.info("enqueue %s"%fn)
self.queue.put((fn, args, kwargs), block=True)
def call(self, *args, **kwargs):
fn = args[0]
self.init(fn)
if rcEnv.dbopensvc == "None":
return {"ret": 1, "msg": "no collector defined. set 'dbopensvc' in node.conf"}
if len(self.proxy_methods) == 0:
return
if len(args) > 1:
args = args[1:]
else:
args = []
if fn == "register_node" and \
'register_node' not in self.proxy_methods:
print("collector does not support node registration", file=sys.stderr)
return
if rcEnv.uuid == "" and \
rcEnv.dbopensvc != "None" and \
not rcEnv.warned and \
self.auth_node and \
fn != "register_node":
print("this node is not registered. try 'nodemgr register'", file=sys.stderr)
print("to disable this warning, set 'dbopensvc = None' in node.conf", file=sys.stderr)
rcEnv.warned = True
return
return do_call(fn, args, kwargs, self.log, self, mode="synchronous")
def __init__(self, worker=False, node=None):
self.node = node
self.proxy = None
self.proxy_methods = []
self.comp_proxy = None
self.comp_proxy_methods = []
self._worker = worker
self.worker = None
self.queue = None
self.comp_fns = ['comp_get_data_moduleset',
'comp_get_svc_data_moduleset',
'comp_get_data',
'comp_get_svc_data',
'comp_attach_moduleset',
'comp_attach_svc_moduleset',
'comp_detach_moduleset',
'comp_detach_svc_moduleset',
'comp_get_ruleset',
'comp_get_svc_ruleset',
'comp_get_ruleset_md5',
'comp_attach_ruleset',
'comp_attach_svc_ruleset',
'comp_detach_ruleset',
'comp_detach_svc_ruleset',
'comp_list_ruleset',
'comp_list_moduleset',
'comp_show_status',
'comp_log_actions']
self.auth_node = True
self.log = logging.getLogger("xmlrpc%s"%('.worker' if worker else ''))
def get_methods_dbopensvc(self):
if rcEnv.dbopensvc == "None":
self.proxy_methods = []
return
self.log.debug("get dbopensvc method list")
try:
if self.proxy is None:
self.proxy = get_proxy(rcEnv.dbopensvc)
self.proxy_methods = self.proxy.system.listMethods()
except Exception as e:
self.log.error(str(e))
self.proxy = get_proxy("https://127.0.0.1/")
self.proxy_methods = []
self.log.debug("%d feed methods"%len(self.proxy_methods))
def get_methods_dbcompliance(self):
if rcEnv.dbcompliance == "None":
self.comp_proxy_methods = []
return
self.log.debug("get dbcompliance method list")
try:
if self.comp_proxy is None:
self.comp_proxy = get_proxy(rcEnv.dbcompliance)
self.comp_proxy_methods = self.comp_proxy.system.listMethods()
except Exception as e:
self.log.error(str(e))
self.comp_proxy = get_proxy("https://127.0.0.1/")
self.comp_proxy_methods = []
self.log.debug("%d compliance methods"%len(self.comp_proxy_methods))
def init(self, fn=None):
if fn is not None:
if fn in self.comp_fns:
if self.comp_proxy is not None:
return
elif self.proxy is not None:
return
if rcEnv.dbopensvc == "None":
return
try:
a = socket.getaddrinfo(rcEnv.dbopensvc_host, None)
if len(a) == 0:
raise Exception
dbopensvc_ip = a[0][-1][0]
except:
self.log.error("could not resolve %s to an ip address. disable collector updates."%rcEnv.dbopensvc_host)
try:
a = socket.getaddrinfo(rcEnv.dbcompliance_host, None)
if len(a) == 0:
raise Exception
dbcompliance_ip = a[0][-1][0]
except Exception as e:
self.log.error(str(e))
self.log.error("could not resolve %s to an ip address. disable collector updates."%rcEnv.dbcompliance_host)
try:
self.proxy = get_proxy(rcEnv.dbopensvc)
self.get_methods_dbopensvc()
except Exception as e:
self.log.error(str(e))
self.proxy = get_proxy("https://127.0.0.1/")
if fn in self.comp_fns:
try:
self.comp_proxy = get_proxy(rcEnv.dbcompliance)
self.get_methods_dbcompliance()
except:
self.comp_proxy = get_proxy("https://127.0.0.1/")
self.log.info("feed proxy %s"%str(self.proxy))
self.log.info("compliance proxy %s"%str(self.comp_proxy))
if "register_node" not in self.proxy_methods:
self.auth_node = False
def stop_worker(self):
if self.queue is None:
self.log.debug("worker already stopped (queue is None)")
return
if self.worker is None:
self.log.debug("worker already stopped (worker is None)")
return
try:
if not self.worker.is_alive():
self.log.debug("worker already stopped (not alive)")
return
except AssertionError:
self.log.error("don't stop worker (not a child of this process)")
return
self.log.debug("give poison pill to worker")
self.queue.put(None)
self.worker.join()
self.queue = None
self.worker = None
def init_worker(self):
if self._worker:
return
if self.worker is not None:
return
try:
self.queue = Queue()
except Exception as e:
self.log.error("Queue not supported. disable async mode. %s" % str(e))
self.queue = None
return
self.worker = Process(target=call_worker, name="xmlrpc", args=(self.queue, self.node))
self.worker.start()
self.log.info("worker started")
def begin_action(self, svc, action, begin, sync=True):
    """Record on the collector that <action> starts on service <svc>.

    Dispatches synchronously through the xmlrpc proxy, or through the
    async worker queue when sync is False and multiprocessing is usable.
    """
    cols = ['svcname',
            'action',
            'hostname',
            'hostid',
            'version',
            'begin',
            'cron']
    row = [str(svc.svcname),
           str(action),
           str(rcEnv.nodename),
           str(hostid),
           str(svc.node.agent_version()),
           str(begin),
           '1' if svc.options.cron else '0']
    args = [cols, row]
    if self.auth_node:
        # append the (uuid, nodename) credentials expected by the collector
        args += [(rcEnv.uuid, rcEnv.nodename)]
    if sync or not mp:
        self.proxy.begin_action(*args)
    else:
        self.submit("begin_action", *args)
def end_action(self, svc, action, begin, end, logfile, sync=True):
    """Parse an action logfile and push per-resource action records plus a
    wrap-up action record to the collector.

    Example logfile line:
    2009-11-11 01:03:25,252;;DISK.VG;;INFO;;unxtstsvc01_data is already up;;10200;;EOL

    The overall action status is 'err' if any ERROR line is seen, else
    'warn' if any WARNING line is seen, else 'ok'. Fix over the previous
    version: the logfile handle is now closed deterministically instead
    of being leaked.
    """
    err = 'ok'
    dateprev = None
    res = None
    res_err = None
    pid = None
    msg = None
    # fix: context manager closes the file descriptor (was open(...).read())
    with open(logfile, 'r') as ofile:
        lines = ofile.read()
    pids = set([])
    vars = ['svcname',
            'action',
            'hostname',
            'hostid',
            'pid',
            'begin',
            'end',
            'status_log',
            'status',
            'cron']
    vals = []
    for line in lines.split(';;EOL\n'):
        if line.count(';;') != 4:
            continue
        if ";;status_history;;" in line:
            continue
        date = line.split(';;')[0]
        # Push to database the previous line, so that begin and end
        # date are available.
        if res is not None and dateprev is not None:
            res = res.lower()
            res = res.replace(svc.svcname+'.','')
            vals.append([svc.svcname,
                         res+' '+action,
                         rcEnv.nodename,
                         hostid,
                         pid,
                         dateprev,
                         date,
                         msg,
                         res_err,
                         '1' if svc.options.cron else '0'])
        res_err = 'ok'
        (date, res, lvl, msg, pid) = line.split(';;')
        # database overflow protection: keep head and tail of huge messages
        trim_lim = 10000
        trim_tag = ' '
        trim_head = int(trim_lim/2)
        trim_tail = trim_head-len(trim_tag)
        if len(msg) > trim_lim:
            msg = msg[:trim_head]+' '+msg[-trim_tail:]
        pids |= set([pid])
        if lvl is None or lvl == 'DEBUG':
            continue
        if lvl == 'ERROR':
            err = 'err'
            res_err = 'err'
        if lvl == 'WARNING' and err != 'err':
            err = 'warn'
        if lvl == 'WARNING' and res_err != 'err':
            res_err = 'warn'
        dateprev = date
    # Push the last log entry, using 'end' as end date.
    if dateprev is not None:
        res = res.lower()
        res = res.replace(svc.svcname+'.','')
        vals.append([svc.svcname,
                     res+' '+action,
                     rcEnv.nodename,
                     hostid,
                     pid,
                     dateprev,
                     date,
                     msg,
                     res_err,
                     '1' if svc.options.cron else '0'])
    if len(vals) > 0:
        args = [vars, vals]
        if self.auth_node:
            args += [(rcEnv.uuid, rcEnv.nodename)]
        if sync or not mp:
            self.proxy.res_action_batch(*args)
        else:
            self.submit("res_action_batch", *args)
    # Complete the wrap-up database entry.
    # If logfile is empty, default to current process pid.
    if len(pids) == 0:
        pids = set([os.getpid()])
    args = [
        ['svcname',
         'action',
         'hostname',
         'hostid',
         'pid',
         'begin',
         'end',
         'time',
         'status',
         'cron'],
        [str(svc.svcname),
         str(action),
         str(rcEnv.nodename),
         str(hostid),
         ','.join(map(str, pids)),
         str(begin),
         str(end),
         str(end-begin),
         str(err),
         '1' if svc.options.cron else '0']
    ]
    if self.auth_node:
        args += [(rcEnv.uuid, rcEnv.nodename)]
    if sync or not mp:
        self.proxy.end_action(*args)
    else:
        self.submit("end_action", *args)
def svcmon_update_combo(self, g_vars, g_vals, r_vars, r_vals, sync=True):
    """Push service-level and resource-level monitor status.

    Uses a single combined rpc when the collector exports
    svcmon_update_combo, otherwise falls back to the two legacy calls
    svcmon_update + resmon_update.
    """
    if 'svcmon_update_combo' in self.proxy_methods:
        # one round-trip on modern collectors
        batches = [("svcmon_update_combo", [g_vars, g_vals, r_vars, r_vals])]
    else:
        # legacy collectors: two separate round-trips
        batches = [("svcmon_update", [g_vars, g_vals]),
                   ("resmon_update", [r_vars, r_vals])]
    for fname, args in batches:
        if self.auth_node:
            args = args + [(rcEnv.uuid, rcEnv.nodename)]
        if sync or not mp:
            getattr(self.proxy, fname)(*args)
        else:
            self.submit(fname, *args)
def _push_resinfo(self, svc, sync=True):
if 'update_resinfo' not in self.proxy_methods:
return
vars = ['res_svcname',
'res_nodename',
'cluster_type',
'rid',
'res_key',
'res_value']
vals = []
for r in svc.get_resources():
l = [
["driver", r.type],
["always_on", str(rcEnv.nodename in r.always_on).lower()],
["optional", str(r.optional).lower()],
["disabled", str(r.disabled).lower()],
["monitor", str(r.monitor).lower()],
["restart", str(r.nb_restart)],
]
if r.subset:
l.append(["subset", r.subset])
if len(r.tags) > 0:
l.append(["tags", " ".join(r.tags)])
vals += r.fmt_info(l)
if not hasattr(r, "info"):
continue
try:
vals += r.info()
except Exception as e:
print(e, file=sys.stderr)
if len(vals) == 0:
return
for val in vals:
try:
print("%-16s %-20s %s"%(val[3], val[4], val[5]))
except Exception as e:
print(e, val, file=sys.stderr)
args = [vars, vals]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
self.proxy.update_resinfo(*args)
def push_service(self, svc, sync=True):
def repr_config(svc):
import codecs
cf = os.path.join(rcEnv.pathetc, svc+'.conf')
if not os.path.exists(cf):
return
with codecs.open(cf, 'r', encoding="utf8") as f:
buff = f.read()
return buff
return
vars = ['svc_hostid',
'svc_name',
'svc_cluster_type',
'svc_flex_min_nodes',
'svc_flex_max_nodes',
'svc_flex_cpu_low_threshold',
'svc_flex_cpu_high_threshold',
'svc_env',
'svc_nodes',
'svc_drpnode',
'svc_drpnodes',
'svc_comment',
'svc_drptype',
'svc_autostart',
'svc_app',
'svc_containertype',
'svc_config',
'svc_drnoaction',
'svc_ha']
vals = [hostid,
svc.svcname,
svc.clustertype,
svc.flex_min_nodes,
svc.flex_max_nodes,
svc.flex_cpu_low_threshold,
svc.flex_cpu_high_threshold,
svc.svc_env,
' '.join(svc.nodes),
svc.drpnode,
' '.join(svc.drpnodes),
svc.comment,
svc.drp_type,
' '.join(svc.autostart_node),
svc.app,
svc.svcmode,
repr_config(svc.svcname),
svc.drnoaction,
'1' if svc.ha else '0']
args = [vars, vals]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
self.proxy.update_service(*args)
vars = ['mon_svcname',
'mon_nodname',
'mon_vmname',
'mon_guestos',
'mon_vmem',
'mon_vcpus',
'mon_containerpath']
vals = []
for container in svc.get_resources('container'):
container_info = container.get_container_info()
vals += [[svc.svcname,
rcEnv.nodename,
container.vm_hostname(),
container.guestos if hasattr(container, 'guestos') and container.guestos is not None else "",
container_info['vmem'],
container_info['vcpus'],
container.zonepath if hasattr(container, 'zonepath') else ""]]
if len(vals) > 0:
args = [vars, vals]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
self.proxy.svcmon_update(*args)
def push_disks(self, node, sync=True):
import re
di = __import__('rcDiskInfo'+rcEnv.sysname)
disks = di.diskInfo()
try:
m = __import__("rcDevTree"+rcEnv.sysname)
except ImportError:
return
tree = m.DevTree()
tree.load(di=disks)
vars = ['disk_id',
'disk_svcname',
'disk_size',
'disk_used',
'disk_vendor',
'disk_model',
'disk_dg',
'disk_nodename',
'disk_region']
vals = []
# hash to add up disk usage across all services
dh = {}
served_disks = []
svcs = node.svcs
for svc in svcs:
# hash to add up disk usage inside a service
valsh = {}
for r in svc.get_resources():
if hasattr(r, "name"):
disk_dg = r.name
elif hasattr(r, "dev"):
disk_dg = r.dev
else:
disk_dg = r.rid
if hasattr(r, 'devmap') and hasattr(r, 'vm_hostname'):
if hasattr(svc, "clustername"):
cluster = svc.clustername
else:
cluster = ','.join(sorted(list(svc.nodes)))
served_disks += map(lambda x: (x[0], r.vm_hostname()+'.'+x[1], cluster), r.devmap())
try:
devpaths = r.devlist()
except Exception as e:
print(e)
devpaths = []
for devpath in devpaths:
for d, used, region in tree.get_top_devs_usage_for_devpath(devpath):
disk_id = disks.disk_id(d)
if disk_id is None or disk_id == "":
""" no point pushing to db an empty entry
"""
continue
if disk_id.startswith(rcEnv.nodename+".loop"):
continue
disk_size = disks.disk_size(d)
if disk_id in dh:
dh[disk_id] += used
else:
dh[disk_id] = used
if dh[disk_id] > disk_size:
dh[disk_id] = disk_size
if disk_id not in valsh or used == disk_size:
valsh[disk_id] = [
disk_id,
svc.svcname,
disk_size,
used,
disks.disk_vendor(d),
disks.disk_model(d),
disk_dg,
rcEnv.nodename,
region
]
elif disk_id in valsh and valsh[disk_id][3] < disk_size:
valsh[disk_id][3] += used
valsh[disk_id][6] = ""
valsh[disk_id][8] = ""
if valsh[disk_id][3] > disk_size:
valsh[disk_id][3] = disk_size
for l in valsh.values():
vals += [l]
print(l[1], "disk", l[0], "%d/%dM"%(l[3], l[2]), "region", region)
done = []
region = 0
try:
devpaths = node.devlist(tree)
except Exception as e:
print(e)
devpaths = []
for d in devpaths:
disk_id = disks.disk_id(d)
if disk_id is None or disk_id == "":
""" no point pushing to db an empty entry
"""
continue
if disk_id.startswith(rcEnv.nodename+".loop"):
continue
if re.match(r"/dev/rdsk/.*s[01345678]", d):
# don't report partitions
continue
# Linux Node:devlist() reports paths, so we can have duplicate
# disks here.
if disk_id in done:
continue
done.append(disk_id)
disk_size = disks.disk_size(d)
if disk_id in dh:
left = disk_size - dh[disk_id]
else:
left = disk_size
if left == 0:
continue
print(rcEnv.nodename, "disk", disk_id, "%d/%dM"%(left, disk_size), "region", region)
vals.append([
disk_id,
"",
disk_size,
left,
disks.disk_vendor(d),
disks.disk_model(d),
"",
rcEnv.nodename,
region
])
args = [vars, vals]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
self.proxy.register_disks(*args)
#
# register disks this node provides to its VM
#
vars = ['disk_id',
'disk_arrayid',
'disk_devid',
'disk_size',
'disk_raid',
'disk_group']
vals = []
for dev_id, vdisk_id, cluster in served_disks:
disk_id = disks.disk_id(dev_id)
try:
disk_size = disks.disk_size(dev_id)
except:
continue
vals.append([
vdisk_id,
cluster,
disk_id,
disk_size,
"virtual",
"virtual"
])
print("register served disk", disk_id, "as", vdisk_id, "from varray", cluster)
args = [vars, vals]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
self.proxy.register_diskinfo(*args)
def push_stats_fs_u(self, l, sync=True):
    """Push filesystem usage stats; l is a [vars, vals] pair."""
    if self.auth_node:
        self.proxy.insert_stats_fs_u(l[0], l[1], (rcEnv.uuid, rcEnv.nodename))
    else:
        self.proxy.insert_stats_fs_u(l[0], l[1])
def push_pkg(self, sync=True):
    """Push the installed packages inventory to the collector.

    The platform-specific rcPkg<sysname>.listpkg() returns rows of
    [nodename, name, version, arch] plus up to three optional trailing
    columns (type, install date, signature); column names are appended
    to match the row width.

    Fix over the previous version: a dead "args = [rcEnv.nodename]"
    block (built then immediately overwritten) was removed.
    """
    p = __import__('rcPkg'+rcEnv.sysname)
    vars = ['pkg_nodename',
            'pkg_name',
            'pkg_version',
            'pkg_arch']
    vals = p.listpkg()
    n = len(vals)
    if n == 0:
        print("No package found. Skip push.")
        return
    else:
        print("Pushing %d packages information."%n)
    # optional columns, depending on what the platform lister provides
    if len(vals[0]) >= 5:
        vars.append('pkg_type')
    if len(vals[0]) >= 6:
        vars.append('pkg_install_date')
    if len(vals[0]) >= 7:
        vars.append('pkg_sig')
    args = [vars, vals]
    if self.auth_node:
        args += [(rcEnv.uuid, rcEnv.nodename)]
    self.proxy.insert_pkg(*args)
def push_patch(self, sync=True):
    """Push the installed patches inventory to the collector.

    Fix over the previous version: a dead "args = [rcEnv.nodename]"
    block (built then immediately overwritten) was removed.
    """
    p = __import__('rcPkg'+rcEnv.sysname)
    vars = ['patch_nodename',
            'patch_num',
            'patch_rev']
    vals = p.listpatch()
    if len(vals) == 0:
        return
    # optional column when the platform lister provides install dates
    if len(vals[0]) == 4:
        vars.append('patch_install_date')
    args = [vars, vals]
    if self.auth_node:
        args += [(rcEnv.uuid, rcEnv.nodename)]
    self.proxy.insert_patch(*args)
def push_stats(self, interval=None, stats_dir=None,
stats_start=None, stats_end=None, sync=True, disable=None):
try:
s = __import__('rcStats'+rcEnv.sysname)
except ImportError:
return
try:
sp = s.StatsProvider(interval=interval,
stats_dir=stats_dir,
stats_start=stats_start,
stats_end=stats_end)
except ValueError as e:
print(str(e))
return 1
except Exception as e:
print(e)
raise
h = {}
for stat in ['cpu', 'mem_u', 'proc', 'swap', 'block',
'blockdev', 'netdev', 'netdev_err', 'svc', 'fs_u']:
if disable is not None and stat in disable:
print("%s collection is disabled in node configuration"%stat)
continue
h[stat] = sp.get(stat)
print("%s stats: %d samples" % (stat, len(h[stat][1])))
import json
args = [json.dumps(h)]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
print("pushing")
self.proxy.insert_stats(*args)
def sysreport_lstree(self, sync=True):
    """Fetch and return the collector-side sysreport file tree."""
    if self.auth_node:
        return self.proxy.sysreport_lstree((rcEnv.uuid, rcEnv.nodename))
    return self.proxy.sysreport_lstree()
def send_sysreport(self, fpath, deleted, sync=True):
args = []
if fpath is None:
args += ["", ""]
else:
with open(fpath, 'rb') as f:
binary = xmlrpclib.Binary(f.read())
args = [os.path.basename(fpath), binary]
print("archive length:", len(binary.data))
args += [deleted]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
self.proxy.send_sysreport(*args)
def push_asset(self, node, sync=True):
try:
m = __import__('rcAsset'+rcEnv.sysname)
except ImportError:
print("pushasset methods not implemented on", rcEnv.sysname)
return
if "update_asset" not in self.proxy_methods:
print("'update_asset' method is not exported by the collector")
return
d = m.Asset(node).get_asset_dict()
gen = {}
if 'hba' in d:
vars = ['nodename', 'hba_id', 'hba_type']
vals = []
for hba_id, hba_type in d['hba']:
vals.append([rcEnv.nodename, hba_id, hba_type])
del(d['hba'])
gen.update({'hba': [vars, vals]})
if 'targets' in d:
import copy
vars = ['hba_id', 'tgt_id']
vals = copy.copy(d['targets'])
del(d['targets'])
gen.update({'targets': [vars, vals]})
if 'lan' in d:
vars = ['mac', 'intf', 'type', 'addr', 'mask', 'flag_deprecated']
vals = []
for mac, l in d['lan'].items():
for _d in l:
vals.append([mac, _d['intf'], _d['type'], _d['addr'], _d['mask'], _d['flag_deprecated']])
del(d['lan'])
gen.update({'lan': [vars, vals]})
if 'uids' in d:
vars = ['user_name', 'user_id']
vals = d['uids']
del(d['uids'])
gen.update({'uids': [vars, vals]})
if 'gids' in d:
vars = ['group_name', 'group_id']
vals = d['gids']
del(d['gids'])
gen.update({'gids': [vars, vals]})
if len(gen) > 0:
args = [gen]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
self.proxy.insert_generic(*args)
args = [list(d.keys()), list(d.values())]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
if node.options.syncrpc:
self.proxy.update_asset_sync(*args)
else:
self.proxy.update_asset(*args)
def push_brocade(self, objects=[], sync=True):
if 'update_brocade' not in self.proxy_methods:
print("'update_brocade' method is not exported by the collector")
return
m = __import__('rcBrocade')
try:
brocades = m.Brocades(objects)
except:
return
for brocade in brocades:
vals = []
for key in brocade.keys:
try:
vals.append(getattr(brocade, 'get_'+key)())
except:
print("error fetching", key)
continue
args = [brocade.name, brocade.keys, vals]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
self.proxy.update_brocade(*args)
def push_vioserver(self, objects=[], sync=True):
if 'update_vioserver' not in self.proxy_methods:
print("'update_vioserver' method is not exported by the collector")
return
m = __import__('rcVioServer')
try:
vioservers = m.VioServers(objects)
except:
return
for vioserver in vioservers:
vals = []
for key in vioserver.keys:
vals.append(getattr(vioserver, 'get_'+key)())
args = [vioserver.name, vioserver.keys, vals]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
self.proxy.update_vioserver(*args)
def push_hds(self, objects=[], sync=True):
if 'update_hds' not in self.proxy_methods:
print("'update_hds' method is not exported by the collector")
return
m = __import__('rcHds')
try:
hdss = m.Arrays(objects)
except Exception as e:
print(e, file=sys.stderr)
return
for hds in hdss:
vals = []
for key in hds.keys:
vals.append(getattr(hds, 'get_'+key)())
args = [hds.name, hds.keys, vals]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
self.proxy.update_hds(*args)
def push_necism(self, objects=[], sync=True):
if 'update_necism' not in self.proxy_methods:
print("'update_necism' method is not exported by the collector")
return
m = __import__('rcNecIsm')
try:
necisms = m.NecIsms(objects)
except:
return
for necism in necisms:
vals = []
for key in necism.keys:
vals.append(getattr(necism, 'get_'+key)())
args = [necism.name, necism.keys, vals]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
self.proxy.update_necism(*args)
def push_hp3par(self, objects=[], sync=True):
if 'update_hp3par' not in self.proxy_methods:
print("'update_hp3par' method is not exported by the collector")
return
m = __import__('rcHp3par')
try:
hp3pars = m.Hp3pars(objects)
except:
return
for hp3par in hp3pars:
vals = []
for key in hp3par.keys:
vals.append(getattr(hp3par, 'get_'+key)())
args = [hp3par.name, hp3par.keys, vals]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
self.proxy.update_hp3par(*args)
def push_centera(self, objects=[], sync=True):
if 'update_centera' not in self.proxy_methods:
print("'update_centera' method is not exported by the collector")
return
m = __import__('rcCentera')
try:
centeras = m.Centeras(objects)
except:
return
for centera in centeras:
vals = []
print(centera.name)
for key in centera.keys:
print(" extract", key)
vals.append(getattr(centera, 'get_'+key)())
args = [centera.name, [k+".xml" for k in centera.keys], vals]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
self.proxy.update_centera(*args)
def push_emcvnx(self, objects=[], sync=True):
if 'update_emcvnx' not in self.proxy_methods:
print("'update_emcvnx' method is not exported by the collector")
return
m = __import__('rcEmcVnx')
try:
emcvnxs = m.EmcVnxs(objects)
except:
return
for emcvnx in emcvnxs:
vals = []
print(emcvnx.name)
for key in emcvnx.keys:
print(" extract", key)
vals.append(getattr(emcvnx, 'get_'+key)())
args = [emcvnx.name, emcvnx.keys, vals]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
self.proxy.update_emcvnx(*args)
def push_netapp(self, objects=[], sync=True):
if 'update_netapp' not in self.proxy_methods:
print("'update_netapp' method is not exported by the collector")
return
m = __import__('rcNetapp')
try:
netapps = m.Netapps(objects)
except:
return
for netapp in netapps:
vals = []
print(netapp.name)
for key in netapp.keys:
print(" extract", key)
vals.append(getattr(netapp, 'get_'+key)())
args = [netapp.name, netapp.keys, vals]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
self.proxy.update_netapp(*args)
def push_ibmsvc(self, objects=[], sync=True):
if 'update_ibmsvc' not in self.proxy_methods:
print("'update_ibmsvc' method is not exported by the collector")
return
m = __import__('rcIbmSvc')
try:
ibmsvcs = m.IbmSvcs(objects)
except:
return
for ibmsvc in ibmsvcs:
vals = []
for key in ibmsvc.keys:
vals.append(getattr(ibmsvc, 'get_'+key)())
args = [ibmsvc.name, ibmsvc.keys, vals]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
self.proxy.update_ibmsvc(*args)
def push_nsr(self, sync=True):
if 'update_nsr' not in self.proxy_methods:
print("'update_nsr' method is not exported by the collector")
return
m = __import__('rcNsr')
try:
nsr = m.Nsr()
except:
return
vals = []
for key in nsr.keys:
vals.append(getattr(nsr, 'get_'+key)())
args = [rcEnv.nodename, nsr.keys, vals]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
try:
self.proxy.update_nsr(*args)
except:
print("error pushing nsr index")
def push_ibmds(self, objects=[], sync=True):
if 'update_ibmds' not in self.proxy_methods:
print("'update_ibmds' method is not exported by the collector")
return
m = __import__('rcIbmDs')
try:
ibmdss = m.IbmDss(objects)
except:
return
for ibmds in ibmdss:
vals = []
for key in ibmds.keys:
vals.append(getattr(ibmds, 'get_'+key)())
args = [ibmds.name, ibmds.keys, vals]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
try:
self.proxy.update_ibmds(*args)
except:
print("error pushing", ibmds.name)
def push_gcedisks(self, objects=[], sync=True):
if 'update_gcedisks' not in self.proxy_methods:
print("'update_gcedisks' method is not exported by the collector")
return
m = __import__('rcGceDisks')
try:
arrays = m.GceDiskss(objects)
except:
return
for array in arrays:
vals = []
for key in array.keys:
vals.append(getattr(array, 'get_'+key)())
args = [array.name, array.keys, vals]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
try:
self.proxy.update_gcedisks(*args)
except Exception as e:
print("error pushing %s: %s" % (array.name, str(e)))
def push_freenas(self, objects=[], sync=True):
if 'update_freenas' not in self.proxy_methods:
print("'update_freenas' method is not exported by the collector")
return
m = __import__('rcFreenas')
try:
arrays = m.Freenass(objects)
except:
return
for array in arrays:
vals = []
for key in array.keys:
vals.append(getattr(array, 'get_'+key)())
args = [array.name, array.keys, vals]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
try:
self.proxy.update_freenas(*args)
except:
print("error pushing", array.name)
def push_xtremio(self, objects=[], sync=True):
if 'update_xtremio' not in self.proxy_methods:
print("'update_xtremio' method is not exported by the collector")
return
m = __import__('rcXtremio')
try:
arrays = m.Arrays(objects)
except:
return
for array in arrays:
vals = []
for key in array.keys:
vals.append(getattr(array, 'get_'+key)())
args = [array.name, array.keys, vals]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
try:
self.proxy.update_xtremio(*args)
except Exception as exc:
print("error pushing", array.name, file=sys.stderr)
print(exc, file=sys.stderr)
raise ex.excError
def push_dcs(self, objects=[], sync=True):
if 'update_dcs' not in self.proxy_methods:
print("'update_dcs' method is not exported by the collector")
return
m = __import__('rcDcs')
try:
dcss = m.Dcss(objects)
except:
return
for dcs in dcss:
vals = []
for key in dcs.keys:
vals.append(getattr(dcs, 'get_'+key)())
args = [dcs.name, dcs.keys, vals]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
try:
self.proxy.update_dcs(*args)
except:
print("error pushing", dcs.name)
def push_eva(self, objects=[], sync=True):
if 'update_eva_xml' not in self.proxy_methods:
print("'update_eva_xml' method is not exported by the collector")
return
m = __import__('rcEva')
try:
evas = m.Evas(objects)
except:
return
for eva in evas:
vals = []
for key in eva.keys:
vals.append(getattr(eva, 'get_'+key)())
args = [eva.name, eva.keys, vals]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
self.proxy.update_eva_xml(*args)
def push_sym(self, objects=[], sync=True):
import zlib
if 'update_sym_xml' not in self.proxy_methods:
print("'update_sym_xml' method is not exported by the collector")
return 1
m = __import__('rcSymmetrix')
try:
syms = m.Arrays(objects)
except Exception as e:
print(e)
return 1
r = 0
for sym in syms:
# can be too big for a single rpc
print(sym.sid)
for key in sym.keys:
print(" extract", key)
vars = [key]
try:
vals = [xmlrpclib.Binary(zlib.compress(getattr(sym, 'get_'+key)()))]
except Exception as e:
print(e)
continue
args = [sym.sid, vars, vals]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
try:
print(" send ", key)
self.proxy.update_sym_xml(*args)
except Exception as e:
print(sym.sid, key, ":", e)
r = 1
continue
# signal all files are received
args = [sym.sid, [], []]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
self.proxy.update_sym_xml(*args)
return r
def push_all(self, svcs, sync=True):
    """Push the configuration of every service in <svcs> to the collector.

    Fix over the previous version: an args list (svcnames plus auth
    credentials) was built but never used; removed as dead code.
    """
    for svc in svcs:
        self.push_service(svc, sync=sync)
def push_resinfo(self, svcs, sync=True):
    """Push resource information for every service in <svcs>.

    Per-service failures are printed and skipped so one broken service
    does not abort the whole push.

    Fix over the previous version: an args list (svcnames plus auth
    credentials) was built but never used; removed as dead code.
    """
    for svc in svcs:
        try:
            self._push_resinfo(svc, sync=sync)
        except Exception as e:
            print(e)
def push_checks(self, vars, vals, sync=True):
    """Forward checker results to the collector's push_checks rpc."""
    if "push_checks" not in self.proxy_methods:
        print("'push_checks' method is not exported by the collector")
        return
    if self.auth_node:
        self.proxy.push_checks(vars, vals, (rcEnv.uuid, rcEnv.nodename))
    else:
        self.proxy.push_checks(vars, vals)
def register_node(self, sync=True):
    """Ask the collector to register this node; return the collector reply.

    NOTE(review): unlike the other rpc wrappers, this one sends no
    (uuid, nodename) auth tuple, and the sync parameter is accepted for
    API symmetry but unused — presumably intentional for first contact.
    """
    return self.proxy.register_node(rcEnv.nodename)
def comp_get_data(self, modulesets=[], sync=True):
args = [rcEnv.nodename, modulesets]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.comp_proxy.comp_get_data_v2(*args)
def comp_get_svc_data(self, svcname, modulesets=[], sync=True):
args = [rcEnv.nodename, svcname, modulesets]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.comp_proxy.comp_get_svc_data_v2(*args)
def comp_get_data_moduleset(self, sync=True):
args = [rcEnv.nodename]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.comp_proxy.comp_get_data_moduleset(*args)
def comp_get_svc_data_moduleset(self, svc, sync=True):
args = [svc]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.comp_proxy.comp_get_svc_data_moduleset(*args)
def comp_attach_moduleset(self, moduleset, sync=True):
args = [rcEnv.nodename, moduleset]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.comp_proxy.comp_attach_moduleset(*args)
def comp_attach_svc_moduleset(self, svc, moduleset, sync=True):
args = [svc, moduleset]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.comp_proxy.comp_attach_svc_moduleset(*args)
def comp_detach_svc_moduleset(self, svcname, moduleset, sync=True):
args = [svcname, moduleset]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.comp_proxy.comp_detach_svc_moduleset(*args)
def comp_detach_moduleset(self, moduleset, sync=True):
args = [rcEnv.nodename, moduleset]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.comp_proxy.comp_detach_moduleset(*args)
def comp_get_svc_ruleset(self, svcname, sync=True):
args = [svcname]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.comp_proxy.comp_get_svc_ruleset(*args)
def comp_get_ruleset(self, sync=True):
args = [rcEnv.nodename]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.comp_proxy.comp_get_ruleset(*args)
def comp_get_ruleset_md5(self, rset_md5, sync=True):
args = [rset_md5]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.comp_proxy.comp_get_ruleset_md5(*args)
def comp_attach_ruleset(self, ruleset, sync=True):
args = [rcEnv.nodename, ruleset]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.comp_proxy.comp_attach_ruleset(*args)
def comp_detach_svc_ruleset(self, svcname, ruleset, sync=True):
args = [svcname, ruleset]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.comp_proxy.comp_detach_svc_ruleset(*args)
def comp_attach_svc_ruleset(self, svcname, ruleset, sync=True):
args = [svcname, ruleset]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.comp_proxy.comp_attach_svc_ruleset(*args)
def comp_detach_ruleset(self, ruleset, sync=True):
args = [rcEnv.nodename, ruleset]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.comp_proxy.comp_detach_ruleset(*args)
def comp_list_ruleset(self, pattern='%', sync=True):
args = [pattern, rcEnv.nodename]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.comp_proxy.comp_list_rulesets(*args)
def comp_list_moduleset(self, pattern='%', sync=True):
args = [pattern]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.comp_proxy.comp_list_modulesets(*args)
def comp_log_actions(self, vars, vals, sync=True):
args = [vars, vals]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.comp_proxy.comp_log_actions(*args)
def comp_show_status(self, svcname, pattern='%', sync=True):
args = [svcname, pattern]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.comp_proxy.comp_show_status(*args)
def collector_update_root_pw(self, opts, sync=True):
args = [opts]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.proxy.collector_update_root_pw(*args)
def collector_ack_unavailability(self, opts, sync=True):
args = [opts]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.proxy.collector_ack_unavailability(*args)
def collector_list_unavailability_ack(self, opts, sync=True):
args = [opts]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.proxy.collector_list_unavailability_ack(*args)
def collector_list_actions(self, opts, sync=True):
args = [opts]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.proxy.collector_list_actions(*args)
def collector_ack_action(self, opts, sync=True):
args = [opts]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.proxy.collector_ack_action(*args)
def collector_status(self, opts, sync=True):
args = [opts]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.proxy.collector_status(*args)
def collector_asset(self, opts, sync=True):
args = [opts]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.proxy.collector_asset(*args)
def collector_networks(self, opts, sync=True):
args = [opts]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.proxy.collector_networks(*args)
def collector_checks(self, opts, sync=True):
args = [opts]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.proxy.collector_checks(*args)
def collector_disks(self, opts, sync=True):
args = [opts]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.proxy.collector_disks(*args)
def collector_alerts(self, opts, sync=True):
args = [opts]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.proxy.collector_alerts(*args)
def collector_show_actions(self, opts, sync=True):
args = [opts]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.proxy.collector_show_actions(*args)
def collector_events(self, opts, sync=True):
args = [opts]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.proxy.collector_events(*args)
def collector_tag(self, opts, sync=True):
args = [opts]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.proxy.collector_tag(*args)
def collector_untag(self, opts, sync=True):
args = [opts]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.proxy.collector_untag(*args)
def collector_create_tag(self, opts, sync=True):
args = [opts]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.proxy.collector_create_tag(*args)
def collector_show_tags(self, opts, sync=True):
args = [opts]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.proxy.collector_show_tags(*args)
def collector_list_tags(self, opts, sync=True):
args = [opts]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.proxy.collector_list_tags(*args)
def collector_list_nodes(self, opts, sync=True):
args = [opts]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.proxy.collector_list_nodes(*args)
def collector_list_services(self, opts, sync=True):
args = [opts]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.proxy.collector_list_services(*args)
def collector_list_filtersets(self, opts, sync=True):
args = [opts]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.proxy.collector_list_filtersets(*args)
def collector_get_action_queue(self, sync=True):
args = [rcEnv.nodename]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.proxy.collector_get_action_queue(*args)
def collector_update_action_queue(self, data, sync=True):
args = [data]
if self.auth_node:
args += [(rcEnv.uuid, rcEnv.nodename)]
return self.proxy.collector_update_action_queue(*args)
if __name__ == "__main__":
    # ad-hoc smoke test: instantiate the collector client, initialize the
    # proxies, and dump the rpc method lists advertised by the feed and
    # compliance collectors
    x = Collector()
    x.init()
    print(x.proxy_methods)
    print(x.comp_proxy_methods)
opensvc-1.8~20170412/lib/rcPgLinux.py 0000644 0001750 0001750 00000027356 13073467726 017333 0 ustar jkelbert jkelbert import os
import re
import glob
import rcExceptions as ex
from rcUtilities import justcall, convert_size
default_cgroup_mntpt = '/cgroup'
def get_cgroup_mntpt(t):
    """Return the mount point of the cgroup (v1) hierarchy carrying
    controller <t>, or None when not mounted or /proc/mounts is absent.
    """
    mounts = '/proc/mounts'
    if not os.path.exists(mounts):
        return None
    with open(mounts, 'r') as ofile:
        content = ofile.read()
    for line in content.split('\n'):
        if 'cgroup' not in line:
            continue
        fields = line.split()
        # expect the classic 6-field mount entry with fstype 'cgroup'
        if len(fields) < 6 or fields[2] != 'cgroup':
            continue
        # the controller name appears among the mount options
        if t in re.split(r'\W+', fields[3]):
            return fields[1]
    return None
def cgroup_capable(res):
    """Return True if the running kernel was built with cgroups support.

    Looks for CONFIG_CGROUPS=y in the kernel config, trying the modules
    build tree first, then /boot/config-<release>. Logs through res.log
    and returns False when no config file is readable or the option is
    not set.
    """
    kconf = os.path.join(os.sep, 'lib', 'modules',
                         os.uname()[2], 'build', '.config')
    if not os.path.exists(kconf):
        # fall back to the distro-installed boot config
        kconf = os.path.join(os.sep, 'boot', 'config-'+os.uname()[2])
    if not os.path.exists(kconf):
        # no readable kernel config: report unsupported rather than guess
        res.log.info("can not detect if system supports process groups")
        return False
    with open(kconf, 'r') as f:
        for line in f.readlines():
            l = line.split('=')
            if len(l) != 2:
                continue
            # the value keeps its trailing newline, hence 'y\n'
            if l[0] == 'CONFIG_CGROUPS' and l[1] == 'y\n':
                return True
    res.log.info("system does not support process groups")
    return False
def set_task(o, t):
    """Add the current process to the tasks file of object <o>'s cgroup
    for controller <t>. A write failing with ENOSPC is ignored: it
    means the cgroup has not been initialized with capabilities yet."""
    o.log.debug("set_task : start %s" %(t))
    cgp = get_cgroup_path(o, t)
    path = os.path.join(cgp, "tasks")
    pid = str(os.getpid())
    with open(path, 'r') as f:
        buff = f.read()
    if pid in buff.split():
        # already a member of this cgroup
        return
    try:
        o.log.debug("set_task : open path %s for writing" %(path))
        with open(path, 'w') as f:
            f.write(pid)
    except Exception as e:
        if hasattr(e, "errno") and e.errno == 28:
            # No space left on device
            # means the cgroup has not been initialized with caps yet
            pass
        else:
            raise
def set_cgroup(o, t, name, key, force=False):
    """
    Write the value of pg setting <key> of object <o> into the cgroup
    control file <name> of controller <t>.

    No-op when the object has no pg_settings, when <key> is not set or
    is None, or when the current value already matches (unless
    <force>). Raise ex.excError if the control file does not exist.
    """
    o.log.debug("set_cgroup : start %s, %s, %s, %s" %(t, name, key, force))
    if not hasattr(o, "pg_settings"):
        return
    if key not in o.pg_settings:
        return
    value = o.pg_settings[key]
    cgp = get_cgroup_path(o, t)
    if value is None:
        return
    if not force and get_cgroup(o, t, name).strip() == str(value):
        # already set as requested: avoid a useless write
        return
    path = os.path.join(cgp, name)
    if not os.path.exists(path):
        raise ex.excError("can not find %s"%path)
    # resolve a logger: resources expose .log directly, resource sets
    # expose it through .svc. Fall back to None instead of leaving the
    # variable unbound (the original code raised NameError in that case)
    if hasattr(o, "log"):
        log = o.log
    elif hasattr(o, "svc"):
        log = o.svc.log
    else:
        log = None
    try:
        with open(path, 'w') as f:
            f.write(str(value))
        if log:
            log.info('/bin/echo %s > %s'%(value, path))
    except Exception as e:
        # include the failure reason, previously discarded
        if log:
            log.warning("failed to set process group setting %s to %s: %s" % (value, path, str(e)))
def get_cgroup(o, t, name):
    """Return the raw content of cgroup control file <name> of
    controller <t> for object <o>. Raise ex.excError when the file
    does not exist."""
    o.log.debug("get_cgroup : start %s, %s" %(t, name))
    cgp = get_cgroup_path(o, t)
    path = os.path.join(cgp, name)
    if not os.path.exists(path):
        raise ex.excError("can not find %s"%path)
    with open(path, 'r') as f:
        buff = f.read()
    return buff
def set_cpu_quota(o):
    """
    Apply the 'cpu_quota' pg setting to the cpu controller's
    cpu.cfs_quota_us.

    Accepted formats: a plain integer (microseconds), or "<pct>%",
    optionally suffixed "@<cores>" or "@all" to scale the 100%
    reference to that many cores.
    """
    if not hasattr(o, "pg_settings"):
        return
    o.log.debug("set_cpu_quota : start <%s>"%(o.pg_settings))
    if 'cpu_quota' not in o.pg_settings:
        return
    period = int(get_cgroup(o, 'cpu', 'cpu.cfs_period_us'))
    v = o.pg_settings["cpu_quota"]
    if "@" in v:
        try:
            quota, cores = v.split("@")
        except Exception as e:
            raise ex.excError("malformed cpu quota: %s (%s)" % (v, str(e)))
    else:
        cores = 1
        quota = v
    if cores == "all":
        # local import: rcAssetLinux is only needed for core counting
        import rcAssetLinux
        cores = int(rcAssetLinux.Asset(None)._get_cpu_cores())
    else:
        cores = int(cores)
    total_us = period * cores
    if "%" in quota:
        quota = int(quota.strip("%"))
        tgt_val = total_us * quota // 100
    else:
        tgt_val = int(quota)
    cur_val = int(get_cgroup(o, 'cpu', 'cpu.cfs_quota_us'))
    if tgt_val == cur_val:
        return
    # stash the computed value so set_cgroup can pick it up by key
    o.pg_settings["cpu_cfs_quota_us"] = tgt_val
    set_cgroup(o, 'cpu', 'cpu.cfs_quota_us', 'cpu_cfs_quota_us')
def set_mem_cgroup(o):
    """
    Apply the 'mem_limit' and 'vmem_limit' pg settings to the memory
    controller, ordering the two writes so that mem <= vmem is honored
    at every step of the resize.
    """
    if not hasattr(o, "pg_settings"):
        return
    o.log.debug("set_mem_cgroup : start <%s>"%(o.pg_settings))
    if 'mem_limit' in o.pg_settings:
        # normalize to bytes, rounded to the page size
        mem_limit = convert_size(o.pg_settings['mem_limit'], _to="", _round=4096)
        o.pg_settings['mem_limit'] = mem_limit
    else:
        mem_limit = None
    if 'vmem_limit' in o.pg_settings:
        vmem_limit = convert_size(o.pg_settings['vmem_limit'], _to="", _round=4096)
        o.pg_settings['vmem_limit'] = vmem_limit
    else:
        vmem_limit = None
    # resolve a logger: resources expose .log, sets expose it via .svc
    # NOTE(review): if o has neither attribute, log stays unbound and
    # the error paths below raise NameError -- confirm callers
    if hasattr(o, "log"):
        log = o.log
    elif hasattr(o, "svc"):
        log = o.svc.log
    #
    # validate memory limits sanity and order adequately the resize
    # depending on increase/decrease of limits
    #
    try:
        cur_vmem_limit = int(get_cgroup(o, 'memory', 'memory.memsw.limit_in_bytes'))
    except ex.excError:
        cur_vmem_limit = None
    if mem_limit is not None and vmem_limit is not None:
        if mem_limit > vmem_limit:
            log.error("pg_vmem_limit must be greater than pg_mem_limit")
            raise ex.excError
        # NOTE(review): cur_vmem_limit may be None here, which makes the
        # comparison raise TypeError on python3 -- confirm
        if mem_limit > cur_vmem_limit:
            # raising the limits: push the vmem ceiling up first
            set_cgroup(o, 'memory', 'memory.memsw.limit_in_bytes', 'vmem_limit')
            set_cgroup(o, 'memory', 'memory.limit_in_bytes', 'mem_limit')
        else:
            # lowering the limits: shrink mem first so it fits under vmem
            set_cgroup(o, 'memory', 'memory.limit_in_bytes', 'mem_limit')
            set_cgroup(o, 'memory', 'memory.memsw.limit_in_bytes', 'vmem_limit')
    elif mem_limit is not None:
        if cur_vmem_limit and mem_limit > cur_vmem_limit:
            log.error("pg_mem_limit must not be greater than current pg_vmem_limit (%d)"%cur_vmem_limit)
            raise ex.excError
        set_cgroup(o, 'memory', 'memory.limit_in_bytes', 'mem_limit')
    elif vmem_limit is not None:
        cur_mem_limit = int(get_cgroup(o, 'memory', 'memory.limit_in_bytes'))
        if vmem_limit < cur_mem_limit:
            log.error("pg_vmem_limit must not be lesser than current pg_mem_limit (%d)"%cur_mem_limit)
            raise ex.excError
        set_cgroup(o, 'memory', 'memory.memsw.limit_in_bytes', 'vmem_limit')
def get_cgroup_path(o, t, create=True):
    """
    Return the cgroup directory of object <o> in the hierarchy of
    controller <t>, optionally creating it.

    Raise ex.excError when the controller hierarchy is not mounted.
    The path is <mntpt>/<svcname>[/<rset>][/<rid>], except for lxc
    container resources which live under <mntpt>/lxc/<name>.
    """
    o.log.debug("get_cgroup_path : t=%s, create=%s"%(t, create))
    cgroup_mntpt = get_cgroup_mntpt(t)
    if hasattr(o, "svcname"):
        svcname = o.svcname
    else:
        svcname = o.svc.svcname
    if cgroup_mntpt is None:
        raise ex.excError("cgroup fs with option %s is not mounted" % t)
    if hasattr(o, "type") and o.type == "container.lxc" and hasattr(o, "name"):
        # lxc containers have their own cgroup under the lxc subdir
        cgp = os.path.join(cgroup_mntpt, "lxc", o.name)
    else:
        elements = [cgroup_mntpt, svcname]
        if hasattr(o, "rset") and o.rset is not None:
            # ':' is not welcome in cgroup directory names
            elements.append(o.rset.rid.replace(":", "."))
        if hasattr(o, "rid") and o.rid is not None:
            elements.append(o.rid.replace("#", "."))
        cgp = os.path.join(*elements)
    # resources expose .log, resource sets expose it via .svc
    if hasattr(o, "log"):
        log = o.log
    elif hasattr(o, "svc"):
        log = o.svc.log
    if not os.path.exists(cgp) and create:
        if hasattr(o, "cleanup_cgroup"):
            o.cleanup_cgroup(t)
        create_cgroup(cgp, log=log)
    return cgp
def create_cgroup(cgp, log=None):
    """
    Create the cgroup directory <cgp>, enable clone_children on it and
    seed its cpuset cpus/mems values from the parent cgroup (a fresh
    cpuset cgroup starts empty and rejects task assignment otherwise).

    <log> is optional: the original unconditionally called log.info,
    crashing with the default log=None.
    """
    if log:
        log.info("create cgroup %s" % cgp)
    os.makedirs(cgp)
    set_sysfs(cgp+"/cgroup.clone_children", "1", log=log)
    for parm in ("cpus", "mems"):
        # inherit the parent cgroup's cpuset configuration
        parent_val = get_sysfs(cgp+"/../cpuset."+parm)
        set_sysfs(cgp+"/cpuset."+parm, parent_val, log=log)
def get_sysfs(path):
    """Return the content of file <path> with the trailing newlines
    stripped, or None when the file does not exist."""
    if not os.path.exists(path):
        return
    with open(path, "r") as ofile:
        return ofile.read().rstrip("\n")
def set_sysfs(path, val, log=None):
    """Write <val> to <path>, unless the file is absent or already
    holds that exact value. Log the equivalent shell command through
    <log> when actually writing."""
    current = get_sysfs(path)
    if current is None or current == val:
        # missing file or nothing to change
        return
    if log:
        log.info("/bin/echo %s >%s" % (val, path))
    with open(path, "w") as ofile:
        return ofile.write(val)
def freeze(o):
    # freeze every task of the object's freezer cgroup
    return freezer(o, "FROZEN")
def thaw(o):
    # unfreeze every task of the object's freezer cgroup
    return freezer(o, "THAWED")
def kill(o):
    """
    Kill all tasks recorded in the freezer cgroup of object <o>, down
    to two levels of child cgroups, then recurse into lxc container
    resources which are not parented to the service cgroup.
    """
    cgp = get_cgroup_path(o, "freezer")
    pids = set([])
    for p in glob.glob(cgp+"/tasks") + glob.glob(cgp+"/*/tasks") + glob.glob(cgp+"/*/*/tasks"):
        with open(p, "r") as f:
            buff = f.read()
        pids |= set(buff.split("\n"))
    # drop the empty token left by the trailing newline. discard() does
    # not raise when absent (remove("") raised KeyError when no tasks
    # file matched the globs above)
    pids.discard("")
    if hasattr(o, "log"):
        _o = o
    else:
        _o = o.svc
    if len(pids) == 0:
        _o.log.info("no task to kill")
        return
    cmd = ["kill"] + list(pids)
    _o.vcall(cmd)
    if hasattr(o, "svcname"):
        # lxc containers are not parented to the service cgroup
        # so we have to kill them individually
        for r in o.get_resources("container.lxc"):
            kill(r)
def freezer(o, a):
    """Set freezer state <a> ("FROZEN" or "THAWED") on object <o>'s
    process group, then recurse into lxc container resources."""
    if not cgroup_capable(o):
        return
    cgp = get_cgroup_path(o, "freezer")
    _freezer(o, a, cgp)
    if hasattr(o, "svcname"):
        # lxc containers are not parented to the service cgroup
        # so we have to freeze them individually
        for r in o.get_resources("container.lxc"):
            freezer(r, a)
def _freezer(o, a, cgp):
    """Write state <a> into the freezer.state file under cgroup <cgp>,
    then propagate to grand-child cgroups (el6 kernels do not cascade
    the state as later kernels do). Raise ex.excError on read/write
    failure or missing control file."""
    path = os.path.join(cgp, "freezer.state")
    # resources expose .log, resource sets expose it via .svc
    # NOTE(review): log stays unbound if o has neither attribute
    if hasattr(o, "log"):
        log = o.log
    elif hasattr(o, "svc"):
        log = o.svc.log
    if not os.path.exists(path):
        raise ex.excError("freezer control file not found: %s"%path)
    try:
        with open(path, "r") as f:
            buff = f.read()
    except Exception as e:
        raise ex.excError(str(e))
    buff = buff.strip()
    if buff == a:
        log.info("%s is already %s" % (path, a))
        return
    elif buff == "FREEZING":
        log.info("%s is currently FREEZING" % path)
        return
    try:
        with open(path, "w") as f:
            buff = f.write(a)
    except Exception as e:
        raise ex.excError(str(e))
    log.info("%s on %s submitted successfully" % (a, path))
    # el6 kernel does not freeze child cgroups, as later kernels do
    for _cgp in glob.glob(cgp+"/*/*/freezer.state"):
        _cgp = os.path.dirname(_cgp)
        _freezer(o, a, _cgp)
def get_freeze_state(o):
    """Return the freezer state string of <o>'s cgroup (for example
    "FROZEN", "FREEZING" or "THAWED"), None when the state file does
    not exist, or False when the system has no cgroup support."""
    if not cgroup_capable(o):
        return False
    cgp = get_cgroup_path(o, "freezer", create=False)
    path = os.path.join(cgp, "freezer.state")
    if not os.path.exists(path):
        return
    with open(path, "r") as f:
        buff = f.read()
    buff = buff.strip()
    return buff
def frozen(res):
    """Return True if the process group of the service, resource set
    or resource of <res> is frozen or freezing, pushing a status log
    entry identifying which level is frozen."""
    for o in (res.svc, res.rset, res):
        try:
            state = get_freeze_state(o)
        except Exception as e:
            # no cgroup path or unreadable state: treat as not frozen
            state = None
        if state in ("FROZEN", "FREEZING"):
            try:
                name = o.svcname
            except:
                name = o.rid
            res.status_log("process group %s is %s" % (name, state))
            return True
    return False
def create_pg(res):
    """Create the service, resource-set and resource level process
    groups for <res>, if the system supports cgroups."""
    if not cgroup_capable(res):
        return
    _create_pg(res.svc)
    _create_pg(res.rset)
    _create_pg(res)
def _create_pg(o):
    """Join object <o>'s cgroup in every controller and apply its pg
    settings. Any failure is re-raised as ex.excError naming the
    offending cgroup."""
    if o is None:
        return
    try:
        try:
            # the systemd hierarchy may not exist: best effort only
            set_task(o, 'systemd')
        except:
            pass
        set_task(o, 'cpu')
        set_task(o, 'cpuset')
        set_task(o, 'memory')
        set_task(o, 'blkio')
        set_task(o, 'freezer')
        set_cgroup(o, 'cpuset', 'cpuset.cpus', 'cpus')
        set_cgroup(o, 'cpu', 'cpu.shares', 'cpu_shares')
        set_cgroup(o, 'cpuset', 'cpuset.mems', 'mems')
        set_cgroup(o, 'blkio', 'blkio.weight', 'blkio_weight')
        set_cgroup(o, 'memory', 'memory.swappiness', 'mem_swappiness')
        set_cgroup(o, 'memory', 'memory.oom_control', 'mem_oom_control')
        set_mem_cgroup(o)
        set_cpu_quota(o)
    except Exception as e:
        try:
            name = o.svcname
        except:
            name = o.rid
        raise ex.excError("process group creation in '%s' cgroup failed: %s"%(name, str(e)))
opensvc-1.8~20170412/lib/checkFsUsageAIX.py 0000644 0001750 0001750 00000001743 13073467726 020305 0 ustar jkelbert jkelbert import checks
from rcUtilities import justcall
class check(checks.check):
    """Filesystem usage checker for AIX, based on 'df -P' output."""
    chk_type = "fs_u"

    def find_svc(self, mountpt):
        """Return the svcname of the service owning an fs resource
        mounted on <mountpt>, or the empty string."""
        for svc in self.svcs:
            for res in svc.get_resources('fs'):
                if res.mount_point == mountpt:
                    return svc.svcname
        return ''

    def do_check(self):
        """Return one usage dict per eligible local filesystem."""
        out, err, ret = justcall(['df', '-P'])
        if ret != 0:
            return self.undef
        lines = out.split('\n')
        if len(lines) < 2:
            return self.undef
        data = []
        for line in lines[1:]:
            fields = line.split()
            if len(fields) != 6:
                continue
            # skip pseudo filesystems and remote host:/path mounts
            if fields[1] == '-' or ":/" in fields[0]:
                continue
            data.append({
                'chk_instance': fields[5],
                'chk_value': fields[4],
                'chk_svcname': self.find_svc(fields[5]),
            })
        return data
opensvc-1.8~20170412/lib/rcUpdatePkgSunOS.py 0000644 0001750 0001750 00000003132 13073467726 020543 0 ustar jkelbert jkelbert from subprocess import *
from rcUtilitiesSunOS import get_solaris_version
import os
repo_subdir = "sunos-pkg"
def gen_adminfile():
    """
    Write the non-interactive pkgadd admin file and return its path.

    The settings disable all confirmation prompts and dependency/space
    checks so the opensvc package can be updated unattended.
    """
    filename = "/var/tmp/opensvc.adminfile"
    lines = [
        "mail=",
        "instance=overwrite",
        "partial=nocheck",
        "runlevel=nocheck",
        "idepend=nocheck",
        "rdepend=nocheck",
        "space=nocheck",
        "setuid=nocheck",
        "conflict=nocheck",
        "action=nocheck",
        "basedir=default",
    ]
    # context manager guarantees the file is closed even if a write
    # fails (the original left the handle open on error)
    with open(filename, 'w') as f:
        f.write("\n".join(lines) + "\n")
    return filename
def update(fpath):
    """
    Update the opensvc Solaris package from package file <fpath>:
    verify it with pkgchk, remember whether the installed package was
    added globally (pkgadd -G), pkgrm the current version, then pkgadd
    the new one with a non-interactive admin file.

    Return 0 on success, non-zero otherwise.
    """
    # check downloaded package integrity
    cmd = ['pkgchk', '-d', fpath, 'all']
    print(' '.join(cmd))
    p = Popen(cmd)
    p.communicate()
    if p.returncode != 0:
        return 1
    # check if initially installed with pkgadd -G
    file = '/var/sadm/install/gz-only-packages'
    GlobalOnly = False
    if os.path.isfile(file):
        f = open(file)
        # NOTE(review): f is never closed
        for line in f:
            if line.startswith("opensvc"):
                print("OpenSVC package was previously installed with pkgadd -G\n")
                GlobalOnly = True
    # remove the installed package before adding the new one
    cmd = ['pkgrm', '-n', 'opensvc']
    print(' '.join(cmd))
    p = Popen(cmd)
    p.communicate()
    if p.returncode != 0:
        return 1
    osver = get_solaris_version()
    if osver < 10.0:
        # -G is not supported before Solaris 10
        opts = ''
    else:
        if GlobalOnly is True:
            opts = '-G'
        else:
            opts = ''
    admin = gen_adminfile()
    opts += " -a %s " % admin
    cmd = 'pkgadd %s -d %s all' % (opts, fpath)
    print(cmd)
    return os.system(cmd)
opensvc-1.8~20170412/lib/resIpDockerLinux.py 0000644 0001750 0001750 00000032551 13073467726 020643 0 ustar jkelbert jkelbert import os
import resIpLinux as Res
import rcExceptions as ex
import rcIfconfigLinux as rcIfconfig
import rcStatus
from rcUtilitiesLinux import check_ping
from rcUtilities import which, justcall, to_cidr, lazy
class Ip(Res.Ip):
    """
    ip.docker resource driver: plumb an ip address into a docker
    container's network namespace, using either a dedicated host
    netdev, a veth pair mastered by a bridge, or a macvlan link.
    """
    def __init__(self,
                 rid=None,
                 ipdev=None,
                 ipname=None,
                 mask=None,
                 gateway=None,
                 network=None,
                 del_net_route=False,
                 container_rid=None,
                 **kwargs):
        Res.Ip.__init__(self,
                        rid,
                        type="ip.docker",
                        ipdev=ipdev,
                        ipname=ipname,
                        gateway=gateway,
                        mask=mask,
                        **kwargs)
        self.network = network
        self.del_net_route = del_net_route
        self.container_rid = str(container_rid)
        self.label = str(ipname) + '@' + ipdev + '@' + self.container_rid
        self.tags.add("docker")
        self.tags.add(container_rid)

    def on_add(self):
        # start/stop ordering relative to the hosting container resource
        self.svc.register_dependency("start", self.rid, self.container_rid)
        self.svc.register_dependency("start", self.container_rid, self.rid)
        self.svc.register_dependency("stop", self.container_rid, self.rid)

    @lazy
    def guest_dev(self):
        """
        Find a free eth netdev.

        Execute a ip link command in the container net namespace to parse
        used eth netdevs.
        """
        nspid = self.get_nspid()
        cmd = ["ip", "netns", "exec", nspid, "ip", "link"]
        out, err, ret = justcall(cmd)
        used = []
        for line in out.splitlines():
            if ": eth" not in line:
                continue
            idx = line.split()[1].replace(":", "").replace("eth", "")
            if "@" in idx:
                # strip "@if" suffix
                idx = idx[:idx.index("@")]
            used.append(int(idx))
        idx = 0
        while True:
            idx += 1
            if idx not in used:
                return "eth%d" % idx

    @lazy
    def container(self):
        # the container resource object hosting this ip
        if self.container_rid not in self.svc.resources_by_id:
            raise ex.excError("rid %s not found" % self.container_rid)
        return self.svc.resources_by_id[self.container_rid]

    def container_id(self, refresh=False):
        # docker container id, resolved from the container resource name
        return self.svc.dockerlib.get_container_id_by_name(self.container, refresh=refresh)

    def arp_announce(self):
        """ disable the generic arping. We do that in the guest namespace.
        """
        pass

    def get_docker_ifconfig(self):
        # parse "ip addr" run inside the container namespace, returning
        # an ifconfig object, or None on any failure
        try:
            nspid = self.get_nspid()
        except ex.excError as e:
            return
        if nspid is None:
            return
        self.create_netns_link(nspid=nspid, verbose=False)
        cmd = ["ip", "netns", "exec", nspid, "ip", "addr"]
        out, err, ret = justcall(cmd)
        if ret != 0:
            # NOTE(review): the netns link is not removed on this error path
            return
        self.delete_netns_link(nspid=nspid, verbose=False)
        ifconfig = rcIfconfig.ifconfig(ip_out=out)
        return ifconfig

    def is_up(self):
        # True if the address is plumbed inside the container namespace
        ifconfig = self.get_docker_ifconfig()
        if ifconfig is None:
            return False
        if ifconfig.has_param("ipaddr", self.addr):
            return True
        return False

    def get_docker_interface(self):
        # return the name of the in-container interface carrying
        # self.addr, stripped of any "@..." suffix, or None
        ifconfig = self.get_docker_ifconfig()
        if ifconfig is None:
            return
        for intf in ifconfig.intf:
            if self.addr in intf.ipaddr+intf.ip6addr:
                name = intf.name
                if "@" in name:
                    name = name[:name.index("@")]
                return name
        return

    def container_running_elsewhere(self):
        # True when the docker service backing the container has
        # running instances but none hosted locally
        if not self.container.docker_service:
            return False
        if len(self.container.service_hosted_instances()) == 0 and \
           len(self.container.service_running_instances()) > 0:
            return True
        return False

    def _status(self, verbose=False):
        if self.container_running_elsewhere():
            self.status_log("%s is hosted by another host" % self.container_rid, "info")
            return rcStatus.NA
        return Res.Ip._status(self)

    def startip_cmd(self):
        # dispatch to the dedicated or shared start method
        if self.container_running_elsewhere():
            return 0, "", ""
        if "dedicated" in self.tags:
            self.log.info("dedicated mode")
            return self.startip_cmd_dedicated()
        else:
            return self.startip_cmd_shared()

    def startip_cmd_shared(self):
        # bridge mode if ipdev is a bridge, macvlan mode otherwise
        if os.path.exists("/sys/class/net/%s/bridge" % self.ipdev):
            self.log.info("bridge mode")
            return self.startip_cmd_shared_bridge()
        else:
            self.log.info("macvlan mode")
            return self.startip_cmd_shared_macvlan()

    def startip_cmd_dedicated(self):
        # move the host netdev into the container namespace, rename it,
        # plumb the address, set the default route, arp announce
        nspid = self.get_nspid()
        self.create_netns_link(nspid=nspid)
        # assign interface to the nspid
        cmd = ["ip", "link", "set", self.ipdev, "netns", nspid, "name", self.guest_dev]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            return ret, out, err
        # plumb the ip
        cmd = ["ip", "netns", "exec", nspid, "ip", "addr", "add", "%s/%s" % (self.addr, to_cidr(self.mask)), "dev", self.guest_dev]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            return ret, out, err
        # activate
        cmd = ["ip", "netns", "exec", nspid, "ip", "link", "set", self.guest_dev, "up"]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            return ret, out, err
        # add default route
        if self.gateway:
            cmd = ["ip", "netns", "exec", nspid, "ip", "route", "add", "default", "via", self.gateway, "dev", self.guest_dev]
            ret, out, err = self.vcall(cmd)
            if ret != 0:
                return ret, out, err
        # announce
        if which("arping") is not None:
            cmd = ["ip", "netns", "exec", nspid, "arping", "-c", "1", "-A", "-I", self.guest_dev, self.addr]
            self.log.info(" ".join(cmd))
            out, err, ret = justcall(cmd)
        self.delete_netns_link(nspid=nspid)
        return 0, "", ""

    def startip_cmd_shared_bridge(self):
        # create a veth pair, enslave the host end to the bridge, move
        # the guest end into the container namespace and plumb the ip
        nspid = self.get_nspid()
        self.create_netns_link(nspid=nspid)
        tmp_guest_dev = "v%spg%s" % (self.guest_dev, nspid)
        tmp_local_dev = "v%spl%s" % (self.guest_dev, nspid)
        mtu = self.ip_get_mtu()
        # create peer devs
        cmd = ["ip", "link", "add", "name", tmp_local_dev, "mtu", mtu, "type", "veth", "peer", "name", tmp_guest_dev, "mtu", mtu]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            return ret, out, err
        # activate the parent dev
        cmd = ["ip", "link", "set", tmp_local_dev, "master", self.ipdev]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            return ret, out, err
        cmd = ["ip", "link", "set", tmp_local_dev, "up"]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            return ret, out, err
        # assign the macvlan interface to the container namespace
        cmd = ["ip", "link", "set", tmp_guest_dev, "netns", nspid]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            return ret, out, err
        # rename the tmp guest dev
        cmd = ["ip", "netns", "exec", nspid, "ip", "link", "set", tmp_guest_dev, "name", self.guest_dev]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            return ret, out, err
        # plumb ip
        cmd = ["ip", "netns", "exec", nspid, "ip", "addr", "add", self.addr+"/"+to_cidr(self.mask), "dev", self.guest_dev]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            return ret, out, err
        # setup default route
        self.ip_setup_route(nspid)
        self.ip_wait()
        self.delete_netns_link(nspid=nspid)
        return 0, "", ""

    def startip_cmd_shared_macvlan(self):
        # create a macvlan link on ipdev, move it into the container
        # namespace and plumb the ip
        nspid = self.get_nspid()
        self.create_netns_link(nspid=nspid)
        tmp_guest_dev = "ph%s%s" % (nspid, self.guest_dev)
        mtu = self.ip_get_mtu()
        # create a macvlan interface
        cmd = ["ip", "link", "add", "link", self.ipdev, "dev", tmp_guest_dev, "mtu", mtu, "type", "macvlan", "mode", "bridge"]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            return ret, out, err
        # activate the parent dev
        cmd = ["ip", "link", "set", self.ipdev, "up"]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            return ret, out, err
        # assign the macvlan interface to the container namespace
        cmd = ["ip", "link", "set", tmp_guest_dev, "netns", nspid]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            return ret, out, err
        # rename the tmp guest dev
        cmd = ["ip", "netns", "exec", nspid, "ip", "link", "set", tmp_guest_dev, "name", self.guest_dev]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            return ret, out, err
        # plumb the ip
        cmd = ["ip", "netns", "exec", nspid, "ip", "addr", "add", "%s/%s" % (self.addr, to_cidr(self.mask)), "dev", self.guest_dev]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            return ret, out, err
        # setup default route
        self.ip_setup_route(nspid)
        self.ip_wait()
        self.delete_netns_link(nspid=nspid)
        return 0, "", ""

    def ip_get_mtu(self):
        # get mtu of the host side ipdev, as reported by "ip link show"
        cmd = ["ip", "link", "show", self.ipdev]
        ret, out, err = self.call(cmd)
        if ret != 0:
            raise ex.excError("failed to get %s mtu: %s" % (self.ipdev, err))
        mtu = out.split()[4]
        return mtu

    def ip_setup_route(self, nspid):
        # bring the guest dev up, replace the default route with the
        # resource gateway, optionally delete the network route, and
        # arp announce the address
        cmd = ["ip", "netns", "exec", nspid, "ip", "route", "del", "default"]
        ret, out, err = self.call(cmd, errlog=False)
        cmd = ["ip", "netns", "exec", nspid, "ip", "link", "set", self.guest_dev, "up"]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            return ret, out, err
        if self.gateway:
            cmd = ["ip", "netns", "exec", nspid, "ip", "route", "replace", "default", "via", self.gateway]
            ret, out, err = self.vcall(cmd)
            if ret != 0:
                return ret, out, err
        if self.del_net_route and self.network:
            cmd = ["ip", "netns", "exec", nspid, "ip", "route", "del", self.network+"/"+to_cidr(self.mask), "dev", self.guest_dev]
            ret, out, err = self.vcall(cmd)
            if ret != 0:
                return ret, out, err
        # announce
        if which("arping") is not None:
            cmd = ["ip", "netns", "exec", nspid, "arping", "-c", "1", "-A", "-I", self.guest_dev, self.addr]
            self.log.info(" ".join(cmd))
            out, err, ret = justcall(cmd)

    def ip_wait(self):
        # ip activation may still be incomplete
        # wait for activation, to avoid startapp scripts to fail binding their listeners
        for _ in range(15, 0, -1):
            if check_ping(self.addr, timeout=1, count=1):
                return
        raise ex.excError("timed out waiting for ip activation")

    def get_nspid(self):
        # pid of the container's init process, used as netns handle
        container_id = self.container_id(refresh=True)
        if container_id is None:
            return
        cmd = self.svc.dockerlib.docker_cmd + ["inspect", "--format='{{ .State.Pid }}'", container_id]
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError("failed to get nspid: %s" % err)
        nspid = out.strip()
        if "'" in nspid:
            # strip the quoting added by some docker versions
            nspid = nspid.replace("'","")
        if nspid == "0":
            raise ex.excError("nspid is 0")
        return nspid

    def delete_netns_link(self, nspid=None, verbose=True):
        # remove the /var/run/netns/<nspid> symlink, best effort
        if nspid is None:
            nspid = self.get_nspid()
        run_d = "/var/run/netns"
        if not os.path.exists(run_d):
            return
        run_netns = os.path.join(run_d, nspid)
        try:
            os.unlink(run_netns)
            if verbose:
                self.log.info("remove %s" % run_netns)
        except:
            pass

    def create_netns_link(self, nspid=None, verbose=True):
        # expose the container's net namespace to "ip netns" by
        # symlinking /var/run/netns/<nspid> to /proc/<nspid>/ns/net
        if nspid is None:
            nspid = self.get_nspid()
        if nspid is None:
            raise ex.excError("can not determine nspid")
        run_d = "/var/run/netns"
        if not os.path.exists(run_d):
            os.makedirs(run_d)
        run_netns = os.path.join(run_d, nspid)
        proc_netns = "/proc/%s/ns/net" % nspid
        if os.path.exists(proc_netns) and not os.path.exists(run_netns):
            if verbose:
                self.log.info("create symlink %s -> %s" % (run_netns, proc_netns))
            os.symlink(proc_netns, run_netns)
        # keep a reference on the run_netns file to prevent its removal during our run
        try:
            self.ns_fd = open(run_netns, "r")
        except:
            self.ns_fd = None

    def stopip_cmd(self):
        # delete the address from the in-container interface
        intf = self.get_docker_interface()
        nspid = self.get_nspid()
        self.create_netns_link(nspid=nspid)
        if intf is None:
            raise ex.excContinueAction("can't find on which interface %s is plumbed in container %s" % (self.addr, self.container_id()))
        if self.mask is None:
            raise ex.excContinueAction("netmask is not set")
        cmd = ["ip", "netns", "exec", nspid, "ip", "addr", "del", self.addr+"/"+to_cidr(self.mask), "dev", intf]
        ret, out, err = self.vcall(cmd)
        self.delete_netns_link(nspid=nspid)
        return ret, out, err
opensvc-1.8~20170412/lib/rcDiskInfoOSF1.py 0000644 0001750 0001750 00000005462 13073467726 020076 0 ustar jkelbert jkelbert from __future__ import print_function
from rcUtilities import justcall
import rcDiskInfo
import re
regex = re.compile("^\W*[0-9]*:")
regex_path = re.compile('\W*[0-9]*\W+')
class diskInfo(rcDiskInfo.diskInfo):
    """
    Tru64/OSF1 disk information provider, built from the output of
    "hwmgr show scsi" and "hwmgr get att", cached at init.
    """
    def __init__(self):
        self.load_cache()

    def is_id(self, line):
        # device header lines start with "<id>:"
        if regex.match(line) is None:
            return False
        return True

    def cache_add(self, id, dev, wwid, path_count):
        # record one device entry, enriched with hwmgr attributes
        d = self.devattr(id)
        vid = d['manufacturer']
        pid = d['model']
        size = d['mb']
        self.h[dev] = dict(
            wwid=wwid,
            vid=vid,
            pid=pid,
            size=size,
            id=id,
            path_count=path_count
        )

    def load_cache(self):
        """Build the dev -> attributes map from hwmgr output."""
        self.h = {}
        cmd = ["hwmgr", "show", "scsi", "-type", "disk", "-active", "-full"]
        out, err, ret = justcall(cmd)
        # path_count also flags whether a device entry is in progress
        path_count = -1
        for e in out.split('\n'):
            if len(e) == 0:
                continue
            if self.is_id(e):
                if path_count >= 0:
                    # flush the previously parsed device entry
                    self.cache_add(id, dev, wwid, path_count)
                l = e.split()
                if len(l) < 8:
                    continue
                id = l[0].strip(':')
                dev = l[7]
                path_count = 0
            elif 'WWID' in e:
                wwid = e.split(":")[-1].replace('-','').lower()
                wwid = wwid.strip('"').replace(" ", "_")
            elif regex_path.match(e) is not None and 'valid' in e:
                path_count += 1
        if path_count >= 0:
            # flush the last device entry
            self.cache_add(id, dev, wwid, path_count)

    def devattr(self, id):
        """Return model/manufacturer/capacity attributes of device <id>,
        with the size pre-computed in MB under key 'mb'."""
        d = {'capacity': 0, 'block_size': 0, 'manufacturer': '', 'model': '', 'mb': 0}
        cmd = ["hwmgr", "get", "att", "-id", id,
               "-a", "model",
               "-a", "manufacturer",
               "-a", "capacity",
               "-a", "block_size"]
        out, err, ret = justcall(cmd)
        if ret != 0:
            return d
        for line in out.split("\n"):
            if not line.startswith(' '):
                continue
            l = line.split('=')
            if len(l) != 2:
                continue
            d[l[0].strip()] = l[1].strip()
        d['mb'] = int(d['capacity']) * int(d['block_size']) // 1024 // 1024
        return d

    def get(self, dev, type):
        # lookup helper keyed on the short device name
        dev = dev.replace('/dev/rdisk/','')
        dev = dev.replace('/dev/disk/','')
        if dev not in self.h:
            return
        return self.h[dev][type]

    def disk_id(self, dev):
        return self.get(dev, 'wwid')

    def disk_vendor(self, dev):
        return self.get(dev, 'vid')

    def disk_model(self, dev):
        return self.get(dev, 'pid')

    def disk_size(self, dev):
        return self.get(dev, 'size')
if __name__ == "__main__":
    # Ad-hoc smoke test: dump the parsed device cache.
    di = diskInfo()
    print(di.h.items())
opensvc-1.8~20170412/lib/hostidOSF1.py 0000644 0001750 0001750 00000000102 13073467726 017317 0 ustar jkelbert jkelbert from uuid import getnode
def hostid():
    """Return this host's identifier: the uuid.getnode() hardware
    address value, rendered as a decimal string."""
    return "%d" % getnode()
opensvc-1.8~20170412/lib/snapVxfsHP-UX.py 0000644 0001750 0001750 00000005275 13073467726 020006 0 ustar jkelbert jkelbert import os
from rcGlobalEnv import rcEnv
from rcUtilities import which, qcall, protected_mount
import rcExceptions as ex
import snap
class Snap(snap.Snap):
    """
    HP-UX vxfs snapshot driver: snapshot a mounted vxfs logical volume
    into a 10%-sized lv mounted with -o snapof under /service/tmp.
    """
    def lv_exists(self, device):
        # True if lvdisplay knows the device
        if qcall(['lvdisplay', device]) == 0:
            return True
        return False

    def lv_info(self, device):
        """Return (vg_name, lv_name, lv_size) parsed from lvdisplay
        output, or (None, None, None) when the command fails."""
        (ret, buff, err) = self.call(['lvdisplay', device])
        if ret != 0:
            return (None, None, None)
        vg_name = None
        lv_name = None
        lv_size = 0
        for line in buff.split('\n'):
            if "VG Name" in line:
                vg_name = line.split()[-1]
            if "LV Name" in line:
                lv_name = line.split()[-1]
            if "LV Size" in line:
                lv_size = int(line.split()[-1])
        return (vg_name, lv_name, lv_size)

    def snapcreate(self, m):
        """Create and mount a snapshot of mount resource <m>, recording
        it in self.snaps keyed on the mount point."""
        snap_name = ''
        snap_mnt = ''
        (vg_name, lv_name, lv_size) = self.lv_info(m.device)
        if lv_name is None:
            self.log.error("can not snap %s: not a logical volume"%m.device)
            raise ex.syncNotSnapable
        snap_name = 'osvc_sync_'+os.path.basename(lv_name)
        if self.lv_exists(os.path.join(vg_name, snap_name)):
            self.log.error("snap of %s already exists"%(lv_name))
            raise ex.syncSnapExists
        # snapshot lv sized at 10% of the origin lv
        (ret, buff, err) = self.vcall(['lvcreate', '-L', str(lv_size//10)+'M', '-n', snap_name, vg_name])
        if ret != 0:
            raise ex.syncSnapCreateError
        snap_mnt = '/service/tmp/osvc_sync_'+os.path.basename(vg_name)+'_'+os.path.basename(lv_name)
        if not os.path.exists(snap_mnt):
            os.makedirs(snap_mnt, 0o755)
        snap_dev = os.path.join(vg_name, snap_name)
        (ret, buff, err) = self.vcall(['mount', '-F', 'vxfs', '-o', 'ro,snapof='+m.device, snap_dev, snap_mnt])
        if ret != 0:
            raise ex.syncSnapMountError
        self.snaps[m.mount_point] = dict(lv_name=lv_name,
                                         vg_name=vg_name,
                                         snap_name=snap_name,
                                         snap_mnt=snap_mnt,
                                         snap_dev=snap_dev)

    def snapdestroykey(self, s):
        """Unmount and remove the snapshot recorded under key <s>."""
        if protected_mount(self.snaps[s]['snap_mnt']):
            self.log.error("the snapshot is no longer mounted in %s. panic."%self.snaps[s]['snap_mnt'])
            raise ex.excError
        """ fuser on HP-UX outs to stderr ...
        """
        cmd = ['fuser', '-kc', self.snaps[s]['snap_mnt']]
        ret = qcall(cmd)
        cmd = ['umount', self.snaps[s]['snap_mnt']]
        (ret, out, err) = self.vcall(cmd)
        cmd = ['lvremove', '-f', self.snaps[s]['snap_dev']]
        (ret, buff, err) = self.vcall(cmd)
opensvc-1.8~20170412/lib/rcSymmetrix.py 0000644 0001750 0001750 00000066110 13073467726 017735 0 ustar jkelbert jkelbert from __future__ import print_function
import sys
import os
import ConfigParser
import json
import time
from xml.etree.ElementTree import XML, fromstring
import rcExceptions as ex
from rcGlobalEnv import rcEnv, Storage
from rcUtilities import justcall, which, convert_size
from rcOptParser import OptParser
from optparse import Option
PROG = "nodemgr array"
OPT = Storage({
"help": Option(
"-h", "--help", action="store_true", dest="parm_help",
help="show this help message and exit"),
"array": Option(
"-a", "--array", action="store", dest="array_name",
help="The name of the array, as defined in auth.conf"),
"name": Option(
"--name", action="store", dest="name",
help="The device identifier name (ex: mysvc_1)"),
"dev": Option(
"--dev", action="store", dest="dev",
help="The device id (ex: 0A04)"),
"mappings": Option(
"--mappings", action="append", dest="mappings",
help="A :,,... mapping used in add map in replacement of --targetgroup and --initiatorgroup. Can be specified multiple times."),
"size": Option(
"--size", action="store", dest="size",
help="The disk size, expressed as a size expression like 1g, 100mib, ..."),
"slo": Option(
"--slo", action="store", dest="slo",
help="The thin device Service Level Objective."),
"srp": Option(
"--srp", action="store", dest="srp",
help="The Storage Resource Pool hosting the device."),
})
GLOBAL_OPTS = [
OPT.array,
]
DEPRECATED_ACTIONS = []
ACTIONS = {
"Generic actions": {
"add_disk": {
"msg": "Add and present a thin device.",
"options": [
OPT.name,
OPT.size,
OPT.mappings,
OPT.slo,
OPT.srp,
],
},
"add_map": {
"msg": "Present a device.",
"options": [
OPT.dev,
OPT.mappings,
OPT.slo,
OPT.srp,
],
},
"del_disk": {
"msg": "Unpresent and delete a thin device.",
"options": [
OPT.dev,
],
},
"del_map": {
"msg": "Unpresent a device.",
"options": [
OPT.dev,
],
},
"rename_disk": {
"msg": "Rename a device.",
"options": [
OPT.dev,
OPT.name,
],
},
"resize_disk": {
"msg": "Resize a thin device.",
"options": [
OPT.dev,
OPT.size,
],
},
},
"Low-level actions": {
"add_tdev": {
"msg": "Add a thin device. No masking.",
"options": [
OPT.name,
OPT.size,
],
},
"del_tdev": {
"msg": "Delete a thin device. No unmasking.",
"options": [
OPT.dev,
],
},
"list_pools": {
"msg": "List thin pools.",
},
"list_sgs": {
"msg": "List storage groups.",
},
"list_tdevs": {
"msg": "List thin devices.",
"options": [
OPT.dev,
],
},
"list_views": {
"msg": "List views, eg. groups of initiators/targets/devices.",
"options": [
OPT.dev,
],
},
},
}
class Arrays(object):
    """
    Enumerate the symmetrix arrays declared in auth.conf and build a
    Vmax or Dmx handler for each, based on the model reported by
    "symcfg list".
    """
    arrays = []

    def find_symcli_path(self):
        """Return the directory hosting the symcli binaries, probing
        PATH then the usual installation locations, or None."""
        symcli_bin = which("symcli")
        if symcli_bin is not None:
            return os.path.dirname(symcli_bin)
        symcli_bin = "/usr/symcli/bin/symcli"
        if os.path.exists(symcli_bin):
            return os.path.dirname(symcli_bin)
        symcli_bin = "/opt/emc/SYMCLI/bin/symcli"
        if os.path.exists(symcli_bin):
            return os.path.dirname(symcli_bin)

    def __init__(self, objects=[]):
        # when a non-empty object list is given, only matching auth.conf
        # sections are considered
        self.objects = objects
        if len(objects) > 0:
            self.filtering = True
        else:
            self.filtering = False
        cf = rcEnv.authconf
        if not os.path.exists(cf):
            return
        conf = ConfigParser.RawConfigParser()
        conf.read(cf)
        self.symms = []
        for s in conf.sections():
            if self.filtering and s not in self.objects:
                continue
            try:
                stype = conf.get(s, 'type')
            except:
                continue
            if stype != "symmetrix":
                continue
            try:
                name = s
            except:
                print("error parsing section", s, file=sys.stderr)
                continue
            if conf.has_option(s, 'symcli_path'):
                symcli_path = conf.get(s, 'symcli_path')
            else:
                # find_symcli_path is a method: it must be called through
                # self (the bare call raised NameError at runtime)
                symcli_path = self.find_symcli_path()
            if symcli_path is None:
                print("symcli path not found for array", s, file=sys.stderr)
                continue
            if conf.has_option(s, 'symcli_connect'):
                symcli_connect = conf.get(s, 'symcli_connect')
                os.environ["SYMCLI_CONNECT"] = symcli_connect
            else:
                symcli_connect = None
            if conf.has_option(s, 'username'):
                username = conf.get(s, 'username')
            else:
                username = None
            if conf.has_option(s, 'password'):
                password = conf.get(s, 'password')
            else:
                password = None
            symcfg = os.path.join(symcli_path, "symcfg")
            if which(symcfg) is None:
                raise ex.excError('can not find symcfg in %s' % symcli_path)
            out, err, ret = justcall([symcfg, 'list', '-sid', name, '-output', 'xml_element'])
            if ret != 0:
                print(err, file=sys.stderr)
                continue
            tree = fromstring(out)
            for symm in tree.getiterator('Symm_Info'):
                model = symm.find('model').text
                if model.startswith('VMAX'):
                    self.arrays.append(Vmax(name, symcli_path, symcli_connect, username, password))
                elif 'DMX' in model or '3000-M' in model:
                    self.arrays.append(Dmx(name, symcli_path, symcli_connect, username, password))
                else:
                    print("unsupported sym model: %s" % model, file=sys.stderr)
        del(conf)

    def get_array(self, name):
        """Return the array handler whose sid matches <name>, or None."""
        for array in self.arrays:
            if array.sid == name:
                return array

    def __iter__(self):
        for array in self.arrays:
            yield(array)
class Sym(object):
    """
    Base driver for an EMC Symmetrix array, wrapping the symcli binaries.

    self.keys lists the dataset names the collector can request; each maps
    to a get_<key>() method returning the raw symcli xml output.
    """
    def __init__(self, sid, symcli_path, symcli_connect, username, password):
        # collector dataset names served by this driver
        self.keys = [
            'sym_info',
            'sym_dir_info',
            'sym_dev_info',
            'sym_dev_wwn_info',
            'sym_dev_name_info',
            'sym_devrdfa_info',
            'sym_ficondev_info',
            'sym_meta_info',
            'sym_disk_info',
            'sym_diskgroup_info',
        ]
        self.sid = sid
        self.symcli_path = symcli_path
        self.symcli_connect = symcli_connect
        self.username = username
        self.password = password

    def set_environ(self):
        # select the configured symapi connection profile, or make sure a
        # stale SYMCLI_CONNECT value does not leak into child processes
        if self.symcli_connect:
            os.environ["SYMCLI_CONNECT"] = self.symcli_connect
        elif "SYMCLI_CONNECT" in os.environ:
            del os.environ["SYMCLI_CONNECT"]

    def symcmd(self, cmd, xml=True):
        """Append -sid and output format options, then run <cmd>. Returns (out, err, ret)."""
        self.set_environ()
        cmd += ['-sid', self.sid]
        if xml:
            cmd += ['-output', 'xml_element']
        return justcall(cmd)

    # thin wrappers prefixing each symcli binary path
    def symsg(self, cmd, xml=True):
        cmd = ['/usr/symcli/bin/symsg'] + cmd
        return self.symcmd(cmd, xml=xml)

    def symcfg(self, cmd, xml=True):
        cmd = ['/usr/symcli/bin/symcfg'] + cmd
        return self.symcmd(cmd, xml=xml)

    def symdisk(self, cmd, xml=True):
        cmd = ['/usr/symcli/bin/symdisk'] + cmd
        return self.symcmd(cmd, xml=xml)

    def symconfigure(self, cmd, xml=True):
        cmd = ['/usr/symcli/bin/symconfigure'] + cmd
        return self.symcmd(cmd, xml=xml)

    def symdev(self, cmd, xml=True):
        cmd = ['/usr/symcli/bin/symdev'] + cmd
        return self.symcmd(cmd, xml=xml)

    # dataset getters: each returns the raw xml buffer (errors are ignored)
    def get_sym_info(self):
        out, err, ret = self.symcfg(["list"])
        return out

    def get_sym_dir_info(self):
        out, err, ret = self.symcfg(['-dir', 'all', '-v', 'list'])
        return out

    def get_sym_dev_info(self):
        out, err, ret = self.symdev(['list'])
        return out

    def get_sym_dev_wwn_info(self):
        out, err, ret = self.symdev(['list', '-wwn'])
        return out

    def get_sym_devrdfa_info(self):
        out, err, ret = self.symdev(['list', '-v', '-rdfa'])
        return out

    def get_sym_ficondev_info(self):
        out, err, ret = self.symdev(['list', '-ficon'])
        return out

    def get_sym_meta_info(self):
        out, err, ret = self.symdev(['list', '-meta', '-v'])
        return out

    def get_sym_dev_name_info(self):
        out, err, ret = self.symdev(['list', '-identifier', 'device_name'])
        return out

    def get_sym_disk_info(self):
        out, err, ret = self.symdisk(['list', '-v'])
        return out

    def get_sym_diskgroup_info(self):
        out, err, ret = self.symdisk(['list', '-dskgrp_summary'])
        return out

    def get_sym_pool_info(self):
        out, err, ret = self.symcfg(['-pool', 'list', '-v'])
        return out

    def get_sym_tdev_info(self):
        out, err, ret = self.symcfg(['list', '-tdev', '-detail'])
        return out

    def get_sym_srp_info(self):
        out, err, ret = self.symcfg(['list', '-srp', '-detail', '-v'])
        return out

    def get_sym_slo_info(self):
        out, err, ret = self.symcfg(['list', '-slo', '-detail', '-v'])
        return out

    def get_sym_sg_info(self):
        out, err, ret = self.symsg(['list', '-v'])
        return out

    def parse_xml(self, buff, key=None, as_list=[], exclude=[]):
        """
        Parse a symcli xml_element buffer into a list of dicts, one per
        <key> element found anywhere in the tree.

        as_list: tags whose (possibly repeated) children accumulate in a list
        exclude: tags skipped entirely
        NOTE(review): the default args are shared mutable lists; harmless as
        they are never mutated, but do not append to them.
        NOTE(review): e.text.startswith() raises AttributeError for elements
        with no text at all — presumably symcli always emits text nodes;
        confirm before feeding other xml through this.
        """
        tree = fromstring(buff)
        data = []
        def parse_elem(elem, as_list=[], exclude=[]):
            d = {}
            for e in list(elem):
                if e.tag in exclude:
                    continue
                if e.text.startswith("\n"):
                    # text starting with a newline marks a structural
                    # element: recurse into its children
                    child = parse_elem(e, as_list, exclude)
                    if e.tag in as_list:
                        if e.tag not in d:
                            d[e.tag] = []
                        d[e.tag].append(child)
                    else:
                        d[e.tag] = child
                else:
                    d[e.tag] = e.text
            return d
        for elem in tree.getiterator(key):
            data.append(parse_elem(elem, as_list, exclude))
        return data

    def get_sym_dev_wwn(self, dev):
        """Return the parsed Device entries (including wwn) for device <dev>."""
        out, err, ret = self.symdev(['list', '-devs', dev, '-wwn'])
        return self.parse_xml(out, key="Device")

    def list_pools(self, **kwargs):
        print(json.dumps(self.get_pools(), indent=4))

    def get_pools(self, **kwargs):
        out = self.get_sym_pool_info()
        data = self.parse_xml(out, key="DevicePool")
        return data

    def get_sgs(self, **kwargs):
        out = self.get_sym_sg_info()
        data = self.parse_xml(out, key="SG_Info")
        return data

    def list_sgs(self, **kwargs):
        print(json.dumps(self.get_sgs(), indent=4))

    def load_names(self):
        # build the dev_name -> user-assigned device name map
        out = self.get_sym_dev_name_info()
        l = self.parse_xml(out, key="Dev_Info")
        self.dev_names = {}
        for d in l:
            self.dev_names[d["dev_name"]] = d["dev_ident_name"]

    def list_tdevs(self, dev=None, **kwargs):
        # decorate each tdev entry with its user-assigned name, when known
        self.load_names()
        data = self.get_tdevs(dev)
        for i, d in enumerate(data):
            if d["dev_name"] not in self.dev_names:
                continue
            data[i]["dev_ident_name"] = self.dev_names[d["dev_name"]]
        print(json.dumps(data, indent=4))

    def get_tdevs(self, dev=None, **kwargs):
        if dev:
            out, err, ret = self.symcfg(['list', '-devs', dev, '-tdev', '-detail'])
        else:
            out, err, ret = self.symcfg(['list', '-tdev', '-detail'])
        data = self.parse_xml(out, key="Device", as_list=["pool"])
        return data

    def list_views(self, dev=None, **kwargs):
        if dev is None:
            print(json.dumps(self.get_views(), indent=4))
            return
        views = self.get_dev_views(dev)
        l = []
        for view in views:
            out, err, ret = self.symaccesscmd(["show", "view", view])
            if out.strip() == "":
                continue
            l.append(self.parse_xml(out, key="View_Info", as_list=["Initiators", "Director_Identification", "SG", "Device", "dev_port_info"], exclude=["Initiator_List"]))
        print(json.dumps(l, indent=4))

    def get_views(self, **kwargs):
        out = self.get_sym_view_aclx()
        if out.strip() == "":
            return []
        data = self.parse_xml(out, key="View_Info", as_list=["Initiators", "Director_Identification", "SG", "Device", "dev_port_info"], exclude=["Initiator_List"])
        return data

    def get_dev_views(self, dev):
        """Return the set of masking view names involving device <dev>."""
        sgs = self.get_dev_sgs(dev)
        views = set()
        for sg in sgs:
            out, err, ret = self.symaccesscmd(["show", sg, "-type", "storage"])
            if out.strip() == "":
                continue
            data = self.parse_xml(out, key="Mask_View_Names")
            views |= set([d["view_name"] for d in data])
        return views

    def get_initiator_views(self, wwn):
        """Return the masking view names involving initiator <wwn>."""
        out, err, ret = self.symaccesscmd(["list", "-type", "initiator", "-wwn", wwn])
        if out.strip() == "":
            return []
        data = self.parse_xml(out, key="Mask_View_Names")
        return [d["view_name"] for d in data]

    def get_view(self, view):
        """Return the parsed detail of a single masking view."""
        out, err, ret = self.symaccesscmd(["show", "view", view])
        data = self.parse_xml(out, key="View_Info", as_list=["Director_Identification", "SG_Child_info", "Initiator", "Device", "dev_port_info"], exclude=["Initiators"])
        return data[0]

    def get_mapping_storage_groups(self, hba_id, tgt_id):
        """
        Return the storage group names exposed to <hba_id> through target
        port <tgt_id>, caching a mapping descriptor per sg in
        self.sg_mappings as a side effect.
        """
        l = set()
        for view in self.get_initiator_views(hba_id):
            view_data = self.get_view(view)
            if "port_info" not in view_data:
                continue
            if "Director_Identification" not in view_data["port_info"]:
                continue
            ports = [e["port_wwn"] for e in view_data["port_info"]["Director_Identification"]]
            if tgt_id not in ports:
                continue
            sg = view_data["stor_grpname"]
            if sg not in self.sg_mappings:
                self.sg_mappings[sg] = []
            self.sg_mappings[sg].append({
                "sg": sg,
                "view_name": view_data["view_name"],
                "hba_id": hba_id,
                "tgt_id": tgt_id,
            })
            l.add(view_data["stor_grpname"])
        return l

    def translate_mappings(self, mappings):
        """Convert 'hba_id:tgt1,tgt2' mapping strings into storage group names."""
        sgs = set()
        for mapping in mappings:
            elements = mapping.split(":")
            hba_id = elements[0]
            targets = elements[-1].split(",")
            for tgt_id in targets:
                sgs |= self.get_mapping_storage_groups(hba_id, tgt_id)
        return sgs
class Vmax(Sym):
    """
    Driver for VMAX-family Symmetrix arrays, using symaccess
    (auto-provisioning groups) for masking operations and symconfigure
    for thin device provisioning.
    """
    def __init__(self, sid, symcli_path, symcli_connect, username, password):
        Sym.__init__(self, sid, symcli_path, symcli_connect, username, password)
        self.keys += [
            'sym_ig_aclx',
            'sym_pg_aclx',
            'sym_sg_aclx',
            'sym_view_aclx',
            'sym_pool_info',
            'sym_tdev_info',
            'sym_sg_info',
            'sym_srp_info',
            'sym_slo_info',
        ]
        # cache: sg name -> list of mapping descriptors, filled by
        # Sym.get_mapping_storage_groups()
        self.sg_mappings = {}
        if 'SYMCLI_DB_FILE' in os.environ:
            # offline mode: locate the aclx masking database next to the db file
            dir = os.path.dirname(os.environ['SYMCLI_DB_FILE'])
            # flat format
            self.aclx = os.path.join(dir, sid+'.aclx')
            if not os.path.exists(self.aclx):
                # emc grab format
                import glob
                files = glob.glob(os.path.join(dir, sid, sid+'*.aclx'))
                if len(files) == 1:
                    self.aclx = files[0]
            if not os.path.exists(self.aclx):
                print("missing file %s"%self.aclx, file=sys.stderr)
        else:
            self.aclx = None

    def symaccesscmd(self, cmd, xml=True):
        """Run a symaccess command, against the live array or an aclx file."""
        self.set_environ()
        cmd = ['/usr/symcli/bin/symaccess'] + cmd
        if self.aclx is None:
            cmd += ['-sid', self.sid]
        else:
            cmd += ['-file', self.aclx]
        if xml:
            cmd += ['-output', 'xml_element']
        return justcall(cmd)

    def get_sym_pg_aclx(self):
        cmd = ['list', '-type', 'port']
        out, err, ret = self.symaccesscmd(cmd)
        return out

    def get_sym_sg_aclx(self):
        cmd = ['list', '-type', 'storage']
        out, err, ret = self.symaccesscmd(cmd)
        return out

    def get_sym_ig_aclx(self):
        cmd = ['list', '-type', 'initiator']
        out, err, ret = self.symaccesscmd(cmd)
        return out

    def get_sym_view_aclx(self):
        cmd = ['list', 'view', '-detail']
        out, err, ret = self.symaccesscmd(cmd)
        return out

    def add_tdev(self, name=None, size=None, **kwargs):
        """
        Create a thin device of <size> (converted to MB) and return the
        parsed device data (dev_name, wwn, ...). Raises excError on
        failure or when the created device name cannot be determined.

        symconfigure syntax reference:
          create dev count=,
                 size = [MB | GB | CYL],
                 emulation=,
                 config=
                 [, preallocate size =
                 [, allocate_type = PERSISTENT]]
                 [, remote_config=, ra_group=]
                 [, sg= [, remote_sg=]]
                 [, mapping to dir
                 [starting] target = ,
                 lun=, vbus=
                 [starting] base_address = [,...]]
                 [, device_attr =
                 [,...]]
                 [, device_name=''[,number= ]];
        """
        if size is None:
            raise ex.excError("The '--size' parameter is mandatory")
        size = convert_size(size, _to="MB")
        _cmd = "create dev count=1, size= %d MB, emulation=FBA, config=TDEV, device_attr=SCSI3_PERSIST_RESERV" % (size)
        if name:
            _cmd += ", device_name=%s" % name
        _cmd += ";"
        cmd = ["-cmd", _cmd, "commit", "-noprompt"]
        out, err, ret = self.symconfigure(cmd, xml=False)
        if ret != 0:
            raise ex.excError(err)
        # symconfigure reports the created device as "New symdev: <dev> ..."
        for line in out.splitlines():
            line = line.strip()
            if line.startswith("New symdev:"):
                l = line.split()
                if len(l) < 3:
                    raise ex.excError("unable to determine the created SymDevName")
                dev = l[2]
                data = self.get_sym_dev_wwn(dev)[0]
                return data
        raise ex.excError("unable to determine the created SymDevName")

    def rename_disk(self, dev=None, name=None, **kwargs):
        """Set the user-assigned device name of <dev> to <name>."""
        if dev is None:
            raise ex.excError("--dev is mandatory")
        if name is None:
            raise ex.excError("--name is mandatory")
        _cmd = "set dev %s device_name='%s';" % (dev, name)
        cmd = ["-cmd", _cmd, "commit", "-noprompt"]
        out, err, ret = self.symconfigure(cmd, xml=False)
        if ret != 0:
            raise ex.excError(err)

    def resize_disk(self, dev=None, size=None, **kwargs):
        """Grow thin device <dev> to <size> (converted to MB)."""
        if dev is None:
            raise ex.excError("The '--dev' parameter is mandatory")
        if size is None:
            raise ex.excError("The '--size' parameter is mandatory")
        size = convert_size(size, _to="MB")
        cmd = ["modify", dev, "-tdev", "-cap", str(size), "-captype", "mb", "-noprompt"]
        out, err, ret = self.symdev(cmd, xml=False)
        if ret != 0:
            raise ex.excError(err)

    def del_tdev(self, dev=None, **kwargs):
        """Delete thin device <dev> and unreference it from the collector."""
        if dev is None:
            raise ex.excError("The '--dev' parameter is mandatory")
        data = self.get_sym_dev_wwn(dev)[0]
        cmd = ["delete", dev, "-noprompt"]
        out, err, ret = self.symdev(cmd, xml=False)
        if ret != 0:
            raise ex.excError(err)
        self.del_diskinfo(data["wwn"])

    def del_disk(self, dev=None, **kwargs):
        """Unmap, deallocate, then delete thin device <dev>."""
        self.del_map(dev)
        self.free_tdev(dev)
        self.del_tdev(dev=dev, **kwargs)

    def del_map(self, dev, **kwargs):
        """Remove <dev> from every storage group it belongs to."""
        for sg in self.get_dev_sgs(dev):
            self.del_tdev_from_sg(dev, sg)

    def free_tdev(self, dev):
        """Start deallocation of <dev> extents and poll until done."""
        out, err, ret = self.symdev(["free", "-devs", dev, "-all", "-noprompt"], xml=False)
        while True:
            if self.tdev_freed(dev):
                break
            time.sleep(5)

    def tdev_freed(self, dev):
        """Return True when no device is still freeing ('None' in verify output)."""
        out, err, ret = self.symcfg(["verify", "-tdevs", "-devs", dev, "-freeingall"], xml=False)
        # guard against empty output: the original indexed [0] unconditionally,
        # raising IndexError and breaking the free_tdev() polling loop
        words = out.strip().split()
        if len(words) > 0 and words[0] == "None":
            return True
        return False

    def add_tdev_to_sg(self, dev, sg):
        cmd = ["-name", sg, "-type", "storage", "add", "dev", dev]
        out, err, ret = self.symaccesscmd(cmd, xml=False)
        if ret != 0:
            print(err, file=sys.stderr)
        return out, err, ret

    def del_tdev_from_sg(self, dev, sg):
        cmd = ["-name", sg, "-type", "storage", "remove", "dev", dev, "-unmap"]
        out, err, ret = self.symaccesscmd(cmd, xml=False)
        if ret != 0:
            print(err, file=sys.stderr)
        return out, err, ret

    def get_dev_sgs(self, dev):
        """Return the names of the storage groups containing <dev>."""
        out, err, ret = self.symaccesscmd(["list", "-type", "storage", "-devs", dev])
        data = self.parse_xml(out, key="Group_Info")
        return [d["group_name"] for d in data]

    def get_sg(self, sg):
        """Return the parsed detail of storage group <sg>."""
        out, err, ret = self.symsg(["show", sg])
        data = self.parse_xml(out, key="SG_Info")
        return data[0]

    def filter_sgs(self, sgs, srp=None, slo=None):
        """Keep only the sgs matching the requested SRP and/or SLO."""
        filtered_sgs = []
        if srp is None and slo is None:
            return sgs
        for sg in sgs:
            data = self.get_sg(sg)
            if srp and data["SRP_name"] != srp:
                continue
            if slo and data["SLO_name"] != slo:
                continue
            filtered_sgs.append(sg)
        return filtered_sgs

    def get_lun(self, dev, hba_id, tgt_id, view_name):
        """
        Return the host lun number of <dev> behind target port <tgt_id>
        in view <view_name>, or None when not mapped.
        """
        view = self.get_view(view_name)
        # find the director port matching the target wwn.
        # NB: the original tested 'port is None' after the loop, which is
        # never true once the loop ran, and left port_id unbound (NameError)
        # when the target wwn was absent. Track port_id explicitly instead.
        port_id = None
        for port in view["port_info"]["Director_Identification"]:
            if port["port_wwn"] == tgt_id:
                port_id = port["port"]
                break
        if port_id is None:
            return
        for device in view["Device"]:
            if device["dev_name"] != dev:
                continue
            for port in device["dev_port_info"]:
                if port_id == port["port"]:
                    return port["host_lun"]

    def get_mappings(self, dev):
        """
        Return {'<hba>:<tgt>': descriptor} for every cached sg mapping
        where <dev> is actually presented.
        NOTE(review): the inner loop iterates all of self.sg_mappings for
        each sg of the device, shadowing the outer 'sg' — looks redundant
        but is kept as-is to preserve behavior; confirm intent upstream.
        """
        mappings = {}
        for sg in self.get_dev_sgs(dev):
            for sg, l in self.sg_mappings.items():
                for d in l:
                    d["lun"] = self.get_lun(dev, d["hba_id"], d["tgt_id"], d["view_name"])
                    if d["lun"] is None:
                        continue
                    mappings[d["hba_id"] + ":" + d["tgt_id"]] = d
        return mappings

    def add_disk(self, name=None, size=None, slo=None, srp=None, mappings={}, **kwargs):
        """Provision a thin device, map it, and report it to the collector."""
        sgs = self.mappings_to_sgs(mappings, slo, srp)
        dev_data = self.add_tdev(name, size, **kwargs)
        self.add_map(dev_data["dev_name"], mappings, slo, srp, sgs)
        self.push_diskinfo(dev_data, name, size, srp, sgs)
        results = {
            "disk_id": dev_data["wwn"],
            "disk_devid": dev_data["dev_name"],
            "mappings": self.get_mappings(dev_data["dev_name"]),
            "driver_data": {
                "dev": dev_data,
            },
        }
        return results

    def add_map(self, dev, mappings={}, slo=None, srp=None, sgs=None, **kwargs):
        """Add <dev> to the storage groups resolved from <mappings>."""
        if sgs is None:
            sgs = self.mappings_to_sgs(mappings, slo, srp)
        for sg in sgs:
            self.add_tdev_to_sg(dev, sg)

    def mappings_to_sgs(self, mappings, slo, srp):
        """Resolve mapping strings to storage groups, filtered by srp/slo."""
        sgs = self.translate_mappings(mappings)
        if len(sgs) == 0:
            raise ex.excError("no storage group found for the requested mappings")
        sgs = self.filter_sgs(sgs, srp=srp, slo=slo)
        return sgs

    def del_diskinfo(self, disk_id):
        """Delete the disk entry on the collector. No-op without a node."""
        if disk_id in (None, ""):
            return
        if self.node is None:
            return
        try:
            ret = self.node.collector_rest_delete("/disks/%s" % disk_id)
        except Exception as exc:
            raise ex.excError(str(exc))
        if "error" in ret:
            raise ex.excError(ret["error"])
        return ret

    def push_diskinfo(self, data, name, size, srp, sgs):
        """Declare the new disk on the collector. No-op without a node."""
        if self.node is None:
            return
        try:
            ret = self.node.collector_rest_post("/disks", {
                "disk_id": data["wwn"],
                "disk_devid": data["dev_name"],
                "disk_name": name if name else "",
                "disk_size": convert_size(size, _to="MB"),
                "disk_alloc": 0,
                "disk_arrayid": self.sid,
                "disk_group": srp,
            })
        except Exception as exc:
            raise ex.excError(str(exc))
        # bugfix: the error flag is in the collector response <ret>, not in
        # the request payload <data> (the original tested the wrong dict)
        if "error" in ret:
            raise ex.excError(ret["error"])
        return ret
class Dmx(Sym):
    """
    Driver for DMX-family Symmetrix arrays, using the symmaskdb database
    for masking operations.
    """
    def __init__(self, sid, symcli_path=None, symcli_connect=None, username=None, password=None):
        # bugfix: the factory instantiates Dmx(name, symcli_path,
        # symcli_connect, username, password) and Sym.__init__ requires all
        # five values; the original signature only accepted <sid> and would
        # raise TypeError on both calls. Extra args default to None for
        # backward compatibility with any Dmx(sid) caller.
        Sym.__init__(self, sid, symcli_path, symcli_connect, username, password)
        self.keys += ['sym_maskdb']
        if 'SYMCLI_DB_FILE' in os.environ:
            # offline mode: locate the masking database next to the db file
            dir = os.path.dirname(os.environ['SYMCLI_DB_FILE'])
            # flat format
            self.maskdb = os.path.join(dir, sid+'.bin')
            if not os.path.exists(self.maskdb):
                # emc grab format
                self.maskdb = os.path.join(dir, sid, 'symmaskdb_backup.bin')
                if not os.path.exists(self.maskdb):
                    print("missing file %s"%self.maskdb, file=sys.stderr)
        else:
            self.maskdb = None

    def symaccesscmd(self, cmd, xml=True):
        """Run a symaccess command, against the live array or a maskdb file."""
        self.set_environ()
        cmd = ['/usr/symcli/bin/symaccess'] + cmd
        if self.maskdb is None:
            cmd += ['-sid', self.sid]
        else:
            cmd += ['-f', self.maskdb]
        if xml:
            cmd += ['-output', 'xml_element']
        return justcall(cmd)

    def get_sym_maskdb(self):
        cmd = ['list', 'database']
        out, err, ret = self.symaccesscmd(cmd)
        return out
def do_action(action, array_name=None, node=None, **kwargs):
    """Locate the named array and invoke the requested action method on it."""
    registry = Arrays()
    array = registry.get_array(array_name)
    if array is None:
        raise ex.excError("array %s not found" % array_name)
    if not hasattr(array, action):
        raise ex.excError("not implemented")
    array.node = node
    result = getattr(array, action)(**kwargs)
    if result is not None:
        print(json.dumps(result, indent=4))
def main(argv, node=None):
    """Parse the command line and execute the selected array action."""
    parser = OptParser(prog=PROG, options=OPT, actions=ACTIONS,
                       deprecated_actions=DEPRECATED_ACTIONS,
                       global_options=GLOBAL_OPTS)
    options, action = parser.parse_args(argv)
    # forward all parsed options as keyword arguments to the action method
    kwargs = vars(options)
    do_action(action, node=node, **kwargs)
# command line entry point: map excError to a message on stderr and exit 1
if __name__ == "__main__":
    try:
        ret = main(sys.argv)
    except ex.excError as exc:
        print(exc, file=sys.stderr)
        ret = 1
    sys.exit(ret)
opensvc-1.8~20170412/lib/checkRaidMegaRaid.py 0000644 0001750 0001750 00000010702 13073467726 020652 0 ustar jkelbert jkelbert import checks
import os
from rcUtilities import justcall, which
from rcGlobalEnv import rcEnv
class check(checks.check):
    """
    Raid status checker for LSI MegaRAID adapters, driven by the MegaCli
    binary. Reports one instance per adapter slot (error count) plus one
    instance per BBU metric.
    """
    # directories searched for the MegaCli binary when not in PATH
    prefixes = [os.path.join(os.sep, "usr", "local", "admin"),
                os.path.join(os.sep, "opt", "MegaRAID", "MegaCli")]
    # candidate binary names, by decreasing preference
    megacli = ["MegaCli64", "MegaCli", "megacli"]
    chk_type = "raid"
    chk_name = "MegaCli"

    def find_megacli(self):
        """Return the MegaCli binary name or path, or None when not installed."""
        for prog in self.megacli:
            if which(prog):
                return prog
            for prefix in self.prefixes:
                megacli = os.path.join(prefix, prog)
                if os.path.exists(megacli):
                    return megacli
        return

    def do_check(self):
        r = self.do_check_ldpdinfo()
        r += self.do_check_bbustatus()
        return r

    def do_check_ldpdinfo(self):
        """Count non-Optimal/non-Online states per adapter slot."""
        megacli = self.find_megacli()
        if megacli is None:
            return self.undef
        # MegaCli writes log files in the cwd: run from the tmp dir and
        # clean previous runs' logs
        os.chdir(rcEnv.pathtmp)
        logs = [os.path.join(rcEnv.pathtmp, 'MegaSAS.log'),
                os.path.join(rcEnv.pathtmp, 'MegaCli.log'),
                os.path.join(rcEnv.pathtmp, 'MegaRaid.log')]
        for log in logs:
            if not os.path.exists(log):
                continue
            os.unlink(log)
        cmd = [megacli, '-LdPdInfo', '-aALL']
        out, err, ret = justcall(cmd)
        if ret != 0:
            return self.undef
        lines = out.split('\n')
        if len(lines) == 0:
            return self.undef
        r = []
        slot = ""
        errs = 0
        for line in lines:
            if line.startswith('Adapter'):
                # new adapter section: flush the error count of the previous one
                l = line.split('#')
                if slot != "":
                    r.append({
                        'chk_instance': slot,
                        'chk_value': str(errs),
                        'chk_svcname': '',
                    })
                slot = 'slot'+l[-1]
                errs = 0
            if (line.startswith('State:') and 'Optimal' not in line) or \
               (line.startswith('Firmware state:') and 'Online' not in line):
                errs += 1
        # flush the last adapter section
        if slot != "":
            r.append({
                'chk_instance': slot,
                'chk_value': str(errs),
                'chk_svcname': '',
            })
        return r

    def do_check_bbustatus(self):
        """Report battery presence, charge, temperature and health per slot."""
        megacli = self.find_megacli()
        if megacli is None:
            return self.undef
        # MegaCli writes log files in the cwd: run from the tmp dir and
        # clean previous runs' logs
        os.chdir(rcEnv.pathtmp)
        logs = [os.path.join(rcEnv.pathtmp, 'MegaSAS.log'),
                os.path.join(rcEnv.pathtmp, 'MegaCli.log'),
                os.path.join(rcEnv.pathtmp, 'MegaRaid.log')]
        for log in logs:
            if not os.path.exists(log):
                continue
            os.unlink(log)
        cmd = [megacli, '-AdpBbuCmd', '-GetBbuStatus', '-aALL']
        out, err, ret = justcall(cmd)
        if ret != 0:
            return self.undef
        lines = out.split('\n')
        if len(lines) == 0:
            return self.undef
        r = []
        slot = ""
        for line in lines:
            line = line.strip()
            if 'Adapter:' in line:
                l = line.split()
                slot = 'slot'+l[-1]
            if line.startswith('BatteryType:') and 'No Battery' in line:
                val = 1
                r.append({
                    'chk_instance': '%s battery NoBattery'%slot,
                    'chk_value': str(val),
                    'chk_svcname': '',
                })
            if line.startswith('Relative State of Charge:'):
                val = line.strip('%').split()[-1]
                r.append({
                    'chk_instance': '%s battery charge'%slot,
                    'chk_value': str(val),
                    'chk_svcname': '',
                })
            if line.startswith('Temperature:'):
                val = line.split()[-2]
                r.append({
                    'chk_instance': '%s battery temp'%slot,
                    'chk_value': str(val),
                    'chk_svcname': '',
                })
            if line.startswith('isSOHGood:'):
                if 'Yes' in line:
                    val = 0
                else:
                    val = 1
                r.append({
                    'chk_instance': '%s battery isSOHGood'%slot,
                    'chk_value': str(val),
                    'chk_svcname': '',
                })
        return r
opensvc-1.8~20170412/lib/resDiskLoop.py 0000644 0001750 0001750 00000001102 13073467726 017633 0 ustar jkelbert jkelbert import resources as Res
from rcGlobalEnv import rcEnv
class Disk(Res.Resource):
    """ basic loopback device resource
    """
    def __init__(self, rid=None, loopFile=None, **kwargs):
        # NOTE(review): label concatenation assumes loopFile is a string;
        # a None loopFile would raise TypeError here — confirm callers
        # always provide it.
        Res.Resource.__init__(self, rid, "disk.loop", **kwargs)
        self.loopFile = loopFile
        self.label = "loop "+loopFile

    def info(self):
        """Return the resource info key/value pairs for the collector."""
        return self.fmt_info([["file", self.loopFile]])

    def __str__(self):
        return "%s loopfile=%s" % (Res.Resource.__str__(self),\
                self.loopFile)
# show the class documentation when run directly
if __name__ == "__main__":
    help(Disk)
opensvc-1.8~20170412/lib/resSyncIbmdsSnap.py 0000644 0001750 0001750 00000020677 13073467726 020646 0 ustar jkelbert jkelbert import os
import logging
from rcGlobalEnv import rcEnv
from rcUtilities import which
import rcExceptions as ex
import rcStatus
import time
import datetime
import resSync
import rcIbmDs
class syncIbmdsSnap(resSync.Sync):
    """
    Sync resource driver for IBM DS8000 FlashCopy pairs, driven through
    the dscli command of the rcIbmDs array object.
    """
    def resyncflash(self):
        """Resync all configured pairs, ESE targets and others separately."""
        if self.array is None:
            self.array = rcIbmDs.IbmDss().get(self.arrayname)
        data = self.lsflash()
        ese_pairs = []
        other_pairs = []
        for d in data:
            if d['isTgtSE'] == 'ESE':
                ese_pairs.append(d['ID'])
            else:
                other_pairs.append(d['ID'])
        # refuse to act if some configured pairs are not present on the array
        present_pairs = set(map(lambda x: x['ID'], data))
        missing_pairs = list(set(self.pairs) - present_pairs)
        if len(missing_pairs) > 0:
            missing_pairs.sort()
            raise ex.excError("refuse to resync as %s pairs are not currently configured"%', '.join(missing_pairs))
        self._resyncflash(ese_pairs, '-tgtse')
        self._resyncflash(other_pairs)

    def _resyncflash(self, pairs, options=None):
        if len(pairs) == 0:
            return
        if self.recording:
            self._resyncflash_recording(pairs, options=options)
        else:
            self._resyncflash_norecording(pairs, options=options)

    def _resyncflash_norecording(self, pairs, options=None):
        # without change recording, drop the pairs then re-create them
        s = 'rmflash -dev %s -quiet' % self.arrayname
        l = [s]
        l.append(' '.join(pairs))
        cmd = ' '.join(l)
        out, err = self.array.dscli(cmd, log=self.log)
        if len(err) > 0:
            raise ex.excError(err)
        s = 'mkflash -dev %s -persist' % self.arrayname
        if self.bgcopy:
            s += ' -cp'
        else:
            s += ' -nocp'
        l = [s]
        if options is not None:
            l.append(options)
        l.append(' '.join(pairs))
        cmd = ' '.join(l)
        out, err = self.array.dscli(cmd, log=self.log)
        if len(err) > 0:
            raise ex.excError(err)

    def _resyncflash_recording(self, pairs, options=None):
        # with change recording, an incremental resync is possible
        s = 'resyncflash -dev %s -persist -record' % self.arrayname
        if self.bgcopy:
            s += ' -cp'
        else:
            s += ' -nocp'
        l = [s]
        if options is not None:
            l.append(options)
        l.append(' '.join(pairs))
        cmd = ' '.join(l)
        out, err = self.array.dscli(cmd, log=self.log)
        if len(err) > 0:
            raise ex.excError(err)

    def can_sync(self, target=None):
        self.get_last()
        if self.skip_sync(self.last):
            return False
        return True

    def get_last(self, data=None):
        """Store in self.last the oldest DateSynced among the pairs."""
        if data is None:
            data = self.lsflash()
        if len(data) == 0:
            return
        lastsync = datetime.datetime.now()
        for _data in data:
            _lastsync = _data['DateSynced']
            try:
                _lastsync = datetime.datetime.strptime(_lastsync, "%a %b %d %H:%M:%S %Z %Y")
            except ValueError:
                # workaround hp-ux python 2.6
                _lastsync = _lastsync.replace("CET", "MET")
                _lastsync = datetime.datetime.strptime(_lastsync, "%a %b %d %H:%M:%S %Z %Y")
            if _lastsync < lastsync:
                lastsync = _lastsync
        self.last = lastsync

    def _status(self, verbose=False):
        """Evaluate recording/persistence/state/age of every pair."""
        try:
            data = self.lsflash()
            self.get_last(data)
        except ex.excError as e:
            self.status_log(str(e))
            return rcStatus.WARN
        r = rcStatus.UP
        record_disabled = []
        persist_disabled = []
        record_enabled = []
        state_invalid = []
        for _data in data:
            if _data['Recording'] == "Disabled":
                record_disabled.append(_data['ID'])
            elif _data['Recording'] == "Enabled":
                record_enabled.append(_data['ID'])
            if _data['State'] != "Valid":
                state_invalid.append(_data['ID'])
            if _data['Persistent'] == "Disabled":
                persist_disabled.append(_data['ID'])
        # warn when the recording state does not match the configuration
        if self.recording and len(record_disabled) > 0:
            self.status_log("Recording disabled on %s"%','.join(record_disabled))
            r = rcStatus.WARN
        elif not self.recording and len(record_enabled) > 0:
            self.status_log("Recording enabled on %s"%','.join(record_enabled))
            r = rcStatus.WARN
        if len(state_invalid) > 0:
            self.status_log("State not valid on %s"%','.join(state_invalid))
            r = rcStatus.WARN
        if len(persist_disabled) > 0:
            self.status_log("Persistent disabled on %s"%','.join(persist_disabled))
            r = rcStatus.WARN
        # warn on configured pairs absent from the array
        pairs = []
        for d in data:
            if 'ID' not in d:
                continue
            pairs.append(d['ID'])
        missing = set(self.pairs) - set(pairs)
        missing = sorted(list(missing))
        if len(missing) > 0:
            self.status_log("Missing flashcopy on %s"%','.join(missing))
            r = rcStatus.WARN
        if self.last is None:
            return rcStatus.WARN
        elif self.last < datetime.datetime.now() - datetime.timedelta(minutes=self.sync_max_delay):
            self.status_log("Last sync on %s older than %d minutes"%(self.last, self.sync_max_delay))
            return rcStatus.WARN
        elif r == rcStatus.WARN:
            return rcStatus.WARN
        self.status_log("Last sync on %s"%self.last)
        return rcStatus.UP

    def sync_break(self):
        pass

    def sync_resync(self):
        self.resyncflash()

    def sync_update(self):
        self.resyncflash()

    def start(self):
        pass

    def __init__(self,
                 rid=None,
                 pairs=[],
                 array=None,
                 bgcopy=True,
                 recording=True,
                 **kwargs):
        # NOTE(review): pairs=[] is a shared mutable default — harmless as
        # long as it is never mutated in place; it is only read here.
        resSync.Sync.__init__(self,
                              rid=rid,
                              type="sync.ibmdssnap",
                              **kwargs)
        self.label = "flash copy %s"%','.join(pairs)
        self.pairs = pairs
        self.arrayname = array
        self.recording = recording
        self.bgcopy = bgcopy
        self.array = None
        self.last = None
        # dscli session preamble: delimited output with headers, no banner
        self.params = "setenv -banner off -header on -format delim\n"
        self.default_schedule = "@0"

    def lsflash(self):
        """Return the parsed 'lsflash -l' dataset for the configured pairs."""
        if self.array is None:
            self.array = rcIbmDs.IbmDss().get(self.arrayname)
        out, err = self.array.dscli(self.params+'lsflash -l -dev %s ' % self.arrayname + ' '.join(self.pairs))
        if 'No Flash Copy found' in out:
            return []
        data = self.parseblock(0, out)
        return data

    def getblock(self, n, s):
        """
        Return the lines of the n-th '===' delimited block of dscli output,
        starting at the header line just above the '===' separator.
        """
        lines = s.replace('dscli> ', '').split('\n')
        begin = None
        end = None
        met = 0
        for i, line in enumerate(lines):
            if line.startswith("==="):
                met += 1
                if met < n:
                    continue
                if begin is None:
                    begin = i-1
                else:
                    end = i-1
                    break
        if end is None:
            end = i
        return lines[begin:end]

    def parseblock(self, n, out):
        """
        Parse a delimited dscli block into a list of dicts. Headers carrying
        a '(2^xB)' unit suffix are stripped and their column values converted
        to MB using the advertized power-of-two multiplier.
        """
        data = []
        lines = self.getblock(n, out)
        if len(lines) < 3:
            return
        headers = lines[0].split(',')
        headers_multipliers = []
        for i, h in enumerate(headers):
            if '^' not in h:
                headers_multipliers.append(None)
                continue
            x = h[h.index('^')+1:h.index('B)')]
            x = int(x)
            headers_multipliers.append((2**x)/1024/1024)
            # strip the unit suffix; suffix '_' until unique among headers
            stripped_header = headers[i][:headers[i].index(' (')]
            while stripped_header in headers:
                stripped_header += "_"
            headers[i] = stripped_header
        for line in lines[2:]:
            d = {}
            l = line.split(',')
            for i, key in enumerate(headers):
                if i >= len(l):
                    raise ex.excError("the command dataset does not match its advertized columning")
                key = key.strip()
                if headers_multipliers[i] is not None:
                    try:
                        d[key] = int(float(l[i]) * headers_multipliers[i])
                    except:
                        d[key] = l[i]
                else:
                    d[key] = l[i]
            data.append(d)
        return data

    def __str__(self):
        return "%s pairs=%s" % (resSync.Sync.__str__(self), ','.join(self.pairs))
opensvc-1.8~20170412/lib/rcCloudAmazon.py 0000644 0001750 0001750 00000002403 13073467726 020143 0 ustar jkelbert jkelbert import rcCloud
import rcExceptions as ex
import socket
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
import libcloud.security
except ImportError:
raise ex.excInitError("apache-libcloud module must be installed")
class Cloud(rcCloud.Cloud):
    """
    Amazon cloud driver, backed by apache-libcloud. The auth section must
    provide 'provider', 'access_key_id' and 'secret_key'.
    """
    mode = 'amazon'

    def __init__(self, s, auth):
        rcCloud.Cloud.__init__(self, s, auth)
        if 'access_key_id' not in auth:
            raise ex.excInitError("option 'access_key_id' is mandatory in amazon section")
        if 'provider' not in auth:
            raise ex.excInitError("option 'provider' is mandatory in amazon section")
        if 'secret_key' not in auth:
            raise ex.excInitError("option 'secret_key' is mandatory in amazon section")
        o = get_driver(auth['provider'])
        self.driver = o(auth['access_key_id'], auth['secret_key'])
        if 'proxy' in auth:
            self.driver.connection.set_http_proxy(proxy_url=auth['proxy'])

    def app_id(self, svcname):
        """
        Return the app code embedded in the service name: the second-to-last
        dot-separated token once the manager suffix is removed.

        bugfix: the original used svcname.rstrip(manager), which strips a
        *character set* rather than a suffix and could eat extra trailing
        characters of the service name; remove the suffix explicitly.
        """
        manager = self.auth['manager']
        if manager and svcname.endswith(manager):
            svcname = svcname[:-len(manager)]
        return svcname.split('.')[-2]

    def cloud_id(self):
        return self.auth['provider']

    def app_cloud_id(self):
        return self.cloud_id()

    def list_svcnames(self):
        # not implemented for amazon: no service name discovery
        l = []
        return l
opensvc-1.8~20170412/lib/resSync.py 0000644 0001750 0001750 00000013156 13073467726 017037 0 ustar jkelbert jkelbert import os
import logging
import rcExceptions as ex
import resources as Res
import datetime
import time
import rcStatus
from rcGlobalEnv import rcEnv
from rcScheduler import *
cache_remote_node_env = {}
class Sync(Res.Resource, Scheduler):
    """
    Base class for all sync resource drivers: scheduling, freshness
    checks and remote node validation helpers.
    """
    def __init__(self,
                 rid=None,
                 sync_max_delay=None,
                 schedule=None,
                 **kwargs):
        # max allowed staleness in minutes before status degrades
        if sync_max_delay is None:
            self.sync_max_delay = 1500
        else:
            self.sync_max_delay = sync_max_delay
        # default sync window: 03:59-05:59, every 121 minutes
        if schedule is None:
            self.schedule = "03:59-05:59@121"
        else:
            self.schedule = schedule
        Res.Resource.__init__(self, rid=rid, **kwargs)

    def can_sync(self, target):
        # base implementation: always allowed; drivers override
        return True

    def check_timestamp(self, ts, comp='more', delay=10):
        """ Return False if timestamp is fresher than now-interval
            Return True otherwize.
            Zero is a infinite interval

            NOTE(review): despite the docstring, delay == 0 raises a bare
            exception here rather than meaning "infinite".
            NOTE(review): the 'more' and 'less' branches are identical;
            presumably one was meant to invert the comparison — confirm
            before relying on the comp parameter.
        """
        if delay == 0:
            raise
        limit = ts + datetime.timedelta(minutes=delay)
        if comp == "more" and datetime.datetime.now() < limit:
            return False
        elif comp == "less" and datetime.datetime.now() < limit:
            return False
        else:
            return True
        return True  # unreachable

    def skip_sync(self, ts):
        # only the scheduler (cron) honors the sync schedule
        if not self.svc.options.cron:
            return False
        if self.svc.sched.skip_action_schedule(self.rid, "sync_schedule", last=ts):
            return True
        return False

    def alert_sync(self, ts):
        """Return True when the last sync timestamp warrants an alert."""
        if ts is None:
            return True
        if not self.check_timestamp(ts, comp="less", delay=self.sync_max_delay):
            return False
        return True

    def remote_fs_mounted(self, node):
        """
        Verify the remote fs is mounted. Some sync resource might want to abort in
        this case.
        """
        if self.dstfs is None:
            # No dstfs check has been configured. Assume the admin knows better.
            return True
        ruser = self.svc.node.get_ruser(node)
        cmd = rcEnv.rsh.split(' ')+['-l', ruser, node, '--', 'LANG=C', 'df', self.dstfs]
        (ret, out, err) = self.call(cmd, cache=True, errlog=False)
        if ret != 0:
            raise ex.excError
        """
        # df /zones
        /zones (rpool/zones ):131578197 blocks 131578197 files
        ^
        separator !
        # df /zones/frcp03vrc0108/root
        /zones/frcp03vrc0108/root(rpool/zones/frcp03vrc0108/rpool/ROOT/solaris-0):131578197 blocks 131578197 files
        ^
        no separator !
        """
        if self.dstfs+'(' not in out and self.dstfs not in out.split():
            self.log.error("The destination fs %s is not mounted on node %s. refuse to sync %s to protect parent fs"%(self.dstfs, node, self.dst))
            return False
        return True

    def remote_node_env(self, node, target):
        """
        Verify the remote node's env (PRD/non-PRD) is compatible with the
        sync target type. The result of the remote query is cached
        module-wide in cache_remote_node_env.
        """
        if target == 'drpnodes':
            expected_type = list(set(rcEnv.allowed_svc_envs) - set(['PRD']))
        elif target == 'nodes':
            if self.svc.svc_env == "PRD":
                expected_type = ["PRD"]
            else:
                expected_type = list(set(rcEnv.allowed_svc_envs) - set(["PRD"]))
        else:
            self.log.error('unknown sync target: %s'%target)
            raise ex.excError
        ruser = self.svc.node.get_ruser(node)
        rcmd = [rcEnv.nodemgr, 'get', '--param', 'node.env']
        if ruser != "root":
            rcmd = ['sudo'] + rcmd
        if node not in cache_remote_node_env:
            cmd = rcEnv.rsh.split(' ')+['-l', ruser, node, '--'] + rcmd
            (ret, out, err) = self.call(cmd, cache=True)
            if ret != 0:
                return False
            words = out.split()
            if len(words) == 1:
                cache_remote_node_env[node] = words[0]
            else:
                cache_remote_node_env[node] = out
        if cache_remote_node_env[node] in expected_type:
            return True
        self.log.error("incompatible remote node '%s' env: '%s' (expected in %s)"%\
                       (node, cache_remote_node_env[node], ', '.join(expected_type)))
        return False

    def pre_sync_check_svc_not_up(self):
        """Abort the sync (excAbortAction) when the service is not up."""
        if self.svc.options.force:
            self.log.info("skip service up status check because --force is set")
        else:
            s = self.svc.group_status(excluded_groups=set(["sync", "hb", "app"]))
            if s['overall'].status != rcStatus.UP:
                if self.svc.options.cron:
                    self.log.debug("won't sync this resource for a service not up")
                else:
                    self.log.info("won't sync this resource for a service not up")
                raise ex.excAbortAction

    def pre_sync_check_flex_primary(self):
        """ Refuse to sync from a flex non-primary node
        """
        if self.svc.clustertype in ["flex", "autoflex"] and \
           self.svc.flex_primary != rcEnv.nodename:
            if self.svc.options.cron:
                self.log.debug("won't sync this resource from a flex non-primary node")
            else:
                self.log.info("won't sync this resource from a flex non-primary node")
            raise ex.excAbortAction

    def pre_sync_check_prd_svc_on_non_prd_node(self):
        """Refuse to sync a PRD service hosted on a non-PRD node."""
        if self.svc.svc_env == 'PRD' and rcEnv.node_env != 'PRD':
            if self.svc.options.cron:
                self.log.debug("won't sync a PRD service running on a !PRD node")
            else:
                self.log.info("won't sync a PRD service running on a !PRD node")
            raise ex.excAbortAction
opensvc-1.8~20170412/lib/checkFsInodeLinux.py 0000644 0001750 0001750 00000003364 13073467726 020756 0 ustar jkelbert jkelbert import checks
from rcUtilities import justcall
class check(checks.check):
    """
    Inode usage checker for locally mounted Linux filesystems, based on
    'df -lPi'. Reports the inode use percentage per mount point.
    """
    chk_type = "fs_i"

    def find_svc(self, mountpt):
        """Return the svcname owning the fs resource mounted at <mountpt>, or ''."""
        for svc in self.svcs:
            for resource in svc.get_resources('fs'):
                if resource.mount_point == mountpt:
                    return svc.svcname
        return ''

    def do_check(self):
        cmd = ['df', '-lPi']
        (out,err,ret) = justcall(cmd)
        if ret != 0:
            return self.undef
        lines = out.split('\n')
        if len(lines) < 2:
            return self.undef
        r = []
        for line in lines[1:]:
            l = line.split()
            if len(l) != 6:
                continue
            # discard bind mounts: we get metric from the source anyway
            if l[0].startswith('/') and not l[0].startswith('/dev') and not l[0].startswith('//'):
                continue
            # skip pseudo/ephemeral mount points
            if l[5].startswith('/Volumes'):
                continue
            if l[5].startswith('/media/'):
                continue
            if l[5].startswith('/run'):
                continue
            if l[5].startswith('/sys/'):
                continue
            if l[5].endswith('/shm'):
                continue
            if "/graph/" in l[5]:
                continue
            if "/aufs/mnt/" in l[5]:
                continue
            if "osvc_sync_" in l[0]:
                # do not report osvc sync snapshots fs usage
                continue
            if l[4] == '-':
                # vfat, btrfs, ... have no inode counter in df -i
                continue
            r.append({
                'chk_instance': l[5],
                'chk_value': l[4],
                'chk_svcname': self.find_svc(l[5]),
            })
        return r
opensvc-1.8~20170412/lib/rcStatsFreeBSD.py 0000644 0001750 0001750 00000010515 13073467726 020163 0 ustar jkelbert jkelbert import os
from rcUtilities import call, which
import rcStats
class StatsProvider(rcStats.StatsProvider):
def cpu(self, d, day, start, end):
    """Return (cols, lines) of cpu stats for <day>, parsed from 'bsdsar -u'."""
    cols = ['date',
            'usr',
            'sys',
            'nice',
            'irq',
            'idle',
            'cpu',
            'nodename']
    cmd = ['bsdsar', '-u', '-n', day]
    (ret, buff, err) = call(cmd, errlog=False)
    lines = []
    if ret != 0:
        return cols, lines
    for line in buff.split('\n'):
        l = line.split()
        if len(l) != 6:
            continue
        if l[0] == 'Time':
            continue
        # append the aggregate cpu tag and node name, prefix time with date
        l += ['ALL', self.nodename]
        l[0] = '%s %s'%(d, l[0])
        lines.append(l)
    return cols, lines
def kb(self, s):
n = int(s[0:-1])
unit = s[-1]
if unit == 'k' or unit =='K':
return n
elif unit == 'M':
return n*1024
elif unit == 'G':
return n*1024*1024
elif unit == 'T':
return n*1024*1024*1204
elif unit == 'P':
return n*1024*1024*1204*1024
def mem_u(self, d, day, start, end):
cols = ['date',
'kbmemfree',
'kbmemused',
'pct_memused',
'kbmemsys',
'nodename']
cmd = ['sysctl', 'hw.physmem']
(ret, out, err) = call(cmd)
physmem = int(out.split(': ')[1])/1024
cmd = ['sysctl', 'hw.usermem']
(ret, out, err) = call(cmd)
usermem = int(out.split(': ')[1])/1024
cmd = ['bsdsar', '-r', '-n', day]
(ret, buff, err) = call(cmd)
lines = []
for line in buff.split('\n'):
l = line.split()
if len(l) != 7:
continue
if l[0] == 'Time':
continue
free = self.kb(l[1])
used = self.kb(l[2])+self.kb(l[3])
x = [l[0], str(free), str(used), str(used/(used+free)), str(physmem-usermem), self.nodename]
x[0] = '%s %s'%(d, x[0])
lines.append(x)
return cols, lines
def swap(self, d, day, start, end):
cols = ['date',
'kbswpfree',
'kbswpused',
'pct_swpused',
'kbswpcad',
'pct_swpcad',
'nodename']
cmd = ['bsdsar', '-r', '-n', day]
(ret, buff, err) = call(cmd, errlog=False)
lines = []
if ret != 0:
return cols, lines
for line in buff.split('\n'):
l = line.split()
if len(l) != 7:
continue
if l[0] == 'Time':
continue
free = self.kb(l[6])
used = self.kb(l[5])
x = [l[0], str(free), str(used), str(used/(free+used)), '0', '0']
x.append(self.nodename)
x[0] = '%s %s'%(d, x[0])
lines.append(x)
return cols, lines
def netdev(self, d, day, start, end):
cols = ['date',
'rxpckps',
'rxkBps',
'txpckps',
'txkBps',
'dev',
'nodename']
cmd = ['bsdsar', '-I', '-n', day]
(ret, buff, err) = call(cmd, errlog=False)
lines = []
if ret != 0:
return cols, lines
for line in buff.split('\n'):
l = line.split()
if len(l) != 9:
continue
if l[0] == 'Time':
continue
x = [l[0], l[1], l[3], l[4], l[6], l[8], self.nodename]
x[0] = '%s %s'%(d, x[0])
lines.append(x)
return cols, lines
def netdev_err(self, d, day, start, end):
cols = ['date',
'rxerrps',
'txerrps',
'collps',
'dev',
'nodename']
cmd = ['bsdsar', '-I', '-n', day]
(ret, buff, err) = call(cmd, errlog=False)
lines = []
if ret != 0:
return cols, lines
for line in buff.split('\n'):
l = line.split()
if len(l) != 9:
continue
if l[0] == 'Time':
continue
x = [l[0], l[2], l[5], l[7], l[8], self.nodename]
x[0] = '%s %s'%(d, l[0])
lines.append(x)
return cols, lines
opensvc-1.8~20170412/lib/resSyncRados.py 0000644 0001750 0001750 00000024340 13073467726 020025 0 ustar jkelbert jkelbert import json
from rcUtilities import which, justcall
import rcExceptions as ex
import rcStatus
import datetime
import resSync
class syncRadosSnap(resSync.Sync):
    """
    Ceph RBD snapshot sync resource: periodically snapshot a set of
    pool/image rbd images and prune the previous snapshots taken by
    this resource id.
    """
    def recreate(self):
        """Take a fresh snapshot of every image, pruning older ones."""
        self.validate_image_fmt()
        for image in self.images:
            self._recreate(image)

    def _recreate(self, image):
        """
        Snapshot <image>, unless the last snapshot is recent enough,
        then remove the snapshots it supersedes.
        """
        snapnames = self._get_all(image)
        last_date, last_name = self._get_last(image)
        if self.skip_sync(last_date):
            self.log.info("skip resync for image %s: last resync on %s"%(image, str(last_date)))
            return
        snapname = self.snap_basename() + datetime.datetime.now().strftime(self.date_fmt)
        cmd = self.rbd_cmd()+['snap', 'create', image, '--snap', snapname]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ex.excError
        # prune the snapshots superseded by the one just created
        for snapname in snapnames:
            self.rm(snapname)

    def rm(self, image):
        """Remove a snapshot. Best-effort: a failure is logged, not raised."""
        cmd = self.rbd_cmd()+['snap', 'rm', image]
        ret, out, err = self.vcall(cmd)

    def unprotect(self, image):
        """Unprotect a snapshot so it can be removed."""
        cmd = self.rbd_cmd()+['snap', 'unprotect', image]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ex.excError

    def get_all(self):
        """Return {image: [snapshot names taken by this rid]}."""
        data = {}
        for image in self.images:
            data[image] = self._get_all(image)
        return data

    def _get_all(self, image):
        """Return the list of snapshot names of <image> taken by this rid."""
        data = self.list()
        retained = []
        prefix = image+"@"+self.snap_basename()
        for name in data:
            if not name.startswith(prefix):
                continue
            retained.append(name)
        return retained

    def get_last(self):
        """Return {image: (date, name) of the most recent snapshot}."""
        data = {}
        for image in self.images:
            data[image] = self._get_last(image)
        return data

    def _get_last(self, image):
        """
        Return the (date, name) of the most recent snapshot of <image>
        taken by this rid, or (None, None).
        """
        data = self.list()
        retained = []
        prefix = image+"@"+self.snap_basename()
        for name in data:
            if not name.startswith(prefix):
                continue
            try:
                date = datetime.datetime.strptime(name, prefix+self.date_fmt)
            except Exception:
                # not a snapshot named with our date format
                continue
            retained.append((date, name))
        if len(retained) == 0:
            return None, None
        last_date, last_name = sorted(retained)[-1]
        return last_date, last_name

    def rbd_cmd(self):
        """Return the rbd command prefix, with client id and keyring."""
        l = ["rbd"]
        if self.client_id:
            l += ["-n", self.client_id]
        if self.keyring:
            l += ["--keyring", self.keyring]
        return l

    def snap_basename(self):
        """Prefix of the snapshot names owned by this resource."""
        return self.rid+"."

    def get_pools(self):
        """Return the set of pools hosting the configured images."""
        l = set([])
        for image in self.images:
            pool = image.split("/")[0]
            l.add(pool)
        return l

    def list(self):
        """Return the cached {pool/image[@snap]: data} map, loading it once."""
        if self.list_data is not None:
            return self.list_data
        data = {}
        for pool in self.get_pools():
            data.update(self._list(pool))
        self.list_data = data
        return data

    def _list(self, pool):
        """Return {pool/image[@snap]: data} for <pool>, via 'rbd ls -l'."""
        cmd = self.rbd_cmd() + ["ls", "-l", pool, "--format", "json"]
        out, err, ret = justcall(cmd)
        data = {}
        try:
            _data = json.loads(out)
        except Exception as e:
            self.status_log(str(e))
            _data = []
        for img_data in _data:
            idx = pool+"/"+img_data['image']
            if "snapshot" in img_data:
                idx += "@"+img_data['snapshot']
            data[idx] = img_data
        return data

    def _status(self, verbose=False):
        """
        WARN if an image has no snapshot or a snapshot older than
        sync_max_delay, else UP.
        """
        try:
            self.validate_image_fmt()
        except Exception as e:
            self.status_log(str(e))
            return rcStatus.WARN
        try:
            data = self.get_last()
        except Exception as e:
            self.status_log(str(e))
            return rcStatus.WARN
        nosnap = []
        expired = []
        ok = []
        for image in self.images:
            date, snapname = data[image]
            if date is None:
                nosnap.append(image)
            elif date < datetime.datetime.now() - datetime.timedelta(minutes=self.sync_max_delay):
                expired.append(image)
            else:
                ok.append(image)
        r = rcStatus.UP
        if len(nosnap) > 0:
            self.status_log("no snap found for images: "+", ".join(nosnap))
            r = rcStatus.WARN
        if len(expired) > 0:
            self.status_log("snap too old for images: "+", ".join(expired))
            r = rcStatus.WARN
        return r

    def sync_update(self):
        self.recreate()

    def sync_resync(self):
        self.recreate()

    def __init__(self,
                 rid=None,
                 images=None,
                 client_id=None,
                 keyring=None,
                 **kwargs):
        """
        images: list of pool/image names to snapshot.
        client_id: cephx client id, 'client.' prefixed if needed.
        keyring: path of the keyring file to authenticate with.
        """
        # fixed: avoid the shared mutable default argument pitfall
        if images is None:
            images = []
        resSync.Sync.__init__(self,
                              rid=rid,
                              type="sync.rados",
                              **kwargs)
        self.fmt_label("snap", images)
        self.images = images
        # fixed: the default client_id=None crashed on .startswith()
        if client_id and not client_id.startswith("client."):
            client_id = "client."+client_id
        self.client_id = client_id
        self.keyring = keyring
        self.list_data = None
        self.date_fmt = "%Y-%m-%d.%H:%M:%S"

    def validate_image_fmt(self):
        """Raise excError if an image is not in pool/image format."""
        l = []
        for image in self.images:
            if image.count("/") != 1:
                l.append(image)
        if len(l) > 0:
            raise ex.excError("wrong format (expected pool/image): "+", ".join(l))

    def fmt_label(self, t, l):
        """Set the resource label, truncated to 80 characters."""
        self.label = t+" rados %s"%', '.join(l)
        if len(self.label) > 80:
            self.label = self.label[:76]+"..."

    def __str__(self):
        return "%s images=%s" % (resSync.Sync.__str__(self),\
                 ', '.join(self.images))
class syncRadosClone(syncRadosSnap):
    """
    Ceph RBD clone sync resource: snapshot source images, protect the
    snapshots, and maintain clones of the latest snapshot.
    """
    def __init__(self,
                 rid=None,
                 pairs=None,
                 client_id=None,
                 keyring=None,
                 type="sync.rados",
                 sync_max_delay=None,
                 schedule=None,
                 optional=False,
                 disabled=False,
                 tags=None,
                 internal=False,
                 subset=None):
        """
        pairs: list of 'pool/image:pool/clone' mapping strings.
        """
        # fixed: avoid the shared mutable default argument pitfalls
        # (pairs=[] and tags=set([]))
        if pairs is None:
            pairs = []
        if tags is None:
            tags = set()
        # fixed: materialize as a list; under python3 map() returns a
        # one-shot iterator that would be exhausted after the first use
        images = [pair.split(":")[0] for pair in pairs]
        syncRadosSnap.__init__(self,
                               rid=rid,
                               images=images,
                               client_id=client_id,
                               keyring=keyring,
                               type=type,
                               sync_max_delay=sync_max_delay,
                               schedule=schedule,
                               optional=optional,
                               disabled=disabled,
                               tags=tags,
                               subset=subset)
        self.pairs = pairs
        self.fmt_label("clone", pairs)

    def recreate(self):
        """Refresh the snapshot and clone of every configured pair."""
        self.validate_pair_fmt()
        for pair in self.pairs:
            self._recreate(pair)

    def _recreate(self, pair):
        """
        Snapshot the source image, protect the snapshot, replace the
        clone with a clone of the new snapshot, then prune the old
        snapshots (best-effort).
        """
        image, clone = pair.split(":")
        snapnames = self._get_all(image)
        last_date, last_name = self._get_last(image)
        if self.skip_sync(last_date):
            self.log.info("skip resync for image %s: last resync on %s"%(image, str(last_date)))
            return
        snapname = self.snap_basename() + datetime.datetime.now().strftime(self.date_fmt)
        cmd = self.rbd_cmd()+['snap', 'create', image, '--snap', snapname]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ex.excError
        # a snapshot must be protected before it can be cloned
        cmd = self.rbd_cmd()+['snap', 'protect', image+"@"+snapname]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ex.excError
        list_data = self.list()
        if clone in list_data:
            cmd = self.rbd_cmd()+['rm', clone]
            ret, out, err = self.vcall(cmd)
            if ret != 0:
                raise ex.excError
        cmd = self.rbd_cmd()+['clone', image+"@"+snapname, clone]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ex.excError
        # best-effort prune: old snaps may still back other clones
        for snapname in snapnames:
            try:
                self.unprotect(snapname)
                self.rm(snapname)
            except Exception:
                pass

    def validate_pair_fmt(self):
        """Raise excError if a pair is not pool/image:pool/image formatted."""
        l = []
        for pair in self.pairs:
            try:
                image, clone = pair.split(":")
            except Exception:
                # fixed: the original appended 'image', which is unbound
                # when the very first pair is malformed
                l.append(pair)
                continue
            if image.count("/") != 1 or clone.count("/") != 1:
                l.append(image)
        if len(l) > 0:
            raise ex.excError("wrong format (expected pool/image:pool/image): "+", ".join(l))

    def snap_basename(self):
        """Prefix of the clone-reference snapshot names owned by this rid."""
        return self.rid+".cloneref."

    def _status(self, verbose=False):
        """
        WARN if a source image misses a snapshot, a snapshot is too old,
        a clone is missing, or a clone has no parent snapshot.
        """
        try:
            self.validate_pair_fmt()
        except Exception as e:
            self.status_log(str(e))
            return rcStatus.WARN
        try:
            data = self.get_last()
        except Exception as e:
            self.status_log(str(e))
            return rcStatus.WARN
        nosnap = []
        noclone = []
        expired = []
        invclone = []
        ok = []
        for image in self.images:
            date, snapname = data[image]
            if date is None:
                nosnap.append(image)
            elif date < datetime.datetime.now() - datetime.timedelta(minutes=self.sync_max_delay):
                expired.append(image)
            else:
                ok.append(image)
        list_data = self.list()
        for pair in self.pairs:
            image, clone = pair.split(":")
            if clone not in list_data:
                noclone.append(pair)
            elif not list_data[clone].get("parent"):
                invclone.append(pair)
        r = rcStatus.UP
        if len(nosnap) > 0:
            self.status_log("no snap found for images: "+", ".join(nosnap))
            r = rcStatus.WARN
        if len(expired) > 0:
            self.status_log("snap too old for images: "+", ".join(expired))
            r = rcStatus.WARN
        if len(noclone) > 0:
            self.status_log("no clone found for pairs: "+", ".join(noclone))
            r = rcStatus.WARN
        if len(invclone) > 0:
            self.status_log("clone invalid for pairs: "+", ".join(invclone))
            r = rcStatus.WARN
        return r
opensvc-1.8~20170412/lib/checkRaidSmartArraySunOS.py 0000777 0001750 0001750 00000000000 13073467726 026450 2checkRaidSmartArray.py ustar jkelbert jkelbert opensvc-1.8~20170412/lib/resContainerDocker.py 0000644 0001750 0001750 00000050177 13073467726 021201 0 ustar jkelbert jkelbert """
Docker container resource driver module.
"""
import os
import shlex
import resources
import resContainer
import rcExceptions as ex
import rcStatus
from rcUtilitiesLinux import check_ping
from rcUtilities import justcall, lazy, unset_lazy
from rcGlobalEnv import rcEnv
os.environ['LANG'] = 'C'
class Docker(resContainer.Container):
    """
    Docker container resource driver.
    """
    def __init__(self,
                 rid,
                 run_image,
                 run_command=None,
                 run_args=None,
                 docker_service=False,
                 guestos="Linux",
                 osvc_root_path=None,
                 **kwargs):
        # run_image: image to instantiate
        # run_command: optional command to run in the container/service
        # run_args: extra 'docker run'/'docker service create' arguments
        # docker_service: drive a swarm service instead of a plain container
        resContainer.Container.__init__(self,
                                        rid=rid,
                                        name="",
                                        type="container.docker",
                                        guestos=guestos,
                                        osvc_root_path=osvc_root_path,
                                        **kwargs)
        self.run_image = run_image
        self.run_command = run_command
        self.run_args = run_args
        self.docker_service = docker_service
        self.service_id = None
        self.container_id = None
        self.startup_timeout = 30

    @lazy
    def container_name(self):
        """
        Format a docker container name
        """
        container_name = self.svc.svcname+'.'+self.rid
        return container_name.replace('#', '.')

    @lazy
    def service_name(self):
        """
        Format a docker compliant docker service name, ie without dots
        """
        return self.container_name.replace(".", "_")

    def on_add(self):
        """
        Init done after self.svc is set.
        """
        if self.docker_service:
            try:
                self.service_id = self.svc.dockerlib.get_service_id_by_name(self)
            except Exception:
                self.service_id = None
        else:
            try:
                self.container_id = self.svc.dockerlib.get_container_id_by_name(self)
            except Exception:
                self.container_id = None
        self.set_label()

    def set_label(self):
        """
        Set the resource label from the service/container name and image.
        """
        if self.docker_service:
            self.label = "docker service " + "@".join((
                self.service_name,
                self.svc.dockerlib.image_userfriendly_name(self)
            ))
        else:
            self.label = "docker container " + "@".join((
                self.container_name,
                self.svc.dockerlib.image_userfriendly_name(self)
            ))

    def __str__(self):
        return "%s name=%s" % (resources.Resource.__str__(self), self.name)

    def files_to_sync(self):
        """
        Files to contribute to sync#i0.
        """
        if self.docker_service:
            return self.svc.dockerlib.files_to_sync
        return []

    def operational(self):
        """
        Always return True for docker containers.
        """
        return True

    def vm_hostname(self):
        """
        Return an empty string, as we won't need that.
        """
        return ""

    def get_rootfs(self):
        """
        Return the rootfs layer path.
        """
        import glob
        inspect = self.svc.dockerlib.docker_inspect(self.container_id)
        instance_id = str(inspect['Id'])
        pattern = str(self.svc.dockerlib.docker_data_dir)+"/*/mnt/"+instance_id
        fpaths = glob.glob(pattern)
        if len(fpaths) == 0:
            raise ex.excError("no candidates rootfs paths matching %s" % pattern)
        elif len(fpaths) != 1:
            raise ex.excError("too many candidates rootfs paths: %s" % ', '.join(fpaths))
        return fpaths[0]

    def rcp_from(self, src, dst):
        """
        Copy from the container's rootfs to in the host's fs.
        """
        rootfs = self.get_rootfs()
        if len(rootfs) == 0:
            raise ex.excError()
        src = rootfs + src
        cmd = ['cp', src, dst]
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError("'%s' execution error:\n%s" % (' '.join(cmd), err))
        return out, err, ret

    def rcp(self, src, dst):
        """
        Copy from the host's fs to the container's rootfs.
        """
        rootfs = self.get_rootfs()
        if len(rootfs) == 0:
            raise ex.excError()
        dst = rootfs + dst
        cmd = ['cp', src, dst]
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError("'%s' execution error:\n%s" % (' '.join(cmd), err))
        return out, err, ret

    def service_create(self):
        """
        Create the swarm service if not already created. Only swarm
        managers (leader/reachable) may create services.
        """
        self.service_id = self.svc.dockerlib.get_service_id_by_name(self, refresh=True)
        if self.service_id is not None:
            return
        if self.swarm_node_role() not in ("leader", "reachable"):
            return
        cmd = self.svc.dockerlib.docker_cmd + ['service', 'create', '--name='+self.service_name]
        cmd += self._add_run_args()
        cmd += [self.run_image]
        if self.run_command is not None and self.run_command != "":
            cmd += self.run_command.split()
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ex.excError(err)
        self.svc.dockerlib.get_running_service_ids(refresh=True)
        self.service_id = self.svc.dockerlib.get_service_id_by_name(self)

    def service_rm(self):
        """
        Remove the resource docker service.
        """
        self.service_id = self.svc.dockerlib.get_service_id_by_name(self, refresh=True)
        if self.service_id is None:
            self.log.info("skip: service already removed")
            return
        if self.swarm_node_role() not in ("leader", "reachable"):
            return
        cmd = self.svc.dockerlib.docker_cmd + ['service', 'rm', self.service_id]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ex.excError(err)
        self.svc.dockerlib.get_running_service_ids(refresh=True)
        self.service_id = self.svc.dockerlib.get_service_id_by_name(self)

    def docker(self, action):
        """
        Wrap the docker start/stop/kill commands, dispatching to the
        swarm service codepath when docker_service is set.
        """
        cmd = self.svc.dockerlib.docker_cmd + []
        if action == 'start':
            if self.docker_service:
                self.service_create()
                return
            else:
                if self.container_id is None:
                    self.container_id = self.svc.dockerlib.get_container_id_by_name(self, refresh=True)
                if self.container_id is None:
                    # no container yet: run a new one
                    cmd += ['run', '-d', '--name='+self.container_name]
                    cmd += self._add_run_args()
                    cmd += [self.run_image]
                    if self.run_command is not None and self.run_command != "":
                        cmd += self.run_command.split()
                else:
                    # restart the existing container
                    cmd += ['start', self.container_id]
        elif action == 'stop':
            if self.docker_service:
                self.service_stop()
                return
            else:
                cmd += ['stop', self.container_id]
        elif action == 'kill':
            if self.docker_service:
                return 0
            else:
                cmd += ['kill', self.container_id]
        else:
            self.log.error("unsupported docker action: %s", action)
            return 1
        ret = self.vcall(cmd, warn_to_info=True)[0]
        if ret != 0:
            raise ex.excError
        if action == 'start':
            # refresh the cached container id and running instances
            self.container_id = self.svc.dockerlib.get_container_id_by_name(self, refresh=True)
            self.svc.dockerlib.get_running_instance_ids(refresh=True)

    def service_stop(self):
        """
        Stop the swarm service: remove it on partial actions, otherwise
        make the local node leave (worker) or drain (manager).
        """
        if not self.svc.dockerlib.docker_daemon_private and self.swarm_node_role() == "worker":
            self.log.info("skip: worker with shared docker daemon")
            return
        role = self.swarm_node_role()
        if self.partial_action():
            if role == "worker":
                raise ex.excError("actions on a subset of docker services are not possible from a docker worker")
            elif role in ("leader", "reachable"):
                self.service_rm()
        else:
            if role == "worker":
                self.svc.dockerlib.docker_swarm_leave()
            elif role in ("leader", "reachable"):
                self.swarm_node_drain()

    def swarm_node_drain(self):
        """
        Set the local swarm node availability to 'drain' (manager only).
        """
        if self.swarm_node_role() not in ("leader", "reachable"):
            return
        node_data = self.svc.dockerlib.node_data()
        if node_data["Spec"]["Availability"] == "drain":
            return
        cmd = self.svc.dockerlib.docker_cmd + ['node', 'update', '--availability=drain', rcEnv.nodename]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ex.excError("failed to update node availabilty as drain: %s" % err)

    def swarm_node_active(self):
        """
        Set the local swarm node availability to 'active' (manager only).
        """
        if self.swarm_node_role() not in ("leader", "reachable"):
            return
        node_data = self.svc.dockerlib.node_data()
        if node_data["Spec"]["Availability"] == "active":
            return
        cmd = self.svc.dockerlib.docker_cmd + ['node', 'update', '--availability=active', rcEnv.nodename]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ex.excError("failed to update node availabilty as active: %s" % err)

    def _add_run_args(self):
        """
        Return the docker arguments from run_args, plus the cgroup
        parent option when supported.
        """
        if self.run_args is None:
            return []
        args = shlex.split(self.run_args)
        # NOTE(review): enumerate() yields (index, value), so 'arg' is
        # actually the index and 'pos' the token here; 'arg != -p' is
        # thus always true and the mapping validation below never runs.
        # Left untouched: enabling it as written would reject valid
        # 2-element '-p host:container' mappings. Confirm intent before
        # fixing.
        for arg, pos in enumerate(args):
            if arg != '-p':
                continue
            if len(args) < pos + 2:
                # bad
                break
            volarg = args[pos+1]
            if ':' in volarg:
                # mapping ... check source dir presence
                elements = volarg.split(':')
                if len(elements) != 3:
                    raise ex.excError("mapping %s should be formatted as "
                                      "::" % (volarg))
                if not os.path.exists(elements[0]):
                    raise ex.excError("source dir of mapping %s does not "
                                      "exist" % (volarg))
        if self.svc.dockerlib.docker_min_version("1.7") and not self.docker_service:
            args += ["--cgroup-parent", self._parent_cgroup_name()]
        return args

    def _parent_cgroup_name(self):
        """
        Return the name of the container parent cgroup,
        built from the svcname, resourceset rid and rid, with invalid
        characters replaced by dots.
        """
        return os.path.join(
            os.sep,
            self.svc.svcname,
            self.rset.rid.replace(":", "."),
            self.rid.replace("#", ".")
        )

    def container_start(self):
        self.docker('start')

    def _start(self):
        """
        Start the docker daemon, join/activate the swarm if needed,
        then start the container/service.
        """
        self.svc.dockerlib.docker_start()
        if self.docker_service:
            self.svc.dockerlib.init_swarm()
            self.swarm_node_active()
            if self.svc.running_action == "boot" and self.swarm_node_role() != "leader":
                self.log.info("skip: this docker node is not swarm leader")
                return
            elif self.svc.running_action != "boot" and self.swarm_node_role() not in ("leader", "reachable"):
                self.log.info("skip: this docker node is not swarm manager")
                return
        resContainer.Container.start(self)

    def start(self):
        # start the container, then the ip resources bound to it
        self._start()
        self.svc.sub_set_action("ip", "start", tags=set([self.rid]))

    def container_stop(self):
        self.docker('stop')

    def stop(self):
        # stop the ip resources bound to the container first
        self.svc.sub_set_action("ip", "stop", tags=set([self.rid]))
        self._stop()

    def partial_action(self):
        """
        Return True if the action is scoped to a strict subset of the
        docker service resources of the service.
        """
        if not self.svc.command_is_scoped():
            return False
        all_rids = set([res.rid for res in self.svc.get_resources("container.docker") if res.docker_service])
        if len(all_rids - set(self.svc.action_rid)) > 0:
            return True
        return False

    def _stop(self):
        self.svc.dockerlib.docker_start()
        self.status()
        resContainer.Container.stop(self)
        self.svc.dockerlib.get_running_instance_ids(refresh=True)

    def info(self):
        """
        Return keys to contribute to resinfo.
        """
        data = self.svc.dockerlib.info()
        return self.fmt_info(data)

    def wanted_nodes_count(self):
        """
        Return the number of nodes (or drp nodes) the service spans.
        """
        if rcEnv.nodename in self.svc.nodes:
            return len(self.svc.nodes)
        else:
            return len(self.svc.drpnodes)

    def run_args_replicas(self):
        """
        Return the wanted replicas count parsed from run_args
        (--mode global, --replicas N, default 1). May return None
        when --mode is set to something other than global.
        """
        # NOTE(review): assumes run_args is set; a None run_args would
        # raise AttributeError here — confirm callers guarantee it
        elements = self.run_args.split()
        if "--mode" in elements:
            idx = elements.index("--mode")
            if "=" in elements[idx]:
                mode = elements[idx].split("=")[-1]
            else:
                mode = elements[idx+1]
            if mode == "global":
                return self.wanted_nodes_count()
        elif "--replicas" in elements:
            idx = elements.index("--replicas")
            if "=" in elements[idx]:
                return int(elements[idx].split("=")[-1])
            else:
                return int(elements[idx+1])
        else:
            return 1

    @lazy
    def service_ps(self):
        # cached 'docker service ps' data for this service
        return self.svc.dockerlib.service_ps_data(self.service_id)

    def running_replicas(self, refresh=False):
        """Return the count of running instances of the service."""
        return len(self.service_running_instances(refresh=refresh))

    @lazy
    def ready_nodes(self):
        # ids of the swarm nodes in 'ready' state
        return [node["ID"] for node in self.svc.dockerlib.node_ls_data() if node["Status"]["State"] == "ready"]

    def post_action(self, action):
        """
        Executed after executing on the resourceset
        """
        if action not in ("stop", "unprovision", "shutdown"):
            return
        self.svc.dockerlib.docker_stop()

    def service_running_instances(self, refresh=False):
        """
        Return the service instances running on ready swarm nodes.
        """
        if refresh:
            unset_lazy(self, "service_ps")
        instances = []
        for inst in self.service_ps:
            if inst["Status"]["State"] != "running":
                continue
            if inst["NodeID"] not in self.ready_nodes:
                continue
            instances.append(inst)
        return instances

    def service_hosted_instances(self):
        """
        Return the ids of the service instances hosted on this node,
        parsed from the local 'docker ps' output.
        """
        out = self.svc.dockerlib.get_ps()
        return [line.split()[0] for line in out.splitlines() \
                if self.service_name in line and \
                   "Exited" not in line and \
                   "Failed" not in line and \
                   "Created" not in line]

    def swarm_node_role(self):
        return self.svc.dockerlib.swarm_node_role

    def _status_service_replicas_state(self):
        """
        Log instances whose state differs from their desired state
        (leader only).
        """
        if self.swarm_node_role() != "leader":
            return
        for inst in self.service_ps:
            if inst["NodeID"] not in self.ready_nodes:
                continue
            if inst["DesiredState"] != inst["Status"]["State"]:
                self.status_log("instance %s in state %s, desired %s" % (inst["ID"], inst["Status"]["State"], inst["DesiredState"]))

    def _status_service_replicas(self):
        """
        Log a wanted-vs-running replicas count mismatch (leader only).
        """
        if self.swarm_node_role() != "leader":
            return
        wanted = self.run_args_replicas()
        if wanted is None:
            return
        current = self.running_replicas()
        if wanted != current:
            if current == 0:
                # a pure resource 'down' state, we don't want to cause a warn
                # at the service overall status
                level = "info"
            else:
                level = "warn"
            self.status_log("%d replicas wanted, %d currently running" % (wanted, current), level)

    def _status_service_image(self):
        """
        Log a mismatch between the configured image and the image the
        swarm service actually runs (leader only).
        """
        if self.swarm_node_role() != "leader":
            return
        try:
            run_image_id = self.svc.dockerlib.get_run_image_id(self)
        except ValueError as exc:
            self.status_log(str(exc))
            return
        try:
            inspect = self.svc.dockerlib.docker_service_inspect(self.service_id)
        except Exception:
            return
        running_image_id = inspect['Spec']['TaskTemplate']['ContainerSpec']['Image']
        running_image_id = self.svc.dockerlib.repotag_to_image_id(running_image_id)
        if run_image_id and run_image_id != running_image_id:
            self.status_log("the service is configured with image '%s' "
                            "instead of '%s'"%(running_image_id, run_image_id))

    def _status_container_image(self):
        """
        Log a mismatch between the configured image and the image the
        container actually runs.
        """
        try:
            run_image_id = self.svc.dockerlib.get_run_image_id(self)
        except ValueError as exc:
            self.status_log(str(exc))
            return
        try:
            inspect = self.svc.dockerlib.docker_inspect(self.container_id)
        except Exception:
            return
        running_image_id = inspect['Image']
        if run_image_id and run_image_id != running_image_id:
            self.status_log("the current container is based on image '%s' "
                            "instead of '%s'"%(running_image_id, run_image_id))

    def _status(self, verbose=False):
        """
        Compute the resource status, with service-mode extra checks on
        image, replicas count/state and instance balancing.
        """
        if not self.svc.dockerlib.docker_running():
            self.status_log("docker daemon is not running", "info")
            return rcStatus.DOWN
        if self.docker_service:
            if self.swarm_node_role() == "none":
                self.status_log("swarm node is not joined", "info")
                return rcStatus.DOWN
            self.svc.dockerlib.nodes_purge()
            # refresh the cached service ps data before the checks
            self.running_replicas(refresh=True)
            self._status_service_image()
            self._status_service_replicas()
            self._status_service_replicas_state()
            sta = resContainer.Container._status(self, verbose)
            hosted = len(self.service_hosted_instances())
            if hosted > 0:
                self.status_log("%d/%d instances hosted" % (hosted, self.run_args_replicas()), "info")
                balance_min, balance_max = self.balance
                if hosted > balance_max:
                    self.status_log("%d>%d instances imbalance" % (hosted, balance_max), "warn")
                elif hosted < balance_min:
                    self.status_log("%d<%d instances imbalance" % (hosted, balance_min), "warn")
                elif sta == rcStatus.UP:
                    sta = rcStatus.STDBY_UP
        else:
            sta = resContainer.Container._status(self, verbose)
            self._status_container_image()
        return sta

    @lazy
    def balance(self):
        """
        Return the (min, max) instances count this node should host for
        the service to be considered balanced.
        """
        replicas = self.run_args_replicas()
        nodes = self.wanted_nodes_count()
        balance = replicas // nodes
        if balance == 0:
            balance = 1
        if replicas % nodes == 0:
            return balance, balance
        else:
            return balance, balance+1

    def container_forcestop(self):
        self.docker('kill')

    def _ping(self):
        return check_ping(self.addr, timeout=1)

    def is_up(self):
        """
        Return True if the container (or swarm service) is running.
        """
        if self.svc.dockerlib.docker_daemon_private and \
           self.svc.dockerlib.docker_data_dir is None:
            self.status_log("DEFAULT.docker_data_dir must be defined")
        if not self.svc.dockerlib.docker_running():
            return False
        if self.docker_service:
            if self.swarm_node_role() == "leader":
                if self.service_id is None:
                    self.status_log("docker service is not created", "info")
                    return False
                if self.running_replicas(refresh=True) == 0:
                    return False
                if self.service_id in self.svc.dockerlib.get_running_service_ids(refresh=True):
                    return True
            else:
                # non-managers can't inspect the service: report up
                return True
        else:
            if self.container_id is None:
                return False
            if self.container_id in self.svc.dockerlib.get_running_instance_ids(refresh=True):
                return True
        return False

    def get_container_info(self):
        return {'vcpus': '0', 'vmem': '0'}

    def check_manual_boot(self):
        return True

    def check_capabilities(self):
        return True

    def provision(self):
        # docker resources are naturally provisioned
        self._start()
        self.status(refresh=True)
        self.svc.sub_set_action("ip", "provision", tags=set([self.rid]))

    def unprovision(self):
        self.svc.sub_set_action("ip", "unprovision", tags=set([self.rid]))
        self._stop()
        self.status(refresh=True)
opensvc-1.8~20170412/lib/checkFsUsageWindows.py 0000644 0001750 0001750 00000001660 13073467726 021314 0 ustar jkelbert jkelbert import checks
from rcUtilitiesWindows import get_drives
class check(checks.check):
    """
    Windows filesystem usage checker, fed by win32api free space data.
    """
    chk_type = "fs_u"

    def find_svc(self, mountpt):
        """Return the name of the service owning <mountpt>, or ''."""
        for svc in self.svcs:
            for resource in svc.get_resources('fs'):
                if resource.mount_point == mountpt:
                    return svc.svcname
        return ''

    def do_check(self):
        """
        Return one dict per local drive, with the usage percentage as
        chk_value.
        """
        import win32api
        # fixed: dropped the dead leftover 'df -lP' command assignment
        r = []
        for drive in get_drives():
            try:
                n_free_user, n_total, n_free = win32api.GetDiskFreeSpaceEx(drive+':\\')
            except Exception:
                # drive not ready (cd-rom, disconnected share, ...)
                continue
            pct = 100 * (n_total - n_free) // n_total
            r.append({
                'chk_instance': drive,
                'chk_value': str(pct),
                'chk_svcname': self.find_svc(drive),
            })
        return r
if __name__ == "__main__":
    # ad-hoc test: dump the checker results when run directly
    o = check()
    print(o.do_check())
opensvc-1.8~20170412/lib/resDiskVgHP-UX.py 0000644 0001750 0001750 00000027131 13073467726 020072 0 ustar jkelbert jkelbert import re
import os
import rcExceptions as ex
import resDisk
from subprocess import *
from rcUtilities import qcall
from rcGlobalEnv import rcEnv
class Disk(resDisk.Disk):
    """
    HP-UX LVM volume group resource driver.

    The vg is exported to / imported from a map file replicated to the
    peer nodes by the sync#i0 resource.
    """
    def __init__(self,
                 rid=None,
                 name=None,
                 dsf=True,
                 **kwargs):
        # dsf: use persistent device special files on vgimport (-N)
        self.label = "vg "+name
        self.dsf = dsf
        resDisk.Disk.__init__(self,
                              rid=rid,
                              name=name,
                              type='disk.vg',
                              **kwargs)

    def is_child_dev(self, device):
        """
        Return True if <device> is a logical volume of this volume
        group, ie matches /dev/<vgname>/<lvname>.
        """
        l = device.split("/")
        if len(l) != 4 or l[1] != "dev":
            return False
        vgname = l[2]
        if vgname == self.name:
            return True
        return False

    def files_to_sync(self):
        """Files to replicate to the peer nodes (map and mksf files)."""
        return [self.mapfile_name(), self.mkfsfile_name()]

    def mapfile_name(self):
        """Path of the vgexport/vgimport map file for this vg."""
        return os.path.join(rcEnv.pathvar, 'vg_' + self.svc.svcname + '_' + self.name + '.map')

    def mkfsfile_name(self):
        """Path of the file recording devfile-to-wwid mappings for mksf."""
        return os.path.join(rcEnv.pathvar, 'vg_' + self.svc.svcname + '_' + self.name + '.mksf')

    def has_it(self):
        """ returns True if the volume is present
        """
        if self.is_active():
            return True
        if not os.path.exists(self.mapfile_name()):
            return False
        if self.is_imported():
            return True
        return False

    def dev2char(self, dev):
        """Translate a block device path into its raw device path."""
        dev = dev.replace("/dev/disk", "/dev/rdisk")
        dev = dev.replace("/dev/dsk", "/dev/rdsk")
        return dev

    def dsf_name(self, dev):
        """Return the persistent dsf name of <dev>, queried via scsimgr."""
        cmd = ['scsimgr', 'get_attr', '-D', self.dev2char(dev), '-a', 'device_file', '-p']
        (ret, out, err) = self.call(cmd)
        if ret != 0:
            raise ex.excError
        return out.split()[0]
    def write_mksf(self):
        """
        Record in the mksf file the devfile-to-wwid mapping of the vg
        disks, so peer nodes can recreate missing device files with
        do_mksf().
        """
        cmd = ['ioscan', '-F', '-m', 'dsf']
        (ret, buff, err) = self.call(cmd)
        if ret != 0:
            raise ex.excError
        if len(buff) == 0:
            return
        mksf = {}
        if len(self.disks) == 0:
            self.disks = self.disklist()
        # NOTE(review): under python3 map() returns a one-shot iterator,
        # which would be exhausted after the first membership test in
        # the loop below — confirm python2-only usage
        dsf_names = map(self.dsf_name, self.disks)
        with open(self.mkfsfile_name(), 'w') as f:
            for line in buff.split('\n'):
                if len(line) == 0:
                    return
                a = line.split(':')[0]
                # keep only legacy device files belonging to the vg disks
                if '/dev/pt/pt' not in a and '/dev/rdisk/disk' not in a and not a.endswith(".pt") and self.dsf_name(a) in dsf_names:
                    cmd = ['scsimgr', 'get_attr', '-D', self.dev2char(a), '-a', 'wwid', '-p']
                    (ret, out, err) = self.call(cmd)
                    if ret != 0:
                        raise ex.excError
                    f.write(":".join([a, out.split()[0].replace('0x', '')])+'\n')

    def do_mksf(self):
        """
        Recreate on this node the device files recorded in the mksf
        file, matching luns by wwid.
        """
        if not os.path.exists(self.mkfsfile_name()):
            return
        # build the wwid -> instance number map of the local luns
        instance = {}
        cmd = ['scsimgr', 'get_attr', 'all_lun', '-a', 'wwid', '-a', 'instance', '-p']
        (ret, buff, err) = self.call(cmd)
        for line in buff.split('\n'):
            l = line.split(':')
            if len(l) != 2:
                continue
            instance[l[0].replace('0x', '')] = l[1]
        r = 0
        with open(self.mkfsfile_name(), 'r') as f:
            for line in f.readlines():
                a = line.replace('\n', '').split(':')
                if len(a) == 0:
                    continue
                if os.path.exists(a[0]):
                    # device file already present
                    continue
                if a[1] not in instance.keys():
                    self.log.error("expected lun %s not present on node %s"%(a[1], rcEnv.nodename))
                    r += 1
                    continue
                cmd = ['mksf', '-r', '-C', 'disk', '-I', instance[a[1]], a[0]]
                (ret, buff, err) = self.vcall(cmd)
                if ret != 0:
                    r += 1
                    continue
        # raise if any device file could not be recreated
        if r > 0:
            raise ex.excError

    def presync(self):
        """ this one is exported as a service command line arg
        """
        # refresh the map file (preview export) and the mksf file
        cmd = [ 'vgexport', '-m', self.mapfile_name(), '-p', '-s', self.name ]
        ret = qcall(cmd)
        if ret != 0:
            raise ex.excError
        self.write_mksf()
def is_active(self):
cmd = [ 'vgdisplay', self.name ]
process = Popen(cmd, stdout=PIPE, stderr=PIPE, close_fds=True)
buff = process.communicate()
if not "available" in buff[0]:
return False
return True
def is_imported(self):
r = self.is_imported_lvm2()
if r:
return True
return self.is_imported_lvm1()
def is_imported_lvm2(self):
if not os.path.exists('/etc/lvmtab_p'):
return False
cmd = ['strings', '/etc/lvmtab_p']
process = Popen(cmd, stdout=PIPE, stderr=PIPE, close_fds=True)
out, err = process.communicate()
l = out.split('\n')
map(lambda x: x.strip(), l)
s = '/dev/'+self.name
if s in l:
return True
return False
def is_imported_lvm1(self):
if not os.path.exists('/etc/lvmtab'):
return False
cmd = ['strings', '/etc/lvmtab']
process = Popen(cmd, stdout=PIPE, stderr=PIPE, close_fds=True)
out, err = process.communicate()
l = out.split('\n')
map(lambda x: x.strip(), l)
s = '/dev/'+self.name
if s in l:
return True
return False
    def is_up(self):
        """Returns True if the volume group is present and activated
        """
        if not os.path.exists(self.mapfile_name()):
            try:
                # regenerate the missing map file from the live vg
                self.do_export(force_preview=True)
            except ex.excError:
                # vg does not exist
                return False
        if not self.is_imported():
            return False
        if not self.is_active():
            return False
        return True

    def clean_group(self):
        """
        Remove a stale /dev/<vg>/group special file left over from a
        previous import, so vgimport can recreate it.
        """
        gp = os.path.join(os.sep, "dev", self.name, "group")
        if not os.path.exists(gp):
            return
        cmd = ["rmsf", gp]
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            self.log.error("failed to remove pre-existing %s"%gp)
            raise ex.excError
def do_import(self):
if self.is_imported():
self.log.info("%s is already imported" % self.name)
return
if self.dsf:
dsfflag = '-N'
else:
dsfflag = ''
self.lock()
self.clean_group()
cmd = [ 'vgimport', '-m', self.mapfile_name(), '-s', dsfflag, self.name ]
self.log.info(' '.join(cmd))
process = Popen(cmd, stdout=PIPE, stderr=PIPE, close_fds=True)
buff = process.communicate()
self.unlock()
# we will modify buff[1], so convert from tuple to list
buff = list(buff)
# test string for warnings
#buff[1] = """Warning: Cannot determine block size of Physical Volume "/dev/rdisk/disk394".
#Assuming a default value of 1024 bytes. Continuing.
#Warning: Cannot determine block size of Physical Volume "/dev/rdisk/disk395".
#Assuming a default value of 1024 bytes. Continuing.
#vgimport:"""
if len(buff[1]) > 0:
import re
regex = re.compile("Warning:.*\n.*Continuing.\n", re.MULTILINE)
w = regex.findall(buff[1])
if len(w) > 0:
warnings = '\n'.join(w)
self.log.warning(warnings)
buff[1] = regex.sub('', buff[1])
if buff[1] != "vgimport: " and buff[1] != "vgimport:":
self.log.error('error:\n' + buff[1])
if len(buff[0]) > 0:
self.log.debug('output:\n' + buff[0])
if process.returncode != 0:
raise ex.excError
def do_export(self, force_preview=False):
    """Export the vg to its map file. A preview (-p) export is done
    when the vg is active, or when force_preview is set.
    """
    preview = False
    if os.path.exists(self.mapfile_name()):
        if not self.is_imported():
            self.log.info("%s is already exported" % self.name)
            return
    elif self.is_active():
        preview = True
    cmd = ['vgexport']
    if preview or force_preview:
        cmd.append('-p')
    cmd += ['-m', self.mapfile_name(), '-s', self.name]
    ret = self.vcall(cmd)[0]
    if ret != 0:
        raise ex.excError
def do_activate(self):
    """Clear the vg cluster bit, then activate the vg."""
    if self.is_active():
        self.log.info("%s is already available" % self.name)
        return
    for flag, value in (('-c', 'n'), ('-a', 'y')):
        ret = self.vcall(['vgchange', flag, value, self.name])[0]
        if ret != 0:
            raise ex.excError
def do_deactivate(self):
    """Deactivate the vg if it is currently active."""
    if not self.is_active():
        self.log.info("%s is already unavailable" % self.name)
        return
    ret, _, _ = self.vcall(['vgchange', '-a', 'n', self.name])
    if ret != 0:
        raise ex.excError
def do_start(self):
    """Bring the vg up: import, then activate."""
    self.do_import()
    self.do_activate()
def do_stop(self):
    """Bring the vg down: deactivate, then export."""
    self.do_deactivate()
    self.do_export()
def start(self):
    """Create device special files, then import and activate the vg.
    Marks the resource as rollback-able.
    """
    self.do_mksf()
    self.can_rollback = True
    self.do_start()
def disklist(self):
    """
    Return the set of disk device paths backing the vg, scanned from
    /etc/lvmtab and /etc/lvmtab_p. If the vg is neither active nor
    imported, it is temporarily imported for the scan and re-exported
    afterwards — now guaranteed by a finally clause even if the scan
    raises.
    """
    need_export = False
    if not self.is_active() and not self.is_imported():
        self.do_import()
        need_export = True
    try:
        self.disks = set([])
        if os.path.exists('/etc/lvmtab'):
            self.disks |= self._disklist('/etc/lvmtab')
        if os.path.exists('/etc/lvmtab_p'):
            self.disks |= self._disklist('/etc/lvmtab_p')
    finally:
        if need_export:
            # don't leave the vg imported on error
            self.do_export()
    return self.disks
def _disklist(self, tabp):
cmd = ['strings', tabp]
(ret, out, err) = self.call(cmd)
if ret != 0:
raise ex.excError
tab = out.split('\n')
insection = False
disks = set([])
for e in tab:
""" move to the first disk of the vg
"""
if e == "/dev/" + self.name:
insection = True
continue
if not insection:
continue
if not e.startswith('/dev/'):
continue
if not e.startswith('/dev/disk') and not e.startswith('/dev/dsk'):
break
disks |= set([e])
return disks
def lock(self, timeout=30, delay=1):
    """
    Acquire the host-wide 'vgimport' lock, retrying every <delay>
    seconds for up to <timeout> seconds. The acquired file descriptor
    is stored in self.lockfd for unlock(). Every failure mode is
    logged then translated into ex.excError.
    """
    import lock
    lockfile = os.path.join(rcEnv.pathlock, 'vgimport')
    lockfd = None
    try:
        lockfd = lock.lock(timeout=timeout, delay=delay, lockfile=lockfile)
    except lock.lockTimeout:
        self.log.error("timed out waiting for lock (%s)"%lockfile)
        raise ex.excError
    except lock.lockNoLockFile:
        self.log.error("lock_nowait: set the 'lockfile' param")
        raise ex.excError
    except lock.lockCreateError:
        self.log.error("can not create lock file %s"%lockfile)
        raise ex.excError
    except lock.lockAcquire as e:
        self.log.warn("another action is currently running (pid=%s)"%e.pid)
        raise ex.excError
    except ex.excSignal:
        self.log.error("interrupted by signal")
        raise ex.excError
    except:
        # unexpected failure: dump the traceback for support
        self.save_exc()
        raise ex.excError("unexpected locking error")
    self.lockfd = lockfd
def unlock(self):
    """Release the host-wide vgimport lock acquired by lock()."""
    import lock
    lock.unlock(self.lockfd)
def provision(self):
    """Delegate provisioning to the HP-UX vg provisioning driver."""
    mod = __import__("provDiskVgHP-UX")
    getattr(mod, "ProvisioningDisk")(self).provisioner()
opensvc-1.8~20170412/lib/rcSvcmon.py 0000644 0001750 0001750 00000004676 13073467726 017212 0 ustar jkelbert jkelbert import os
import rcExceptions as ex
from rcUtilities import *
# multiprocessing is not usable on the Windows port, and its import
# may fail on some unix platforms: fall back to serial execution
# (mp=False) in both cases.
if rcEnv.sysname == "Windows":
    mp = False
else:
    try:
        from multiprocessing import Process, Queue, Lock
        mp = True
    except:
        mp = False
def svcmon_normal1(svc, queue=None):
    """
    Collect the group status of one service and push it to the
    collector: directly when no queue is available or the payload is
    too large for a multiprocessing Queue, through <queue> otherwise.
    """
    # don't schedule svcmon updates for encap services.
    # those are triggered by the master node
    status = svc.group_status()
    containers = svc.get_resources("container")
    if len(containers) > 0 and svc.has_encap_resources:
        for container in containers:
            try:
                # refresh the encap status; the returned dict was never
                # used here (the original built a placeholder dict on
                # error that was also unused)
                svc.encap_json_status(container)
            except ex.excNotAvailable:
                pass
    # fixed: compute the push lists once instead of once per branch
    o = svc.svcmon_push_lists(status)
    _size = len(str(o))
    if queue is None or _size > 30000:
        # multiprocess Queue not supported, can't combine results
        g_vars, g_vals, r_vars, r_vals = o
        svc.node.collector.call('svcmon_update_combo', g_vars, g_vals, r_vars, r_vals)
    else:
        queue.put(o)
def svcmon_normal(svcs):
    """
    Push the status of all local, non-encap services to the collector.
    When multiprocessing is available, one worker process is forked per
    service and the per-service results are combined through queues
    into a single collector call; otherwise services are processed
    serially.
    """
    ps = []
    queues = {}
    for svc in svcs:
        if svc.encap:
            # encap service updates are triggered by the master node
            continue
        if not mp:
            # no multiprocessing support: process serially
            svcmon_normal1(svc, None)
            continue
        try:
            queues[svc.svcname] = Queue(maxsize=32000)
        except:
            # some platform don't support Queue's synchronize (bug 3770)
            queues[svc.svcname] = None
        p = Process(target=svcmon_normal1, args=(svc, queues[svc.svcname]))
        p.start()
        ps.append(p)
    for p in ps:
        p.join()
    if mp:
        # gather the queued per-service results and push them in a
        # single combined collector call
        g_vals = []
        r_vals = []
        for svc in svcs:
            if svc.svcname not in queues or queues[svc.svcname] is None:
                continue
            if queues[svc.svcname].empty():
                # the worker pushed directly (oversized payload) or failed
                continue
            g_vars, _g_vals, r_vars, _r_vals = queues[svc.svcname].get()
            g_vals.append(_g_vals)
            r_vals.append(_r_vals)
        if len(g_vals) > 0:
            svc.node.collector.call('svcmon_update_combo', g_vars, g_vals, r_vars, r_vals)
opensvc-1.8~20170412/lib/svcmon.py 0000644 0001750 0001750 00000032030 13073467726 016706 0 ustar jkelbert jkelbert from __future__ import print_function
import sys
import os
import optparse
import string
import platform
#
# add project lib to path
#
prog = "svcmon"
import svcBuilder
import rcExceptions as ex
from rcUtilities import *
from lock import *
import node
import rcStatus
import rcColor
sysname, nodename, x, x, machine, x = platform.uname()
# multiprocessing is not usable on Windows and may fail to import on
# some unix platforms: fall back to serial execution (mp=False).
if sysname == "Windows":
    mp = False
else:
    try:
        from multiprocessing import Process, Queue, Lock
        mp = True
    except:
        mp = False
# the version module is generated at packaging time; default to "dev"
# when running from a source tree
try:
    from version import version
except:
    version = "dev"
def max_len(svcs):
    """
    Return the display width of the widest service or encap container
    name in <svcs> (dicts or service objects), with a floor of 7.
    Container names get 2 extra columns for their indent prefix.
    """
    width = 7
    for svc in svcs:
        if type(svc) == dict:
            svcname = svc.get("svcname", "")
        else:
            svcname = svc.svcname
        width = max(width, len(svcname))
        if not hasattr(svc, "get_resources"):
            # plain dicts (collector records) carry no container info
            continue
        for container in svc.get_resources('container'):
            width = max(width, len(getattr(container, "name")) + 2)
    return width
def svcmon_get_status(svc):
    """Refresh the cached group status of <svc>. Used as a
    multiprocessing worker target, so the result is discarded."""
    svc.group_status()
def svcmon_normal1(svc, options, fmt=None):
    """
    Print the status line of one service, plus one line per encap
    container instance, using the printf-style format <fmt>, and
    return the (g_vars, g_vals, r_vars, r_vals) lists to push to
    the collector.
    """
    # don't schedule svcmon updates for encap services.
    # those are triggered by the master node
    status = svc.group_status()
    l = []
    applen = 10
    app = str(svc.app)
    if len(app) > applen:
        # truncate long app names, marking the truncation with '*'
        app = app[:applen-1]+"*"
    name = svc.svcname
    name = rcColor.colorize(fmt.split()[0] % name, rcColor.color.BOLD)
    data = [
        name,
        app,
        svc.svc_env,
        svc.clustertype,
        '-',
        "yes" if svc.frozen() else "no",
        "yes" if svc.disabled else "no",
        rcStatus.colorize_status(status["avail"]),
        rcStatus.colorize_status(status["overall"]),
    ]
    if options.verbose:
        data += [
            rcStatus.colorize_status(status["container"]),
            rcStatus.colorize_status(status["ip"]),
            rcStatus.colorize_status(status["disk"]),
            rcStatus.colorize_status(status["fs"]),
            rcStatus.colorize_status(status.get("share", "n/a")),
            rcStatus.colorize_status(status["app"]),
            rcStatus.colorize_status(status["hb"]),
            rcStatus.colorize_status(status["sync"]),
        ]
    buff = fmt % tuple(data)
    l.append(buff)
    containers = svc.get_resources("container")
    if len(containers) > 0 and svc.has_encap_resources:
        for container in containers:
            try:
                s = svc.encap_json_status(container)
            except ex.excNotAvailable as e:
                # encap status unavailable: display 'n/a' placeholders
                s = {'resources': [],
                     'ip': 'n/a',
                     'disk': 'n/a',
                     'sync': 'n/a',
                     'hb': 'n/a',
                     'container': 'n/a',
                     'fs': 'n/a',
                     'share': 'n/a',
                     'app': 'n/a',
                     'avail': 'n/a',
                     'overall': 'n/a'}
            name = " @"+container.name
            # NOTE(review): this uses rcStatus.color while the service
            # line above uses rcColor.color -- presumably both expose
            # the same constants; verify
            name = rcColor.colorize(fmt.split()[0] % name, rcStatus.color.WHITE)
            data = [
                name,
                '-',
                '-',
                '-',
                container.type.replace('container.', ''),
                '-',
                '-',
                rcStatus.colorize_status(s["avail"]),
                rcStatus.colorize_status(s["overall"]),
            ]
            if options.verbose:
                data += [
                    rcStatus.colorize_status(s["container"]),
                    rcStatus.colorize_status(s["ip"]),
                    rcStatus.colorize_status(s["disk"]),
                    rcStatus.colorize_status(s["fs"]),
                    rcStatus.colorize_status(s.get("share", "n/a")),
                    rcStatus.colorize_status(s["app"]),
                    rcStatus.colorize_status(s["hb"]),
                    rcStatus.colorize_status(s["sync"]),
                ]
            buff = fmt % tuple(data)
            l.append(buff)
    print('\n'.join(l))
    # fixed: the original called svcmon_push_lists(status) twice,
    # discarding the first result
    return svc.svcmon_push_lists(status)
def svcmon_cluster(node, options):
    """
    Display the cluster-wide status of the local services, fetched
    from the collector rest api. Return 1 on error, None on success.
    """
    svcnames = ",".join([r.svcname for r in node.svcs])
    try:
        data = node.collector_rest_get("/services?props=svc_id,svcname,svc_app,svc_env,svc_cluster_type,svc_status,svc_availstatus,svc_status_updated&meta=0&orderby=svcname&filters=svcname (%s)&limit=0"%svcnames)
    except Exception as exc:
        print("error fetching data from the collector rest api: %s" % str(exc), file=sys.stderr)
        return 1
    if "error" in data:
        print("error fetching data from the collector rest api: %s" % data["error"], file=sys.stderr)
        return 1
    if "data" not in data:
        print("no 'data' key in the collector rest api response", file=sys.stderr)
        return 1
    if len(data["data"]) == 0:
        print("no service found on the collector", file=sys.stderr)
        return 1
    svc_ids = []
    for d in data["data"]:
        svc_ids.append(d["svc_id"])
    # compute the svcname column width over the services and, in
    # verbose mode, their per-node instances
    max_len_data = []
    max_len_data += data["data"]
    if options.verbose:
        instance_data = svcmon_cluster_verbose_data(node, svc_ids)
        for instances in instance_data.values():
            max_len_data += instances
    svcname_len = max_len(max_len_data)
    fmt_svcname = '%(svcname)-' + str(svcname_len) + 's'
    fmt = fmt_svcname + ' %(svc_app)-10s %(svc_env)-4s %(svc_cluster_type)-8s | %(svc_availstatus)-10s %(svc_status)-10s | %(svc_status_updated)s'
    # table header
    print(" "*svcname_len+" app type topology | avail overall | updated")
    print(" "*svcname_len+" -------------------------+-----------------------+--------------------")
    for d in data["data"]:
        d["svcname"] = rcColor.colorize(fmt_svcname % d, rcStatus.color.BOLD)
        d["svc_status"] = rcStatus.colorize_status(d["svc_status"])
        d["svc_availstatus"] = rcStatus.colorize_status(d["svc_availstatus"])
        print(fmt % d)
        if options.verbose:
            # one line per service instance, indented under the service
            if d['svc_id'] not in instance_data:
                print(" (no instances data)")
                continue
            for inst in instance_data[d["svc_id"]]:
                print(fmt%inst)
def svcmon_cluster_verbose_data(node, svc_ids):
    """
    Fetch from the collector the per-node instance statuses of the
    given services (only instances updated in the last 16 minutes) and
    return them as a dict indexed by svc_id, with record fields mapped
    to the keys expected by the svcmon_cluster() display format.
    Return an empty dict on any error.
    """
    data = node.collector_rest_get("/services_instances?props=svc_id,node_id,mon_availstatus,mon_overallstatus,mon_updated&meta=0&filters=svc_id (%s)&filters=mon_updated>-16m&limit=0"%",".join(svc_ids))
    if "error" in data:
        print("error fetching data from the collector rest api: %s" % data["error"], file=sys.stderr)
        return {}
    if "data" not in data:
        print("no 'data' key in the collector rest api response", file=sys.stderr)
        return {}
    if len(data["data"]) == 0:
        print("no service instance found on the collector", file=sys.stderr)
        return {}
    _data = {}
    # resolve node_id -> nodename through a second rest call
    node_ids = set([])
    for d in data["data"]:
        node_ids.add(d["node_id"])
    node_data = node.collector_rest_get("/nodes?props=node_id,nodename&meta=0&filters=node_id (%s)&limit=0"%",".join(node_ids))
    if "error" in node_data:
        # fixed: this error path used to print data["error"] instead of
        # the failing request's node_data["error"]
        print("error fetching data from the collector rest api: %s" % node_data["error"], file=sys.stderr)
        return {}
    if "data" not in node_data:
        print("no 'data' key in the collector rest api response", file=sys.stderr)
        return {}
    if len(node_data["data"]) == 0:
        print("no node found on the collector", file=sys.stderr)
        return {}
    nodenames = {}
    for d in node_data["data"]:
        nodenames[d["node_id"]] = d["nodename"]
    # map the instance records to the display fields used by
    # svcmon_cluster()
    for d in data["data"]:
        if d["svc_id"] not in _data:
            _data[d["svc_id"]] = []
        d["svc_app"] = ""
        d["svc_cluster_type"] = ""
        d["svc_env"] = ""
        d["svc_availstatus"] = rcStatus.colorize_status(d["mon_availstatus"])
        d["svc_status"] = rcStatus.colorize_status(d["mon_overallstatus"])
        d["svc_status_updated"] = d["mon_updated"]
        if d["node_id"] in nodenames:
            nodename = nodenames[d["node_id"]]
        else:
            # unresolved node: fall back to displaying the raw node_id
            nodename = d["node_id"]
        d["svcname"] = " @"+nodename
        _data[d["svc_id"]].append(d)
    return _data
def svcmon_normal(svcs, options):
    """
    Print the local services status table, one line per service plus
    one per encap container, and optionally push the statuses to the
    collector (options.upddb).
    """
    svcname_len = max_len(svcs)
    fmt_svcname = '%-' + str(svcname_len) + 's'
    if options.verbose:
        fmt = fmt_svcname + ' %-10s %-4s %-8s %-9s | %-6s %-8s | %-10s %-10s | %-10s %-10s %-10s %-10s %-10s %-10s %-10s %-10s'
        print(" "*svcname_len+" app type topology container | frozen disabled | avail overall | container ip disk fs share app hb sync")
        print(" "*svcname_len+" -----------------------------------+-----------------+-----------------------+----------------------------------------------------------------------------------")
    else:
        fmt = fmt_svcname + ' %-10s %-4s %-8s %-9s | %-6s %-8s | %-10s %-10s'
        print(" "*svcname_len+" app type topology container | frozen disabled | avail overall ")
        print(" "*svcname_len+" -----------------------------------+-----------------+-----------------------")
    ps = []
    if mp and options.refresh:
        #
        # parallelize the slow path, ie the status refresh
        #
        for svc in svcs:
            if svc.encap and options.upddb:
                continue
            p = Process(target=svcmon_get_status, args=(svc,))
            p.start()
            ps.append(p)
        for p in ps:
            p.join()
    # display (and collect push lists) serially, in svcname order
    g_vals = []
    r_vals = []
    for svc in sorted(svcs, key=lambda x: x.svcname):
        g_vars, _g_vals, r_vars, _r_vals = svcmon_normal1(svc, options, fmt)
        if options.upddb:
            g_vals.append(_g_vals)
            r_vals.append(_r_vals)
    if options.upddb and len(g_vals) > 0:
        if options.delay > 0:
            # random delay to level the load on the collector
            import random
            import time
            delay = int(random.random()*options.delay)
            time.sleep(delay)
        svc.node.collector.call('svcmon_update_combo', g_vars, g_vals, r_vars, r_vals)
# command line interface definition
__ver = prog + " version " + version
__usage = prog + " [ OPTIONS ]\n"
parser = optparse.OptionParser(version=__ver, usage=__usage)
parser.add_option("-s", "--service", default="", action="store", dest="parm_svcs",
                  help="comma-separated list of service to display status of")
parser.add_option("--refresh", default=False, action="store_true", dest="refresh",
                  help="do not use resource status cache")
parser.add_option("--updatedb", default=False, action="store_true", dest="upddb",
                  help="update resource status in central database")
parser.add_option("-v", "--verbose", default=False, action="store_true", dest="verbose",
                  help="display resource groups status for each selected service")
parser.add_option("--maxdelaydb", default=0, action="store", type="int", dest="delay",
                  help="introduce a random delay before pushing to database to level the load on the collector")
parser.add_option("--debug", default=False, action="store_true", dest="debug",
                  help="debug mode")
parser.add_option("-c", "--cluster", default=False, action="store_true", dest="cluster",
                  help="fetch and display cluster-wide service status from the collector.")
parser.add_option("--color", default="auto", action="store", dest="color",
                  help="colorize output. possible values are : auto=guess based on tty presence, always|yes=always colorize, never|no=never colorize")
def _main(node, argv=None):
    """
    Parse the command line, build the service objects and dispatch to
    the cluster-wide or local status display. When pushing to the
    collector (--updatedb), the run is serialized by the svcmon lock.
    Return the exit code.
    """
    (options, args) = parser.parse_args(argv)
    rcColor.use_color = options.color
    if options.upddb:
        # serialize concurrent collector updates
        lockf = 'svcmon.lock'
        try:
            lockfd = monlock(fname=lockf)
        except ex.excError:
            return 1
        except:
            import traceback
            traceback.print_exc()
            return 1
    if len(options.parm_svcs) > 0:
        node.build_services(svcnames=options.parm_svcs.split(','))
    else:
        node.build_services()
    node.set_rlimit()
    # propagate relevant options to the service objects
    for s in node.svcs:
        s.options.debug = options.debug
        s.options.refresh = options.upddb
        if options.refresh:
            s.options.refresh = options.refresh
    if options.cluster:
        ret = svcmon_cluster(node, options)
    else:
        ret = svcmon_normal(node.svcs, options)
    node.close()
    if options.upddb:
        try:
            monunlock(lockfd)
        except ex.excError:
            return 1
        except:
            import traceback
            traceback.print_exc()
            return 1
    if ret is None:
        # display functions return None on success
        ret = 0
    return ret
def main(argv=None):
    """
    svcmon entrypoint: build the Node object, run _main() and translate
    exceptions into a non-zero exit code. Return the exit code.
    """
    if argv is None:
        argv = sys.argv
    node_mod = ximport('node')
    try:
        node = node_mod.Node()
    except Exception as exc:
        print(exc, file=sys.stderr)
        return 1
    try:
        return _main(node, argv)
    except ex.excError as e:
        print(e, file=sys.stderr)
        return 1
    finally:
        # NOTE(review): _main() also closes the node on the success
        # path -- close() is presumably idempotent; verify
        node.close()
    # the original ended with an unreachable 'return 0': every path of
    # the try/except above returns, and the finally clause does not.
# cli entrypoint: propagate main()'s return value as the exit code
if __name__ == "__main__":
    ret = main()
    sys.exit(ret)
opensvc-1.8~20170412/lib/rcIfconfigOSF1.py 0000644 0001750 0001750 00000006102 13073467726 020104 0 ustar jkelbert jkelbert from subprocess import *
import rcIfconfig
from rcUtilities import justcall
def ipv4_bitmask(s):
    """
    Convert an 8-character lowercase hexadecimal netmask (for example
    "ffffff00") to dotted quad notation ("255.255.255.0").
    Return None when the input is not 8 lowercase hex digits.
    """
    if len(s) != 8:
        return
    import re
    if re.compile('^[0-9a-f]*$').match(s) is None:
        return
    octets = [str(int(s[idx:idx+2], 16)) for idx in range(0, 8, 2)]
    return '.'.join(octets)
class ifconfig(rcIfconfig.ifconfig):
    """
    Parser for Tru64/OSF1 'ifconfig -a' output, filling the generic
    rcIfconfig interface list.
    """

    def __init__(self, mcast=False):
        rcIfconfig.ifconfig.__init__(self, mcast=mcast)
        out = Popen(['ifconfig', '-a'], stdin=None, stdout=PIPE, stderr=PIPE, close_fds=True).communicate()[0]
        self.parse(out)

    def set_hwaddr(self, i):
        """
        Fill in the MAC address of interface <i> from hwmgr, unless it
        is already set. Returns the (possibly updated) interface.
        """
        if i is None or i.hwaddr != '':
            return i
        if ":" in i.name:
            # strip the logical interface suffix (ex: ln0:1 -> ln0)
            name = i.name.split(":")[0]
        else:
            name = i.name
        cmd = ["hwmgr", "get", "attribute", "-category", "network",
               "-a", "name="+name, "-a", "MAC_address"]
        out, err, ret = justcall(cmd)
        if ret != 0:
            return i
        for line in out.split('\n'):
            if not line.strip().startswith("MAC"):
                continue
            l = line.split("=")
            # normalize 08-00-2b-... to 08:00:2b:...
            i.hwaddr = l[1].replace('-', ':').lower()
        return i

    def parse(self, out):
        """
        Parse 'ifconfig -a' output: interface header lines start at
        column 0 ("<name>: <flags>"), address lines are indented.
        """
        i = None
        for l in out.split("\n"):
            if l == '':
                continue
            if l[0] != ' ':
                # new interface section: finalize the previous one
                i = self.set_hwaddr(i)
                (ifname, ifstatus) = l.split(': ')
                i = rcIfconfig.interface(ifname)
                self.intf.append(i)
                # defaults
                i.link_encap = ''
                i.scope = ''
                i.bcast = []
                i.mtu = []
                i.mask = []
                i.ipaddr = []
                i.ip6addr = []
                i.ip6mask = []
                i.hwaddr = ''
                i.flag_up = False
                i.flag_broadcast = False
                i.flag_running = False
                i.flag_multicast = False
                i.flag_ipv4 = False
                i.flag_ipv6 = False
                i.flag_loopback = False
                # flags are not mutually exclusive: test each
                # independently (fixed: the original elif chain only
                # ever set the first matching flag, usually UP)
                if 'UP' in ifstatus:
                    i.flag_up = True
                if 'BROADCAST' in ifstatus:
                    i.flag_broadcast = True
                if 'RUNNING' in ifstatus:
                    i.flag_running = True
                if 'MULTICAST' in ifstatus:
                    i.flag_multicast = True
                if 'IPv4' in ifstatus:
                    i.flag_ipv4 = True
                if 'IPv6' in ifstatus:
                    i.flag_ipv6 = True
            else:
                # indented line: scan "<keyword> <value>" pairs
                n = 0
                w = l.split()
                while n < len(w):
                    [p, v] = w[n:n+2]
                    if p == 'inet':
                        i.ipaddr.append(v)
                        # the netmask hex string is 2 tokens after the
                        # address: "inet <addr> netmask <hexmask> ..."
                        i.mask.append(ipv4_bitmask(w[n+3]))
                    elif p == 'ipmtu':
                        i.mtu.append(v)
                    elif p == 'inet6':
                        (a, m) = v.split('/')
                        i.ip6addr += [a]
                        i.ip6mask += [m]
                    n += 2
        # finalize the last parsed interface
        i = self.set_hwaddr(i)
# module self-test: dump the parser class documentation
if __name__ == "__main__":
    for c in (ifconfig,) :
        help(c)
opensvc-1.8~20170412/lib/resContainerLxc.py 0000644 0001750 0001750 00000035644 13073467726 020522 0 ustar jkelbert jkelbert import os
from datetime import datetime
from subprocess import *
import sys
import rcStatus
import resources as Res
from rcUtilitiesLinux import check_ping
from rcUtilities import which, justcall
from rcGlobalEnv import rcEnv
import resContainer
import rcExceptions as ex
class Lxc(resContainer.Container):
    """
    Lxc container resource driver.

    container status transition diagram :
       ---------
      | STOPPED |<---------------
       ---------                 |
           |                     |
         start                   |
           |                     |
           V                     |
       ----------                |
      | STARTING |--error-       |
       ----------         |      |
           |              |      |
           V              V      |
       ---------    ----------   |
      | RUNNING |  | ABORTING |  |
       ---------    ----------   |
           |              |      |
        no process        |      |
           |              |      |
           V              |      |
       ----------         |      |
      | STOPPING |<-------       |
       ----------                |
           |                     |
            ---------------------
    """
    def files_to_sync(self):
        """Return the list of files to replicate to peer nodes."""
        # the config file might be in a umounted fs resource
        # in which case, no need to ask for its sync as the sync won't happen
        l = []
        # replicate the config file in the system standard path
        cf = self.get_cf_path()
        if cf:
            l.append(cf)
        return l

    def rcp_from(self, src, dst):
        """Copy <src> from the container rootfs to <dst> on the host."""
        rootfs = self.get_rootfs()
        if len(rootfs) == 0:
            raise ex.excError()
        src = rootfs + src
        cmd = ['cp', src, dst]
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError("'%s' execution error:\n%s"%(' '.join(cmd), err))
        return out, err, ret

    def rcp(self, src, dst):
        """Copy <src> from the host to <dst> inside the container rootfs."""
        rootfs = self.get_rootfs()
        if len(rootfs) == 0:
            raise ex.excError()
        dst = rootfs + dst
        cmd = ['cp', src, dst]
        out, err, ret = justcall(cmd)
        if ret != 0:
            raise ex.excError("'%s' execution error:\n%s"%(' '.join(cmd), err))
        return out, err, ret

    def lxc(self, action):
        """Run lxc-start or lxc-stop for <action>, logging the duration.
        Raises ex.excError on command failure.
        """
        self.find_cf()
        outf = '/var/tmp/svc_'+self.name+'_lxc_'+action+'.log'
        if action == 'start':
            cmd = ['lxc-start', '-d', '-n', self.name, '-o', outf]
            if self.cf:
                cmd += ['-f', self.cf]
        elif action == 'stop':
            cmd = ['lxc-stop', '-n', self.name, '-o', outf]
        else:
            self.log.error("unsupported lxc action: %s" % action)
            return 1
        t = datetime.now()
        (ret, out, err) = self.vcall(cmd)
        # fixed: this local used to be named 'len', shadowing the builtin
        duration = datetime.now() - t
        self.log.info('%s done in %s - ret %i - logs in %s' % (action, duration, ret, outf))
        if ret != 0:
            raise ex.excError

    def vm_hostname(self):
        """Return (and cache) the container hostname: lxc.utsname from
        the config file, falling back to the resource name."""
        if hasattr(self, "hostname"):
            return self.hostname
        try:
            self.hostname = self.get_cf_value("lxc.utsname")
        except:
            self.hostname = self.name
        if self.hostname is None:
            self.hostname = self.name
        return self.hostname

    def get_cf_value(self, param):
        """Return the value of <param> in the container config file,
        or None if unset."""
        self.find_cf()
        value = None
        if not os.path.exists(self.cf):
            return None
        with open(self.cf, 'r') as f:
            for line in f.readlines():
                if param not in line:
                    continue
                if line.strip()[0] == '#':
                    continue
                l = line.replace('\n', '').split('=')
                if len(l) < 2:
                    continue
                if l[0].strip() != param:
                    continue
                value = ' '.join(l[1:]).strip()
                break
        return value

    def get_rootfs(self):
        """Return the container rootfs path from the config file.
        Raises ex.excError if not set."""
        rootfs = self.get_cf_value("lxc.rootfs")
        if rootfs is None:
            self.log.error("could not determine lxc container rootfs")
            raise ex.excError
        return rootfs

    def install_drp_flag(self):
        """Create the .drp_flag marker file at the container rootfs."""
        rootfs = self.get_rootfs()
        flag = os.path.join(rootfs, ".drp_flag")
        self.log.info("install drp flag in container : %s"%flag)
        with open(flag, 'w') as f:
            f.write(' ')
        # note: the redundant f.close() inside the with block was dropped

    def set_cpuset_clone_children(self):
        """Prepare the cpuset cgroup for lxc: inherit cpuset.mems/cpus
        from the parent and enable cgroup.clone_children."""
        ppath = "/sys/fs/cgroup/cpuset"
        if not os.path.exists(ppath):
            self.log.debug("set_clone_children: %s does not exist" % ppath)
            return
        path = "/sys/fs/cgroup/cpuset/lxc"
        val = "1"
        if not os.path.exists(path):
            self.log.info("mkdir %s" % path)
            os.makedirs(path)
        for parm in ("cpuset.mems", "cpuset.cpus"):
            current_val = self.get_sysfs(path, parm)
            if current_val is None:
                continue
            if current_val == "":
                # inherit the parent cgroup's setting
                parent_val = self.get_sysfs(ppath, parm)
                self.set_sysfs(path, parm, parent_val)
        parm = "cgroup.clone_children"
        current_val = self.get_sysfs(path, parm)
        if current_val is None:
            return
        if current_val == "1":
            self.log.debug("set_cpuset_clone_children: %s/%s already set to 1" % (path, parm))
            return
        self.set_sysfs(path, parm, "1")

    def get_sysfs(self, path, parm):
        """Return the content of sysfs file <path>/<parm>, or None if
        it does not exist."""
        fpath = os.sep.join([path, parm])
        if not os.path.exists(fpath):
            self.log.debug("get_sysfs: %s does not exist" % path)
            return
        with open(fpath, "r") as f:
            current_val = f.read().rstrip("\n")
        self.log.debug("get_sysfs: %s contains %s" % (fpath, repr(current_val)))
        return current_val

    def set_sysfs(self, path, parm, val):
        """Write <val> into sysfs file <path>/<parm>, logging the action."""
        fpath = os.sep.join([path, parm])
        self.log.info("echo %s >%s" % (val, fpath))
        with open(fpath, "w") as f:
            f.write(val)

    def cleanup_cgroup(self, t="*"):
        """Remove leftover cgroup directories of this container for
        controller <t> (glob pattern)."""
        import glob
        for p in glob.glob("/sys/fs/cgroup/%s/lxc/%s-[0-9]" % (t, self.name)) + \
                 glob.glob("/sys/fs/cgroup/%s/lxc/%s" % (t, self.name)):
            try:
                os.rmdir(p)
                self.log.info("removed leftover cgroup %s" % p)
            except Exception as e:
                self.log.debug("failed to remove leftover cgroup %s: %s" % (p, str(e)))

    def container_start(self):
        """Start the container through lxc-start."""
        if not self.svc.create_pg:
            self.cleanup_cgroup()
        self.set_cpuset_clone_children()
        self.install_cf()
        self.lxc('start')

    def container_stop(self):
        """Stop the container through lxc-stop, remembering its links
        for post-stop cleanup."""
        self.links = self.get_links()
        self.install_cf()
        self.lxc('stop')

    def post_container_stop(self):
        """Cleanup the container veth links and leftover cgroups."""
        self.cleanup_links(self.links)
        self.cleanup_cgroup()

    def container_forcestop(self):
        """ no harder way to stop a lxc container, raise to signal our
            helplessness
        """
        raise ex.excError

    def get_links(self):
        """Return the host-side network link names reported by lxc-info."""
        links = []
        cmd = ['lxc-info', '--name', self.name]
        out, err, ret = justcall(cmd)
        if ret != 0:
            return []
        for line in out.splitlines():
            if line.startswith("Link:"):
                links.append(line.split()[-1].strip())
        return links

    def cleanup_link(self, link):
        """Delete the host-side network link <link>, best effort."""
        cmd = ["ip", "link", "del", "dev", link]
        out, err, ret = justcall(cmd)
        if ret == 0:
            self.log.info(" ".join(cmd))
        else:
            self.log.debug(" ".join(cmd)+out+err)

    def cleanup_links(self, links):
        """Delete each host-side link in <links>, best effort."""
        for link in links:
            self.cleanup_link(link)

    def _ping(self):
        """Return the ping check result against the container address."""
        return check_ping(self.addr, timeout=1)

    def is_up_on(self, nodename):
        """Return True if the container is up on the peer <nodename>."""
        return self.is_up(nodename)

    def is_up(self, nodename=None):
        """Return True if the container is running, preferring the
        lxc-ps probe when available."""
        if which("lxc-ps"):
            return self.is_up_ps(nodename=nodename)
        else:
            return self.is_up_info(nodename=nodename)

    def is_up_info(self, nodename=None):
        """Up-ness probe based on lxc-info, optionally run remotely."""
        cmd = ['lxc-info', '--name', self.name]
        if nodename is not None:
            cmd = rcEnv.rsh.split() + [nodename] + cmd
        out, err, ret = justcall(cmd)
        if ret != 0:
            return False
        if 'RUNNING' in out:
            return True
        return False

    def is_up_ps(self, nodename=None):
        """Up-ness probe based on lxc-ps, optionally run remotely."""
        cmd = ['lxc-ps', '--name', self.name]
        if nodename is not None:
            cmd = rcEnv.rsh.split() + [nodename] + cmd
        out, err, ret = justcall(cmd)
        if ret != 0:
            return False
        if self.name in out:
            return True
        return False

    def get_container_info(self):
        """Return the vcpus/vmem dict pushed to the collector. vcpus is
        derived from the cpuset.cpus cgroup setting."""
        cpu_set = self.get_cf_value("lxc.cgroup.cpuset.cpus")
        if cpu_set is None:
            vcpus = 0
        else:
            # NOTE(review): counts comma-separated entries; ranges like
            # "0-3" count as one -- presumably acceptable; verify
            vcpus = len(cpu_set.split(','))
        return {'vcpus': str(vcpus), 'vmem': '0'}

    def check_manual_boot(self):
        return True

    def check_capabilities(self):
        """Return True if the lxc tooling is usable on this host."""
        if not which('lxc-info'):
            self.log.debug("lxc-info is not in PATH")
            return False
        return True

    def install_cf(self):
        """Replicate the service config file to the lxc standard
        config path, if they differ."""
        if self.cf is None:
            return
        cf = self.get_cf_path()
        if cf is None:
            self.log.debug("could not determine the config file standard hosting directory")
            return
        if self.cf == cf:
            return
        dn = os.path.dirname(cf)
        if not os.path.isdir(dn):
            try:
                os.makedirs(dn)
            except Exception as e:
                raise ex.excError("failed to create directory %s: %s"%(dn, str(e)))
        self.log.info("install %s as %s" % (self.cf, cf))
        try:
            import shutil
            shutil.copy(self.cf, cf)
        except Exception as e:
            raise ex.excError(str(e))

    def get_cf_path(self):
        """Return the standard lxc config file path for this container,
        guessed from the lxc-info install prefix. None if unknown."""
        path = which('lxc-info')
        if path is None:
            return None
        dpath = os.path.dirname(path)
        if not dpath.endswith("bin"):
            return
        dpath = os.path.realpath(os.path.join(dpath, ".."))
        if dpath in (os.sep, "/usr") and os.path.exists("/var/lib/lxc"):
            return "/var/lib/lxc/%s/config" % self.name
        # fixed: this used to be 'dpath in ("/usr/local")', a substring
        # test against the string "/usr/local" (missing tuple comma),
        # which also matched dpath == "/usr"
        if dpath in ("/usr/local",) and os.path.exists("/usr/local/var/lib/lxc"):
            return "/usr/local/var/lib/lxc/%s/config" % self.name
        if dpath in (os.sep, "/usr") and os.path.exists("/etc/lxc"):
            return "/etc/lxc/%s/config" % self.name

    def check_installed_cf(self):
        """Return True if the config file is installed in the lxc
        standard path, pushing a status log entry otherwise."""
        cf = self.get_cf_path()
        if cf is None:
            self.status_log("could not determine the config file standard hosting directory")
            return False
        if os.path.exists(cf):
            return True
        self.status_log("config file is not installed as %s" % cf)
        return False

    def _status(self, verbose=False):
        self.check_installed_cf()
        return resContainer.Container._status(self, verbose=verbose)

    def find_cf(self):
        """Locate the container config file and store it in self.cf.
        Raises ex.excError when no candidate path exists."""
        if self.cf is not None:
            return
        d_lxc = os.path.join('var', 'lib', 'lxc')
        # seen on debian squeeze : prefix is /usr, but containers'
        # config files paths are /var/lib/lxc/$name/config
        # try prefix first, fallback to other know prefixes
        prefixes = [os.path.join(os.sep),
                    os.path.join(os.sep, 'usr'),
                    os.path.join(os.sep, 'usr', 'local')]
        for prefix in [self.prefix] + [p for p in prefixes if p != self.prefix]:
            cf = os.path.join(prefix, d_lxc, self.name, 'config')
            if os.path.exists(cf):
                cf_d = os.path.dirname(cf)
                if not os.path.exists(cf_d):
                    os.makedirs(cf_d)
                self.cf = cf
                return
        # on Oracle Linux, config is in /etc/lxc
        cf = os.path.join(os.sep, 'etc', 'lxc', self.name, 'config')
        if os.path.exists(cf):
            self.cf = cf
            return
        self.cf = None
        raise ex.excError("unable to find the container configuration file")

    def find_prefix(self):
        """Return the lxc install prefix hosting bin/lxc-start, or None."""
        prefixes = [os.path.join(os.sep),
                    os.path.join(os.sep, 'usr'),
                    os.path.join(os.sep, 'usr', 'local')]
        for prefix in prefixes:
            if os.path.exists(os.path.join(prefix, 'bin', 'lxc-start')):
                return prefix
        return None

    def __init__(self,
                 rid,
                 name,
                 guestos="Linux",
                 cf=None,
                 rcmd=None,
                 osvc_root_path=None,
                 **kwargs):
        resContainer.Container.__init__(self,
                                        rid=rid,
                                        name=name,
                                        type="container.lxc",
                                        guestos=guestos,
                                        osvc_root_path=osvc_root_path,
                                        **kwargs)
        if rcmd is not None:
            self.runmethod = rcmd
        elif which('lxc-attach') and os.path.exists('/proc/1/ns/pid'):
            self.runmethod = ['lxc-attach', '-n', name, '--']
        else:
            self.runmethod = rcEnv.rsh.split() + [name]
        if "lxc-attach" in ' '.join(self.runmethod):
            # override getaddr from parent class with a noop
            self.getaddr = self.dummy
        else:
            # enable ping test on start
            self.ping = self._ping
        self.cf = cf

    def dummy(self, cache_fallback=False):
        """No-op getaddr replacement used with lxc-attach."""
        pass

    def on_add(self):
        self.prefix = self.find_prefix()
        if self.prefix is None:
            self.log.error("lxc install prefix not found")
            raise ex.excInitError

    def operational(self):
        """Return True when the container is ready for encap actions.
        For systemd containers, wait until default.target is active."""
        if not resContainer.Container.operational(self):
            return False
        cmd = self.runmethod + ['test', '-f', '/bin/systemctl']
        out, err, ret = justcall(cmd)
        if ret == 1:
            # not a systemd container. no more checking.
            self.log.debug("/bin/systemctl not found in container")
            return True
        # systemd on-demand loading will let us start the encap service before
        # the network is fully initialized, causing start issues with nfs mounts
        # and listening apps.
        # => wait for systemd default target to become active
        cmd = self.runmethod + ['systemctl', 'is-active', 'default.target']
        out, err, ret = justcall(cmd)
        if ret == 1:
            # if systemctl is-active fails, retry later
            self.log.debug("systemctl is-active failed")
            return False
        if out.strip() == "active":
            self.log.debug("systemctl is-active succeeded")
            return True
        # ok, wait some more
        self.log.debug("waiting for lxc to come up")
        return False

    def __str__(self):
        return "%s name=%s" % (Res.Resource.__str__(self), self.name)

    def provision(self):
        """Delegate provisioning to the lxc provisioning driver."""
        m = __import__("provLxc")
        prov = m.ProvisioningLxc(self)
        prov.provisioner()
opensvc-1.8~20170412/lib/resApp.py 0000644 0001750 0001750 00000041537 13073467726 016647 0 ustar jkelbert jkelbert """
The module defining the App resource and RsetApps resourceset objects.
"""
from subprocess import Popen
from datetime import datetime
import os
import pwd
import time
import stat
from rcUtilities import justcall, which, lazy, is_string
from rcGlobalEnv import rcEnv
from resources import Resource
from resourceset import ResourceSet
import rcStatus
import rcExceptions as ex
import lock
def run_as_popen_kwargs(fpath):
    """
    Setup the Popen keyword args to execute <fpath> with the
    privileges demoted to those of the owner of <fpath>.

    Returns a dict with 'preexec_fn', 'cwd' and 'env' keys on unix,
    an empty dict on Windows. Raises ex.excError if <fpath> can not
    be stat()ed.
    """
    if rcEnv.sysname == "Windows":
        # no privilege demotion support on Windows
        return {}
    try:
        fstat = os.stat(fpath)
    except Exception as exc:
        raise ex.excError(str(exc))
    cwd = rcEnv.pathtmp
    user_uid = fstat[stat.ST_UID]
    user_gid = fstat[stat.ST_GID]
    try:
        user_name = pwd.getpwuid(user_uid)[0]
    except KeyError:
        # uid not in the passwd database
        user_name = "unknown"
    try:
        pw_record = pwd.getpwnam(user_name)
        user_name = pw_record.pw_name
        user_home_dir = pw_record.pw_dir
    except KeyError:
        user_home_dir = rcEnv.pathtmp
    # rebuild a minimal user environment for the demoted process
    env = os.environ.copy()
    env['HOME'] = user_home_dir
    env['LOGNAME'] = user_name
    env['PWD'] = cwd
    env['USER'] = user_name
    return {'preexec_fn': demote(user_uid, user_gid), 'cwd': cwd, 'env': env}
def demote(user_uid, user_gid):
    """
    Return a privilege demotion function to plug as Popen() prefex_fn
    keyword argument, customized for <user_uid> and <user_gid>.
    """
    def drop_privileges():
        """Demote the child process to user_uid/user_gid."""
        # the gid must be changed first, while still privileged
        os.setgid(user_gid)
        os.setuid(user_uid)
    return drop_privileges
class StatusWARN(Exception):
    """
    Raised by status evaluation code to make status() report a "warn"
    state.
    """
    pass
class StatusNA(Exception):
    """
    Raised by status evaluation code to make status() report a "n/a"
    state.
    """
    pass
class RsetApps(ResourceSet):
    """
    The app resource specific resourceset class.
    Mainly defines a specific resource sort method honoring the start,
    stop, check and info sequencing numbers.
    """
    def __init__(self,
                 type=None,
                 resources=None,
                 parallel=False,
                 optional=False,
                 disabled=False,
                 tags=None):
        # fixed: 'resources' and 'tags' used mutable default arguments
        # ([] and set([])), shared across all instantiations
        if resources is None:
            resources = []
        if tags is None:
            tags = set([])
        ResourceSet.__init__(self,
                             type=type,
                             resources=resources,
                             optional=optional,
                             disabled=disabled,
                             parallel=parallel,
                             tags=tags)

    def action(self, action, **kwargs):
        """
        Wrap the standard resourceset action method to ignore launcher errors
        on stop.
        """
        try:
            ResourceSet.action(self, action, **kwargs)
        except ex.excError:
            if action in ("stop", "shutdown", "rollback", "delete", "unprovision"):
                self.log.info("there were errors during app stop. please check "
                              "the quality of the scripts. continuing anyway.")
                return
            raise

    def sort_resources(self, resources, action):
        """
        A resource sort method honoring the start, stop, check and info
        sequencing numbers. Falls back to sorting by rid when any
        resource lacks the sequencing attribute.
        """
        if action in ("shutdown", "rollback", "unprovision", "delete"):
            action = "stop"
        attr = action + '_seq'
        retained_resources = [res for res in resources if hasattr(res, attr)]
        if len(retained_resources) != len(resources):
            attr = 'rid'
        resources.sort(key=lambda x: getattr(x, attr))
        return resources
class App(Resource):
    """
    The App resource driver class.

    Drives a launcher script honoring the start/stop/status/info calling
    convention (one positional action argument), with optional per-action
    sequencing numbers and an optional execution timeout.
    """
    def __init__(self, rid=None,
                 script=None,
                 start=None,
                 stop=None,
                 check=None,
                 info=None,
                 timeout=None,
                 **kwargs):
        if script is None:
            raise ex.excInitError("script parameter must be defined in resource %s"%rid)
        Resource.__init__(self, rid, "app", **kwargs)
        self.rset_class = RsetApps
        self.script = script
        self.start_seq = start
        self.stop_seq = stop
        self.check_seq = check
        self.info_seq = info
        self.timeout = timeout
        self.label = os.path.basename(script)
        self.lockfd = None
        # assume executable until validate_script_exec() proves otherwise
        self.script_exec = True

    @lazy
    def lockfile(self):
        """
        Lazy init for the resource lock file path property.
        """
        lockfile = os.path.join(rcEnv.pathlock, self.svc.svcname)
        lockfile = ".".join((lockfile, self.rid))
        return lockfile

    def __lt__(self, other):
        """
        Order app resources by start sequence number. Resources with no
        start sequence sort last.
        """
        if other.start_seq is None:
            return True
        if self.start_seq is None:
            return False
        return self.start_seq < other.start_seq

    def validate_on_action(self):
        """
        Do sanity checks on the resource parameters before running an action.
        """
        self.validate_script_path()
        self.validate_script_exec()

    def validate_script_exec(self):
        """
        Invalidate the script if the file is not executable or not found.
        """
        if self.script is None:
            self.script_exec = False
            return
        if which(self.script) is None:
            self.status_log("script %s is not executable" % self.script)
            self.script_exec = False

    def validate_script_path(self):
        """
        Converts the script path to a realpath.
        Invalidate the script if not found.
        If the script is specified as a basename, consider it is to be found
        in the <pathetc>/<svcname>.d directory.
        """
        if self.script is None:
            return
        if not self.script.startswith('/'):
            self.script = os.path.join(self.svc.initd, self.script)
        if os.path.exists(self.script):
            self.script = os.path.realpath(self.script)
            return
        self.script = None

    def is_up(self):
        """
        Return 0 if the app resource is up.
        """
        if self.pg_frozen():
            raise StatusNA()
        if self.script is None:
            self.status_log("script does not exist", "warn")
            raise StatusNA()
        if not os.path.exists(self.script):
            self.status_log("script %s does not exist" % self.script, "warn")
            raise StatusNA()
        if self.check_seq is None:
            self.status_log("check is not set", "info")
            raise StatusNA()
        ret = self.run('status', dedicated_log=False)
        return ret

    def info(self):
        """
        Contribute app resource standard and script-provided key/val pairs
        to the service's resinfo.
        """
        keyvals = [
            ["script", self.script],
            ["start", str(self.start_seq) if self.start_seq else ""],
            ["stop", str(self.stop_seq) if self.stop_seq else ""],
            ["check", str(self.check_seq) if self.check_seq else ""],
            ["info", str(self.info_seq) if self.info_seq else ""],
            ["timeout", str(self.timeout) if self.timeout else ""],
        ]
        if self.info_seq is None:
            return self.fmt_info(keyvals)
        self.validate_on_action()
        buff = self.run('info', dedicated_log=False, return_out=True)
        # fixed: the original tested "is_string(buff) != str", comparing a
        # boolean to the str type, which is always true and made the
        # launcher output parsing below unreachable
        if not is_string(buff) or len(buff) == 0:
            keyvals.append(["Error", "info not implemented in launcher"])
            # fixed: return the formatted info like every other code path,
            # not the raw keyvals list
            return self.fmt_info(keyvals)
        # parse "key: value" lines emitted by the launcher
        for line in buff.splitlines():
            if len(line) == 0:
                continue
            elements = line.split(":")
            if len(elements) < 2:
                keyvals.append(["Error", "parsing: %s" % line])
                continue
            keyvals.append([elements[0].strip(), ":".join(elements[1:]).strip()])
        return self.fmt_info(keyvals)

    def start(self):
        """
        Start the resource.
        """
        self.create_pg()
        self.validate_on_action()
        if self.start_seq is None:
            return
        if self.script is None:
            raise ex.excError("script does not exist")
        try:
            status = self.is_up()
        except:
            # best effort: any status evaluation error means "not proven up"
            status = 1
        if status == 0:
            self.log.info("%s is already started", self.label)
            return
        ret = self.run('start')
        if ret != 0:
            raise ex.excError()
        self.can_rollback = True

    def stop(self):
        """
        Stop the resource.
        """
        self.validate_on_action()
        if self.stop_seq is None:
            return
        if self.script is None:
            return
        if self.status() == rcStatus.DOWN:
            self.log.info("%s is already stopped", self.label)
            return
        ret = self.run('stop')
        if ret != 0:
            raise ex.excError()

    def unlock(self):
        """
        Release the app action lock.
        """
        self.log.debug("release app lock")
        # NOTE(review): after a prior unlock() self.lockfd is None;
        # lock.unlock() is assumed to tolerate a None fd — confirm
        lock.unlock(self.lockfd)
        try:
            os.unlink(self.lockfile)
        except OSError:
            pass
        self.lockfd = None

    def lock(self, action=None, timeout=0, delay=1):
        """
        Acquire the app action lock.
        """
        if self.lockfd is not None:
            return
        details = "(timeout %d, delay %d, action %s, lockfile %s)" % \
                  (timeout, delay, action, self.lockfile)
        self.log.debug("acquire app lock %s", details)
        try:
            lockfd = lock.lock(
                timeout=timeout,
                delay=delay,
                lockfile=self.lockfile,
                intent=action
            )
        except lock.lockTimeout as exc:
            raise ex.excError("timed out waiting for lock %s: %s" % (details, str(exc)))
        except lock.lockNoLockFile:
            raise ex.excError("lock_nowait: set the 'lockfile' param %s" % details)
        except lock.lockCreateError:
            raise ex.excError("can not create lock file %s" % details)
        except lock.lockAcquire as exc:
            raise ex.excError("another action is currently running %s: %s" % (details, str(exc)))
        except ex.excSignal:
            raise ex.excError("interrupted by signal %s" % details)
        except Exception as exc:
            self.save_exc()
            raise ex.excError("unexpected locking error %s: %s" % (details, str(exc)))
        if lockfd is not None:
            self.lockfd = lockfd

    def _status(self, verbose=False):
        """
        Return the resource status.
        """
        self.validate_on_action()
        n_ref_res = len(self.svc.get_resources(['fs', 'ip', 'container', 'share', 'disk']))
        status = self.svc.group_status(excluded_groups=set([
            "sync",
            "app",
            "disk.scsireserv",
            "disk.drbd",
            "hb"
        ]))
        # don't evaluate the launcher status when the infrastructure
        # resources the app depends on are not up
        if n_ref_res > 0 and str(status["overall"]) not in ("up", "n/a"):
            self.log.debug("abort resApp status because ip+fs status is %s", status["overall"])
            if verbose:
                self.status_log("ip+fs status is %s, skip check"%status["overall"], "info")
            self.status_log("not evaluated (instance not up)", "info")
            return rcStatus.NA
        try:
            ret = self.is_up()
        except StatusWARN:
            return rcStatus.WARN
        except StatusNA:
            return rcStatus.NA
        except ex.excError as exc:
            # the lock intent of a concurrent action is embedded in the
            # error message: surface which action is in progress
            msg = str(exc)
            if "intent '" in msg:
                action = msg.split("intent '")[-1].split("'")[0]
                self.status_log("%s in progress" % action, "info")
            self.log.debug("resource status forced to n/a: an action is running")
            return rcStatus.NA
        if ret == 0:
            return self.status_stdby(rcStatus.UP)
        elif ret == 1:
            return self.status_stdby(rcStatus.DOWN)
        self.status_log("check reports errors (%d)" % ret)
        return rcStatus.WARN

    def set_executable(self):
        """
        Switch the script file execution bit to on.
        """
        if self.script_exec:
            return
        if not os.path.exists(self.script):
            return
        self.vcall(['chmod', '+x', self.script])

    def run(self, action, dedicated_log=True, return_out=False):
        """
        Acquire the app resource lock, run the action and release for info, start
        and stop actions.
        Or acquire-release the app resource lock and run status.
        """
        self.lock(action)
        if action == "status":
            # status must not hold the lock while the check runs; the
            # finally clause below then unlocks an already-released lock,
            # which unlock() tolerates
            self.unlock()
        try:
            return self._run(action, dedicated_log=dedicated_log, return_out=return_out)
        finally:
            self.unlock()

    def _run(self, action, dedicated_log=True, return_out=False):
        """
        Do script validations, run the command associated with the action and
        catch errors.
        """
        if self.script is None:
            return 1
        if not os.path.exists(self.script):
            if action == "start":
                self.log.error("script %s does not exist. can't run %s "
                               "action", self.script, action)
                return 1
            elif action == "stop":
                self.log.info("script %s does not exist. hosting fs might "
                              "already be down", self.script)
                return 0
            elif return_out:
                return 0
            else:
                self.status_log("script %s does not exist" % self.script)
                raise StatusWARN()
        self.set_executable()
        try:
            return self._run_cmd(action, dedicated_log=dedicated_log, return_out=return_out)
        except OSError as exc:
            # errno 8: the script has no shebang or a bad format
            if exc.errno == 8:
                if not return_out and not dedicated_log:
                    self.status_log("exec format error")
                    raise StatusWARN()
                else:
                    self.log.error("%s execution error (Exec format error)", self.script)
            # errno 13: the script is not readable/executable
            elif exc.errno == 13:
                if not return_out and not dedicated_log:
                    self.status_log("permission denied")
                    raise StatusWARN()
                else:
                    self.log.error("%s execution error (Permission Denied)", self.script)
            else:
                self.svc.save_exc()
            return 1
        except:
            self.svc.save_exc()
            return 1

    def _run_cmd(self, action, dedicated_log=True, return_out=False):
        """
        Switch between buffered outputs or polled execution.
        Return stdout if <return_out>, else return the returncode.
        """
        cmd = [self.script, action]
        if dedicated_log:
            return self._run_cmd_dedicated_log(action, cmd)
        elif return_out:
            out, err, ret = justcall(cmd)
            if ret != 0:
                return "Error: info not implemented in launcher"
            return out
        else:
            out, err, ret = justcall(cmd)
            self.log.debug("%s returned out=[%s], err=[%s], ret=[%d]", cmd, out, err, ret)
            return ret

    def _run_cmd_dedicated_log(self, action, cmd):
        """
        Poll stdout and stderr to log as soon as new lines are available.
        """
        outf = os.path.join(
            rcEnv.pathtmp,
            'svc_'+self.svc.svcname+'_'+os.path.basename(self.script)+'.log'
        )
        ofile = open(outf, 'w')
        kwargs = {
            'stdin': None,
            'stdout': ofile.fileno(),
            'stderr': ofile.fileno(),
        }
        # run as the script owner, with a minimal user environment
        kwargs.update(run_as_popen_kwargs(self.script))
        user = kwargs.get("env").get("LOGNAME")
        self.log.info('exec %s as user %s', ' '.join(cmd), user)
        now = datetime.now()
        proc = Popen(cmd, **kwargs)
        try:
            if self.timeout is None:
                proc.communicate()
            else:
                # poll once a second up to <timeout> seconds
                for _ in range(self.timeout+1):
                    proc.poll()
                    if proc.returncode is not None:
                        break
                    time.sleep(1)
                if proc.returncode is None:
                    self.log.error("execution timeout (%d seconds)", self.timeout)
                    proc.terminate()
                    return 1
                proc.communicate()
        except (KeyboardInterrupt, ex.excSignal):
            _len = datetime.now() - now
            self.log.error('%s interrupted after %s - ret %d - logs in %s',
                           action, _len, 1, outf)
            ofile.close()
            return 1
        _len = datetime.now() - now
        msg = '%s done in %s - ret %d - logs in %s' % (action, _len, proc.returncode, outf)
        if proc.returncode == 0:
            self.log.info(msg)
        else:
            self.log.error(msg)
        ofile.close()
        return proc.returncode
opensvc-1.8~20170412/lib/resFsRhcsLinux.py 0000644 0001750 0001750 00000000337 13073467726 020330 0 ustar jkelbert jkelbert from rcGlobalEnv import rcEnv
Res = __import__("resFsLinux")
class Mount(Res.Mount):
    # NOTE(review): presumably the RHCS cluster framework owns the
    # filesystem start/stop operations, so this driver neutralizes them —
    # inferred from the module name, confirm against the RHCS integration.
    def start(self):
        # intentionally a no-op
        pass
    def stop(self):
        # intentionally a no-op
        pass
# Module self-test: print the pydoc help of the classes defined here.
if __name__ == "__main__":
    for c in (Mount,) :
        help(c)
opensvc-1.8~20170412/lib/resIpZone.py 0000644 0001750 0001750 00000004076 13073467726 017330 0 ustar jkelbert jkelbert import time
import resIpSunOS as Res
import rcExceptions as ex
from subprocess import *
from rcGlobalEnv import rcEnv
rcIfconfig = __import__('rcIfconfig'+rcEnv.sysname)
class Ip(Res.Ip):
    """
    Ip resource driver for Solaris zones: plumbs the address in the
    global zone and assigns it to a non-global zone.
    """
    def __init__(self,
                 rid=None,
                 ipdev=None,
                 ipname=None,
                 zone=None,
                 mask=None,
                 gateway=None,
                 **kwargs):
        Res.Ip.__init__(self,
                        rid=rid,
                        ipdev=ipdev,
                        ipname=ipname,
                        mask=mask,
                        gateway=gateway,
                        **kwargs)
        self.zone = zone
        # tag the resource with its zone name and a generic 'zone' marker
        self.tags.add(zone)
        self.tags.add('zone')

    def startip_cmd(self):
        # plumb the stacked interface, configure the address and hand it
        # over to the non-global zone in one ifconfig invocation
        cmd=['ifconfig', self.stacked_dev, 'plumb', self.addr, \
             'netmask', '+', 'broadcast', '+', 'up' , 'zone' , self.zone ]
        return self.vcall(cmd)

    def stopip_cmd(self):
        cmd=['ifconfig', self.stacked_dev, 'unplumb']
        return self.vcall(cmd)

    def allow_start(self):
        """
        Raise an ex.Ip* exception if the start conditions are not met:
        underlying interface down, address already up locally, or address
        answering ping from another host.
        """
        # retry=1 / interval=0: effectively a single interface check
        retry = 1
        interval = 0
        import time  # NOTE(review): redundant, 'time' is already imported at module level
        ok = False
        if 'noalias' not in self.tags:
            for i in range(retry):
                ifconfig = rcIfconfig.ifconfig()
                intf = ifconfig.interface(self.ipdev)
                if intf is not None and intf.flag_up:
                    ok = True
                    break
                time.sleep(interval)
            if not ok:
                self.log.error("Interface %s is not up. Cannot stack over it." % self.ipdev)
                raise ex.IpDevDown(self.ipdev)
        if self.is_up() is True:
            self.log.info("%s is already up on %s" % (self.addr, self.ipdev))
            raise ex.IpAlreadyUp(self.addr)
        # skip the ping conflict check for non-routed addresses or when an
        # abort_start check already ran
        if not hasattr(self, 'abort_start_done') and 'nonrouted' not in self.tags and self.check_ping():
            self.log.error("%s is already up on another host" % (self.addr))
            raise ex.IpConflict(self.addr)
        return
# Module self-test: print the pydoc help of the classes defined here.
if __name__ == "__main__":
    for c in (Ip,) :
        help(c)
opensvc-1.8~20170412/lib/resSyncZfsSnap.py 0000644 0001750 0001750 00000011763 13073467726 020346 0 ustar jkelbert jkelbert import os
from rcGlobalEnv import rcEnv
import rcExceptions as ex
import rcStatus
import time
import datetime
import resSync
import rcZfs
from rcUtilities import justcall
class syncZfsSnap(resSync.Sync):
    """
    Scheduled zfs snapshot resource: creates timestamped snapshots named
    <name>.snap.<ts> on each configured dataset and prunes all but the
    <keep> most recent ones.
    """
    def __init__(self,
                 rid=None,
                 name=None,
                 dataset=[],
                 keep=1,
                 recursive=True,
                 **kwargs):
        resSync.Sync.__init__(self,
                              rid=rid, type="sync.zfssnap",
                              **kwargs)
        if name:
            self.label = "zfs '%s' snapshot %s" % (name, ", ".join(dataset))
        else:
            self.label = "zfs snapshot %s" % ", ".join(dataset)
        self.dataset = dataset      # list of dataset names to snapshot
        self.recursive = recursive  # snapshot/destroy child datasets too
        self.keep = keep            # number of snapshots to retain
        self.name = name            # optional snapshot name prefix
        self.zfs = {}

    def info(self):
        # contribute resource configuration key/value pairs to resinfo
        data = [
          ["dataset", " ".join(self.dataset)],
          ["name", self.name if self.name else ""],
          ["keep", str(self.keep)],
          ["recursive", str(self.recursive).lower()],
          ["sync_max_delay", str(self.sync_max_delay) if self.sync_max_delay else ""],
          ["schedule", self.schedule if self.schedule else ""],
        ]
        return self.fmt_info(data)

    def on_add(self):
        pass

    def create_snap(self, dataset):
        """
        Snapshot <dataset> as <name>.snap.<timestamp>.
        """
        ds = rcZfs.Dataset(dataset, log=self.log)
        snap = ""
        if self.name:
            suffix = self.name
        else:
            suffix = ""
        suffix += ".snap.%Y-%m-%d.%H:%M:%S"
        snap += datetime.datetime.now().strftime(suffix)
        try:
            ds.snapshot(snapname=snap, recursive=self.recursive)
        except Exception as e:
            raise ex.excError(str(e))

    def list_snaps(self, dataset):
        """
        Return the list of existing snapshot names of <dataset>.
        """
        cmd = ["zfs", "list", "-H", "-t", "snapshot", "-o", "name"]
        out, err, ret = justcall(cmd)
        snaps = []
        for line in out.splitlines():
            if line.startswith(dataset+"@"):
                snaps.append(line)
        return snaps

    def remove_snap(self, dataset):
        """
        Destroy the oldest snapshots of <dataset> created by this
        resource, keeping the <keep> most recent ones.
        """
        cursnaps = self.list_snaps(dataset)
        snaps = {}
        for sv in cursnaps:
            s = sv.replace(dataset+"@", "")
            l = s.split('.')
            if len(l) < 2:
                continue
            # only consider snapshots created by this resource
            # NOTE(review): when self.name is None this comparison can
            # never match — confirm unnamed resources are expected to
            # skip pruning
            if l[0] != self.name or l[1] != "snap":
                continue
            try:
                # key by timestamp string; strptime only validates the format
                ds = sv.split(".snap.")[-1]
                d = datetime.datetime.strptime(ds, "%Y-%m-%d.%H:%M:%S")
                snaps[ds] = sv
            except Exception as e:
                pass
        if len(snaps) <= self.keep:
            return
        # newest first: the tail beyond self.keep gets destroyed
        sorted_snaps = []
        for ds in sorted(snaps.keys(), reverse=True):
            sorted_snaps.append(snaps[ds])
        for path in sorted_snaps[self.keep:]:
            try:
                ds = rcZfs.Dataset(path, log=self.log)
                if self.recursive:
                    options = ["-r"]
                else:
                    options = []
                ds.destroy(options=options)
            except Exception as e:
                raise ex.excError(str(e))

    def _status_one(self, dataset):
        """
        Push status log entries for <dataset>: missing snaps, excess
        snaps, or a last snap older than sync_max_delay minutes.
        """
        try:
            ds = rcZfs.Dataset(dataset, log=self.log)
        except Exception as e:
            self.status_log("%s %s" % (dataset, str(e)))
            return
        snaps = []
        for sv in self.list_snaps(dataset):
            s = sv.replace(dataset+"@", "")
            l = s.split('.')
            if len(l) < 2:
                continue
            if l[0] != self.name or l[1] != "snap":
                continue
            try:
                ds = sv.split(".snap.")[-1]
                d = datetime.datetime.strptime(ds, "%Y-%m-%d.%H:%M:%S")
                snaps.append(d)
            except Exception as e:
                pass
        if len(snaps) == 0:
            self.status_log("%s has no snap" % dataset)
            return
        if len(snaps) > self.keep:
            self.status_log("%s has %d too many snaps" % (dataset, len(snaps)-self.keep))
        last = sorted(snaps, reverse=True)[0]
        limit = datetime.datetime.now() - datetime.timedelta(minutes=self.sync_max_delay)
        if last < limit:
            self.status_log("%s last snap is too old (%s)" % (dataset, last.strftime("%Y-%m-%d %H:%M:%S")))

    def _status(self, verbose=False):
        # aggregate per-dataset checks: any warn entry degrades to WARN
        for dataset in self.dataset:
            if dataset.count("/") < 1:
                self.status_log("misformatted dataset entry %s (expected <pool>/<ds>)" % dataset)
                continue
            self._status_one(dataset)
        issues = set(self.status_logs_get(["warn"])) - set([''])
        if len(issues) == 0:
            return rcStatus.UP
        return rcStatus.WARN

    def _sync_update(self, dataset):
        # take a new snapshot, then prune the old ones
        self.create_snap(dataset)
        self.remove_snap(dataset)

    def sync_update(self):
        for dataset in self.dataset:
            self._sync_update(dataset)

    def __str__(self):
        return "%s dataset=%s keep=%s" % (resSync.Sync.__str__(self), str(self.dataset), str(self.keep))
opensvc-1.8~20170412/lib/resHbSg.py 0000644 0001750 0001750 00000001137 13073467726 016742 0 ustar jkelbert jkelbert import resHb
import rcStatus
from rcGlobalEnv import rcEnv
class Hb(resHb.Hb):
    """ HeartBeat ressource
    """
    def __init__(self, rid=None, name=None, **kwargs):
        resHb.Hb.__init__(self, rid, type="hb.sg", **kwargs)
        self.label = name

    # NOTE(review): the double leading underscore makes this name-mangled
    # to _Hb__status, so the generic resource status framework (which
    # calls _status) presumably never invokes it — confirm whether this
    # should be named _status.
    def __status(self, verbose=False):
        # UP only when cmviewcl reports this node's status as "up"
        if 'node' in self.svc.cmviewcl and \
           rcEnv.nodename in self.svc.cmviewcl['node'] and \
           'status' in self.svc.cmviewcl['node'][rcEnv.nodename] and \
           self.svc.cmviewcl['node'][rcEnv.nodename]['status'] == "up":
            return rcStatus.UP
        return rcStatus.DOWN
opensvc-1.8~20170412/lib/resSyncSymcloneLinux.py 0000644 0001750 0001750 00000004647 13073467726 021576 0 ustar jkelbert jkelbert import os
import logging
from rcGlobalEnv import rcEnv
import rcExceptions as ex
import rcStatus
import resources as Res
import time
import datetime
import resSyncSymclone as symclone
from rcUtilities import which
class syncSymclone(symclone.syncSymclone):
    """
    Linux-specific symclone sync driver: adds scsi rescan, readiness
    polling and multipath refresh of the clone target devices.
    """
    def dev_rescan(self, dev):
        """
        Trigger a scsi rescan of <dev> through sysfs.
        """
        dev = dev.replace('/dev/', '')
        sysdev = "/sys/block/%s/device/rescan"%dev
        self.log.info("echo 1>%s"%sysdev)
        with open(sysdev, 'w') as s:
            s.write("1")

    def refresh_multipath(self, dev):
        """
        Ask multipathd to reload the map of <dev>, if multipath is installed.
        """
        if which("multipath") is None:
            return
        cmd = ['multipath', '-v0', '-r', dev]
        (ret, out, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError

    def dev_ready(self, dev):
        """
        Return True if <dev> answers a scsi test-unit-ready.
        """
        cmd = ['sg_turs', dev]
        (ret, out, err) = self.vcall(cmd)
        if ret != 0:
            return False
        return True

    def wait_for_dev_ready(self, dev):
        """
        Poll <dev> readiness every <delay> seconds for up to <timeout> seconds.
        """
        delay = 1
        timeout = 5
        # fixed: integer division — timeout/delay is a float on python3
        # and range() would raise a TypeError
        for i in range(timeout // delay):
            if self.dev_ready(dev):
                return
            if i == 0:
                self.log.info("waiting for device %s to become ready (max %i secs)"%(dev,timeout))
            time.sleep(delay)
        self.log.error("timed out waiting for device %s to become ready (max %i secs)"%(dev,timeout))
        raise ex.excError

    def wait_for_devs_ready(self):
        """
        For each clone pair, rescan, wait for readiness and refresh
        multipath on the target physical device.
        """
        self.showdevs()
        for pair in self.pairs:
            src, dst = self.split_pair(pair)
            dev = self.showdevs_etree[dst].find('Dev_Info/pd_name').text
            # fixed: 'is' compared object identity with a string literal
            # and could never match; use equality
            if dev == "Not Visible":
                raise ex.excError("pd name is 'Not Visible'. please scan scsi buses and run symcfg discover")
            self.dev_rescan(dev)
            self.wait_for_dev_ready(dev)
            self.refresh_multipath(dev)

    def __init__(self,
                 rid=None,
                 type="sync.symclone",
                 symid=None,
                 pairs=None,
                 precopy=True,
                 consistent=True,
                 **kwargs):
        # None default instead of a shared mutable [] literal
        symclone.syncSymclone.__init__(self,
                                       rid=rid,
                                       type=type,
                                       symid=symid,
                                       pairs=pairs if pairs is not None else [],
                                       precopy=precopy,
                                       consistent=consistent,
                                       **kwargs)
opensvc-1.8~20170412/lib/rcGceDisks.py 0000644 0001750 0001750 00000002346 13073467726 017431 0 ustar jkelbert jkelbert import rcExceptions as ex
import json
from rcUtilities import justcall
from rcGlobalEnv import rcEnv
class GceDiskss(object):
    """
    Iterable collection of GceDisks arrays.
    """
    # NOTE(review): class-level attribute, shared by every GceDiskss
    # instance — each instantiation appends another GceDisks to the same
    # list. Confirm this accumulation is intended.
    arrays = []
    def __init__(self, objects=[]):
        self.arrays.append(GceDisks())
    def __iter__(self):
        for array in self.arrays:
            yield(array)
class GceDisks(object):
    """
    Collector fetching gce compute inventory data as json strings
    through the gcloud command line tool.
    """
    def __init__(self):
        # data categories this collector knows how to fetch
        self.keys = ['disks', 'snapshots', 'quotas', 'instances']
        self.name = "gce project "+rcEnv.fqdn.split(".")[-2]

    def _gcloud_list(self, subcmd):
        """
        Run 'gcloud compute <subcmd> list' and return its json output.
        """
        cmd = ["gcloud", "compute", subcmd, "list", "-q", "--format", "json"]
        out, err, ret = justcall(cmd)
        return out

    def get_disks(self):
        return self._gcloud_list("disks")

    def get_snapshots(self):
        return self._gcloud_list("snapshots")

    def get_quotas(self):
        # quota data is reported by the regions listing
        return self._gcloud_list("regions")

    def get_instances(self):
        return self._gcloud_list("instances")
# Module self-test: dump the disks inventory of each array.
if __name__ == "__main__":
    o = GceDiskss()
    for gcedisks in o:
        print(gcedisks.get_disks())
opensvc-1.8~20170412/lib/resFsFreeBSD.py 0000644 0001750 0001750 00000011327 13073467726 017624 0 ustar jkelbert jkelbert import os
import rcMountsFreeBSD as rcMounts
import resFs as Res
from rcUtilities import qcall, protected_mount, getmount
from rcGlobalEnv import rcEnv
import rcExceptions as ex
from stat import *
def try_umount(self):
    """
    One umount attempt: plain umount first, then progressively kill the
    processes using the mount point and retry, and finally force the
    umount. Returns the last umount return code.
    """
    cmd = ['umount', self.mount_point]
    (ret, out, err) = self.vcall(cmd, err_to_info=True)
    if ret == 0:
        return 0
    """ don't try to kill process using the source of a
        protected bind mount
    """
    if protected_mount(self.mount_point):
        return 1
    """ best effort kill of all processes that might block
        the umount operation. The priority is given to mass
        action reliability, ie don't contest oprator's will
    """
    cmd = ['sync']
    (ret, out, err) = self.vcall(cmd, err_to_info=True)
    # up to 4 kill+umount rounds; stop early when nothing was killed
    for i in range(4):
        nb_killed = self.killfuser(self.mount_point)
        self.log.info('umount %s'%self.mount_point)
        cmd = ['umount', self.mount_point]
        ret = qcall(cmd)
        if ret == 0 or nb_killed == 0:
            break
    if ret != 0:
        self.log.info("no more process using %s, yet umount fails. try forced umount."%self.mount_point)
        cmd = ['umount', '-f', self.mount_point]
        (ret, out, err) = self.vcall(cmd, err_to_info=True)
    return ret
class Mount(Res.Mount):
    """ define FreeBSD mount/umount doAction """
    def __init__(self,
                 rid,
                 mount_point,
                 device,
                 fs_type,
                 mount_options,
                 snap_size=None,
                 **kwargs):
        # cached Mounts parser; invalidated (None) after start/stop
        self.Mounts = None
        Res.Mount.__init__(self,
                           rid,
                           mount_point,
                           device,
                           fs_type,
                           mount_options,
                           snap_size,
                           **kwargs)
        # fsck command table, keyed by filesystem type
        self.fsck_h = {
            'ufs': {
                'bin': 'fsck',
                'cmd': ['fsck', '-t', 'ufs', '-p', self.device]
            },
        }

    def killfuser(self, dir):
        """
        Kill the processes using <dir> and return how many were signaled.
        """
        cmd = ['fuser', '-kmc', dir]
        (ret, out, err) = self.vcall(cmd, err_to_info=True)
        """ return the number of process we sent signal to
        """
        # fuser output format: "<dir>: <pid> <pid> ..."
        l = out.split(':')
        if len(l) < 2:
            return 0
        return len(l[1].split())

    def is_up(self):
        self.Mounts = rcMounts.Mounts()
        return self.Mounts.has_mount(self.device, self.mount_point)

    def realdev(self):
        """
        Return the character device backing this mount, or None.
        """
        dev = None
        try:
            mode = os.stat(self.device)[ST_MODE]
        except:
            self.log.debug("can not stat %s" % self.device)
            return None
        if S_ISCHR(mode):
            dev = self.device
        else:
            # the device is a path inside another mount: look up the
            # device of its hosting mount point
            mnt = getmount(self.device)
            if self.Mounts is None:
                self.Mounts = rcMounts.Mounts()
            m = self.Mounts.has_param("mnt", mnt)
            if m is None:
                self.log.debug("can't find dev %(dev)s mounted in %(mnt)s in mnttab"%dict(mnt=mnt, dev=self.device))
                return None
            dev = m.dev
        return dev

    def disklist(self):
        """
        Return the set of devices used by this resource.
        """
        dev = self.realdev()
        if dev is None:
            return set([])
        try:
            # existence check only; stat result is not used further
            statinfo = os.stat(dev)
        except:
            self.log.error("can not stat %s" % dev)
            raise ex.excError
        return set([dev])

    def can_check_writable(self):
        return True

    def start(self):
        if self.Mounts is None:
            self.Mounts = rcMounts.Mounts()
        Res.Mount.start(self)
        if self.is_up() is True:
            self.log.info("%s is already mounted" % self.label)
            return 0
        self.fsck()
        if not os.path.exists(self.mount_point):
            os.makedirs(self.mount_point, 0o755)
        # only pass -t/-o when a fs type / options are configured
        if self.fs_type != "":
            fstype = ['-t', self.fs_type]
        else:
            fstype = []
        if self.mount_options != "":
            mntopt = ['-o', self.mount_options]
        else:
            mntopt = []
        cmd = ['mount']+fstype+mntopt+[self.device, self.mount_point]
        (ret, out, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError
        self.Mounts = None
        self.can_rollback = True

    def stop(self):
        if self.Mounts is None:
            self.Mounts = rcMounts.Mounts()
        if self.is_up() is False:
            self.log.info("%s is already umounted" % self.label)
            return
        # up to 3 umount attempts (try_umount itself retries with kills)
        for i in range(3):
            ret = try_umount(self)
            if ret == 0: break
        if ret != 0:
            self.log.error('failed to umount %s'%self.mount_point)
            raise ex.excError
        self.Mounts = None
# Module self-test: print the pydoc help of the classes defined here.
if __name__ == "__main__":
    for c in (Mount,) :
        help(c)
opensvc-1.8~20170412/lib/provKvm.py 0000644 0001750 0001750 00000010662 13073467726 017054 0 ustar jkelbert jkelbert from provisioning import Provisioning
from rcGlobalEnv import rcEnv
from rcUtilities import which
import os
import rcExceptions as ex
class ProvisioningKvm(Provisioning):
    """
    KVM container provisioner: snapshots a btrfs template, builds a
    config floppy image and runs virt-install.
    """
    def __init__(self, r):
        Provisioning.__init__(self, r)
        # provisioning parameters come from the service config defaults
        self.section = r.svc.config.defaults()
        if 'snapof' in self.section:
            self.snapof = self.section['snapof']
        else:
            self.snapof = None
        if 'snap' in self.section:
            self.snap = self.section['snap']
        else:
            self.snap = None
        if 'virtinst' in self.section:
            self.virtinst = self.section['virtinst']
        else:
            self.virtinst = None

    def check_kvm(self):
        # the domain is considered provisioned when its config file exists
        if os.path.exists(self.r.cf):
            return True
        return False

    def setup_kvm(self):
        """
        Run the configured virt-install command line.
        """
        if self.virtinst is None:
            self.r.log.error("the 'virtinst' parameter must be set")
            raise ex.excError
        ret, out, err = self.r.vcall(self.virtinst.split())
        if ret != 0:
            raise ex.excError

    def setup_ips(self):
        # drop stale ssh host keys for the service name and each of its ips
        self.purge_known_hosts()
        for resource in self.r.svc.get_resources("ip"):
            self.purge_known_hosts(resource.addr)

    def purge_known_hosts(self, ip=None):
        """
        Remove the service name (or <ip>) entry from root's known_hosts.
        """
        if ip is None:
            cmd = ['ssh-keygen', '-R', self.r.svc.svcname]
        else:
            cmd = ['ssh-keygen', '-R', ip]
        ret, out, err = self.r.vcall(cmd, err_to_info=True)

    def setup_snap(self):
        """
        Create the btrfs subvolume snapshot <snapof> -> <snap>.
        """
        if self.snap is None:
            self.r.log.error("the 'snap' parameter must be set")
            raise ex.excError
        if self.snapof is None:
            self.r.log.error("the 'snapof' parameter must be set")
            raise ex.excError
        if not which('btrfs'):
            self.r.log.error("'btrfs' command not found")
            raise ex.excError
        cmd = ['btrfs', 'subvolume', 'snapshot', self.snapof, self.snap]
        ret, out, err = self.r.vcall(cmd)
        if ret != 0:
            raise ex.excError

    def get_pubkey(self):
        """
        Return root's ssh public key content.
        """
        # NOTE(review): only looks for a DSA key — confirm whether RSA or
        # other key types should also be considered.
        p = os.path.join(os.sep, 'root', '.ssh', 'id_dsa.pub')
        try:
            with open(p) as f:
                pub = f.read(8000)
        except:
            self.r.log.error('failed to read root public key')
            raise ex.excError
        return pub

    def get_gw(self):
        """
        Return the hypervisor's default gateway, parsed from 'route -n'.
        """
        cmd = ['route', '-n']
        ret, out, err = self.r.call(cmd)
        if ret != 0:
            self.r.log.error('failed to read routing table')
            raise ex.excError
        for line in out.split('\n'):
            if line.startswith('0.0.0.0'):
                l = line.split()
                if len(l) > 1:
                    return l[1]
        self.r.log.error('failed to find default gateway')
        raise ex.excError

    def get_ns(self):
        """
        Return the first nameserver found in /etc/resolv.conf.
        """
        p = os.path.join(os.sep, 'etc', 'resolv.conf')
        with open(p) as f:
            for line in f.readlines():
                if 'nameserver' in line:
                    l = line.split()
                    if len(l) > 1:
                        return l[1]
        self.r.log.error('failed to find a nameserver')
        raise ex.excError

    def get_config(self):
        """
        Build the semicolon-separated guest config file content.
        """
        cf = ['todo']
        s = ';'.join(('vm', self.r.name))
        cf.append(s)
        # NOTE(review): the nameserver is hardcoded to the libvirt default
        # bridge address while get_ns() above is never called — confirm.
        s = 'ns;192.168.122.1'
        cf.append(s)
        s = ';'.join(('gw', self.get_gw()))
        cf.append(s)
        try:
            s = ';'.join(('hv_root_pubkey', self.get_pubkey()))
            cf.append(s)
        except ex.excError:
            pass
        for resource in self.r.svc.get_resources("ip"):
            s = ';'.join((resource.rid, resource.ipdev, resource.addr, resource.mask))
            cf.append(s)
        cf.append('')
        return '\n'.join(cf)

    def setup_cfdisk(self):
        """
        Write the guest config into a 512-byte padded floppy image and
        append it to the virt-install command line.
        """
        config = self.get_config()
        block = len(config)//512 + 1
        cfdisk = os.path.join(rcEnv.pathtmp, self.r.svc.svcname+'.cfdisk')
        try:
            with open(cfdisk, 'w') as f:
                f.write(config)
                # pad the image to a whole number of 512-byte blocks
                f.seek(block*512)
                f.write('\0')
        except:
            self.r.log.error("failed to create config disk")
            raise ex.excError
        self.virtinst += " --disk path=%s,device=floppy"%cfdisk
        self.r.log.info("created config disk with content;\n%s", config)

    def provisioner(self):
        """
        Provision the KVM guest: snapshot, config disk, virt-install,
        then ssh known_hosts cleanup.
        """
        self.setup_snap()
        self.setup_cfdisk()
        self.setup_kvm()
        self.setup_ips()
        self.r.log.info("provisioned")
        return True
opensvc-1.8~20170412/lib/resSyncDcs.py 0000644 0001750 0001750 00000010204 13073467726 017460 0 ustar jkelbert jkelbert import os
import rcExceptions as ex
import subprocess
import resSync
from rcGlobalEnv import rcEnv
from rcUtilities import justcall
class SyncDcs(resSync.Sync):
    """
    Base class for DataCore SANsymphony (dcs) sync drivers: runs
    powershell cmdlets on a manager host over ssh, against the first
    reachable dcs server.
    """
    def wait_for_devs_ready(self):
        pass

    def get_active_dcs(self):
        """
        Elect the first dcs server answering get-dcsserver and cache it.
        """
        if self.active_dcs is not None:
            return
        for d in self.dcs:
            try:
                # fixed: the message argument was passed without a %s
                # placeholder, making the logging module raise internally
                # and drop the record
                self.log.debug("try dcs %s", d)
                self.dcscmd("get-dcsserver", dcs=d)
                self.active_dcs = d
                self.log.debug("set active dcs %s", self.active_dcs)
                return
            except:
                pass
        if self.active_dcs is None:
            self.log.error("no usable dcs server")
            raise ex.excError

    def get_auth(self):
        """
        Load the manager credentials from the auth.conf file.
        """
        if self.username is not None and \
           self.password is not None:
            return
        self.get_active_manager()
        if self.active_manager is None:
            raise ex.excError("no active manager")
        import ConfigParser
        if not os.path.exists(self.conf):
            raise ex.excError("missing %s"%self.conf)
        self.config = ConfigParser.RawConfigParser()
        self.config.read(self.conf)
        if not self.config.has_section(self.active_manager):
            raise ex.excError("no credentials for manager %s in %s"%(self.active_manager, self.conf))
        if not self.config.has_option(self.active_manager, "username"):
            raise ex.excError("no username set for manager %s in %s"%(self.active_manager, self.conf))
        if not self.config.has_option(self.active_manager, "password"):
            raise ex.excError("no password set for manager %s in %s"%(self.active_manager, self.conf))
        self.username = self.config.get(self.active_manager, "username")
        self.password = self.config.get(self.active_manager, "password")

    def dcscmd(self, cmd="", verbose=False, check=True, dcs=None):
        """
        Wrap <cmd> in a connect-dcsserver/disconnect-dcsserver session and
        run it on the active manager over ssh.
        """
        if len(cmd) == 0:
            return
        self.get_active_manager()
        if dcs is None:
            self.get_active_dcs()
            dcs = self.active_dcs
        self.get_auth()
        # NOTE(review): the password travels on the remote command line —
        # it is masked in the local log below but may be visible in the
        # remote host process list.
        cmd = self.ssh + [self.active_manager,
                          "connect-dcsserver -server %s -username %s -password %s -connection %s ; "%(dcs, self.username, self.password, self.conn)+\
                          cmd+\
                          " ; disconnect-dcsserver -connection %s"%self.conn]
        if verbose:
            import re
            from copy import copy
            _cmd = copy(cmd)
            _cmd[2] = re.sub(r'password \S+', 'password xxxxx', _cmd[2])
            self.log.info(subprocess.list2cmdline(_cmd))
            ret, out, err = self.call(cmd)
        else:
            ret, out, err = self.call(cmd, errlog=False)
        if check and "ErrorId" in err:
            raise ex.excError("dcscmd command execution error")
        return ret, out, err

    def get_active_manager(self):
        """
        Elect the first manager host reachable over ssh and cache it.
        """
        if self.active_manager is not None:
            return
        for manager in self.manager:
            cmd = self.ssh + [manager, 'id']
            out, err, ret = justcall(cmd)
            if ret != 0:
                continue
            self.active_manager = manager
            # fixed: same missing %s placeholder as in get_active_dcs
            self.log.debug("set active manager %s", self.active_manager)
            return
        if self.active_manager is None:
            self.log.error("no usable manager")
            raise ex.excError

    def __init__(self, rid=None, manager=None, dcs=None, **kwargs):
        resSync.Sync.__init__(self, rid=rid, **kwargs)
        self.ssh = rcEnv.rsh.split()
        self.active_dcs = None
        self.active_manager = None
        self.username = None
        self.password = None
        # None defaults instead of shared mutable set() literals
        self.dcs = dcs if dcs is not None else set()
        self.manager = manager if manager is not None else set()
        self.conf = os.path.join(rcEnv.pathetc, 'auth.conf')

    def on_add(self):
        self.get_conn()

    def get_conn(self):
        """
        Derive a unique dcs connection name for this service.
        """
        from hashlib import md5
        import uuid
        o = md5()
        # NOTE(review): hashlib.update() requires bytes on python3; this
        # works as-is on python2 only — confirm the target interpreter.
        o.update(uuid.uuid1().hex)
        o.update(self.svc.svcname)
        self.conn = o.hexdigest()

    def __str__(self):
        return "%s dcs=%s manager=%s" % (
            resSync.Sync.__str__(self),
            ' '.join(self.dcs),
            ' '.join(self.manager))
opensvc-1.8~20170412/lib/rcListener.py 0000644 0001750 0001750 00000002067 13073467726 017522 0 ustar jkelbert jkelbert import os
import time
from subprocess import *
from rcGlobalEnv import rcEnv
import thread
import sys
from socket import *
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
# Determine the listener TCP port: prefer the "listener.port" setting in
# the node config file, fall back to the built-in default on any error
# (missing file, missing section/option, unparsable value).
try:
    config = ConfigParser.RawConfigParser()
    config.read(rcEnv.nodeconf)
    port = config.getint("listener", "port")
except:
    port = rcEnv.listener_port
def HandleClient(conn):
    """
    Per-connection handler: any incoming connection triggers a
    'nodemgr dequeue_actions' run; the received payload is ignored.
    """
    # drain (and discard) whatever the client sent
    data = conn.recv(1024)
    cmd = [rcEnv.nodemgr, 'dequeue_actions']
    p = Popen(cmd, stdout=None, stderr=None, stdin=None)
    p.communicate()
    conn.close()
class listener(object):
    """
    TCP listener: accepts connections in a worker thread and spawns one
    handler thread per client. The constructor blocks, polling for the
    sys.stop_listener shutdown flag.
    """
    def __init__(self):
        thread.start_new(self.do, tuple())
        # supervision loop: exit the process when another thread sets
        # the stop_listener flag on the sys module
        while True:
            if getattr(sys, 'stop_listener', False):
                sys.exit(0)
            time.sleep(0.3)

    def do(self):
        """
        Bind, listen and dispatch each accepted connection to HandleClient.
        """
        sock = socket(AF_INET, SOCK_STREAM)
        sock.bind((gethostname(), port))
        sock.listen(5)
        while True:
            conn, addr = sock.accept()
            thread.start_new(HandleClient, (conn,))
# Run the listener when invoked as a script (blocks until stopped).
if __name__ == '__main__':
    a = listener()
opensvc-1.8~20170412/lib/hostidSunOS.py 0000644 0001750 0001750 00000000102 13073467726 017616 0 ustar jkelbert jkelbert from uuid import getnode
def hostid():
    """
    Return the host identifier: the hardware address reported by
    uuid.getnode(), rendered as a decimal string.
    """
    return "%d" % getnode()
opensvc-1.8~20170412/lib/rcStatsLinux.py 0000644 0001750 0001750 00000032536 13073467726 020057 0 ustar jkelbert jkelbert import os
import datetime
from rcUtilities import justcall, which
import rcStats
from rcGlobalEnv import rcEnv
class StatsProvider(rcStats.StatsProvider):
    def xentopfile(self, day):
        """
        Return the path of the xentop stat file for <day>, or None if it
        does not exist.
        """
        f = os.path.join(rcEnv.pathlog, 'xentop', 'xentop'+day)
        if os.path.exists(f):
            return f
        return None
    def svc(self, d, day, start, end):
        """
        Return (cols, lines) of per-service xentop samples for <day>
        between the <start> and <end> times ("%H:%M:%S" strings).
        """
        cols = ['date',
                'svcname',
                'cpu',
                'mem',
                'cap',
                'cap_cpu',
                'nodename']
        f = self.xentopfile(day)
        lines = []
        if f is None:
            return cols, lines
        try:
            with open(f, 'r') as f:
                buff = f.read()
        except:
            return cols, lines
        # convert the time bounds to seconds since midnight
        _start = datetime.datetime.strptime(start, "%H:%M:%S")
        _start = _start.hour * 3600 + _start.minute * 60 + _start.second
        _end = datetime.datetime.strptime(end, "%H:%M:%S")
        _end = _end.hour * 3600 + _end.minute * 60 + _end.second
        for line in buff.split('\n'):
            l = line.split()
            # valid xentop sample lines have exactly 21 fields
            if len(l) != 21:
                continue
            _d = datetime.datetime.strptime(" ".join(l[0:2]), "%Y-%m-%d %H:%M:%S")
            _d = _d.hour * 3600 + _d.minute * 60 + _d.second
            if _d < _start or _d > _end:
                continue
            # keep timestamp, name, cpu, mem, cap, cap_cpu columns
            l = [" ".join((l[0], l[1]))] + [l[2], l[5], l[7], l[8], l[10], self.nodename]
            lines.append(l)
        return cols, lines
def cpu(self, d, day, start, end):
f = self.sarfile(day)
if f is None:
return [], []
cmd = ['sar', '-t', '-u', 'ALL', '-P', 'ALL', '-f', f, '-s', start, '-e', end]
(buff, err, ret) = justcall(cmd)
if ret != 0:
cmd = ['sar', '-t', '-u', '-P', 'ALL', '-f', f, '-s', start, '-e', end]
(buff, err, ret) = justcall(cmd)
cols = []
lines = []
for line in buff.split('\n'):
l = line.split()
if 'Linux' in l:
continue
if len(l) == 7:
""" redhat 4
18:50:01 CPU %user %nice %system %iowait %idle
"""
cols = ['date',
'cpu',
'usr',
'nice',
'sys',
'iowait',
'idle',
'nodename']
elif len(l) == 8:
""" redhat 5
05:20:01 CPU %user %nice %system %iowait %steal %idle
"""
cols = ['date',
'cpu',
'usr',
'nice',
'sys',
'iowait',
'steal',
'idle',
'nodename']
elif len(l) == 11:
cols = ['date',
'cpu',
'usr',
'nice',
'sys',
'iowait',
'steal',
'irq',
'soft',
'guest',
'idle',
'nodename']
elif len(l) == 12:
cols = ['date',
'cpu',
'usr',
'nice',
'sys',
'iowait',
'steal',
'irq',
'soft',
'guest',
'gnice',
'idle',
'nodename']
else:
continue
if l[1] == 'CPU':
continue
if l[0] == 'Average:':
continue
l.append(self.nodename)
l[0] = '%s %s'%(d, l[0])
lines.append(l)
return cols, lines
def mem_u(self, d, day, start, end):
f = self.sarfile(day)
if f is None:
return [], []
cmd = ['sar', '-t', '-r', '-f', f, '-s', start, '-e', end]
buff, err, ret = justcall(cmd)
if "kbdirty" in buff:
fmt = 4
cols = ['date',
'kbmemfree',
'kbmemused',
'pct_memused',
'kbbuffers',
'kbcached',
'kbcommit',
'pct_commit',
'kbactive',
'kbinact',
'kbdirty',
'nodename']
elif "kbactive" in buff:
fmt = 3
cols = ['date',
'kbmemfree',
'kbmemused',
'pct_memused',
'kbbuffers',
'kbcached',
'kbcommit',
'pct_commit',
'kbactive',
'kbinact',
'nodename']
elif "pct_commit" in buff:
fmt = 2
cols = ['date',
'kbmemfree',
'kbmemused',
'pct_memused',
'kbbuffers',
'kbcached',
'kbcommit',
'pct_commit',
'nodename']
else:
fmt = 1
cols = ['date',
'kbmemfree',
'kbmemused',
'pct_memused',
'kbbuffers',
'kbcached',
'nodename']
n = len(cols) - 1
lines = []
for line in buff.split('\n'):
l = line.split()
if fmt > 1:
if len(l) != n:
continue
else:
if len(l) < n:
continue
l = l[:n]
if l[1] == 'kbmemfree':
continue
if l[0] == 'Average:':
continue
l.append(self.nodename)
l[0] = '%s %s'%(d, l[0])
lines.append(l)
return cols, lines
def fs_u(self, d, day, start, end):
now = datetime.datetime.now()
_start = datetime.datetime.strptime(start, "%H:%M:%S")
_start = _start.hour * 3600 + _start.minute * 60 + _start.second
_end = datetime.datetime.strptime(end, "%H:%M:%S")
_end = _end.hour * 3600 + _end.minute * 60 + _end.second
f = os.path.join(rcEnv.pathvar, 'stats_fs_u.%s' % day.lstrip("0"))
cols = ['date',
'nodename',
'mntpt',
'size',
'used']
if not os.path.exists(f):
return [], []
with open(f, 'r') as fd:
buff = fd.read()
import json
lines = []
for line in buff.split('\n'):
try:
l = json.loads(line)
except:
continue
for _l in l:
if len(_l) != 5:
continue
_now = datetime.datetime.strptime(_l[0], "%Y-%m-%d %H:%M:%S.%f")
_now = _now.hour * 3600 + _now.minute * 60 + _now.second
if _now < _start or _now > _end:
continue
lines.append(_l)
return cols, lines
def proc(self, d, day, start, end):
f = self.sarfile(day)
cols = ['date',
'runq_sz',
'plist_sz',
'ldavg_1',
'ldavg_5',
'ldavg_15',
'nodename']
if f is None:
return [], []
cmd = ['sar', '-t', '-q', '-f', f, '-s', start, '-e', end]
(buff, err, ret) = justcall(cmd)
lines = []
if "blocked" in buff:
n_fields = 7
drop_blocked = True
else:
n_fields = 6
drop_blocked = False
for line in buff.split('\n'):
l = line.split()
if len(l) != n_fields:
continue
if l[1] == 'runq-sz':
continue
if l[0] == 'Average:':
continue
if drop_blocked:
l = l[:-1]
l.append(self.nodename)
l[0] = '%s %s'%(d, l[0])
lines.append(l)
return cols, lines
def swap(self, d, day, start, end):
f = self.sarfile(day)
cols = ['date',
'kbswpfree',
'kbswpused',
'pct_swpused',
'kbswpcad',
'pct_swpcad',
'nodename']
if f is None:
return [], []
cmd = ['sar', '-t', '-S', '-f', f, '-s', start, '-e', end]
(buff, err, ret) = justcall(cmd)
if ret != 0:
""" redhat 5
"""
cmd = ['sar', '-t', '-r', '-f', f, '-s', start, '-e', end]
(buff, err, ret) = justcall(cmd)
lines = []
for line in buff.split('\n'):
l = line.split()
if len(l) == 10:
""" redhat 5
"""
l = [l[0]] + l[6:] + ['0']
if len(l) != 6:
continue
if 'kbswpfree'in l:
continue
if l[0] == 'Average:':
continue
l.append(self.nodename)
l[0] = '%s %s'%(d, l[0])
lines.append(l)
return cols, lines
def block(self, d, day, start, end):
f = self.sarfile(day)
cols = ['date',
'tps',
'rtps',
'wtps',
'rbps',
'wbps',
'nodename']
if f is None:
return [], []
cmd = ['sar', '-t', '-b', '-f', f, '-s', start, '-e', end]
(buff, err, ret) = justcall(cmd)
lines = []
for line in buff.split('\n'):
l = line.split()
if len(l) != 6:
continue
if l[1] == 'tps':
continue
if l[0] == 'Average:':
continue
l.append(self.nodename)
l[0] = '%s %s'%(d, l[0])
lines.append(l)
return cols, lines
def blockdev(self, d, day, start, end):
f = self.sarfile(day)
cols = ['date',
'dev',
'tps',
'rsecps',
'wsecps',
'avgrq_sz',
'avgqu_sz',
'await',
'svctm',
'pct_util',
'nodename']
if f is None:
return [], []
cmd = ['sar', '-t', '-d', '-p', '-f', f, '-s', start, '-e', end]
(buff, err, ret) = justcall(cmd)
lines = []
for line in buff.split('\n'):
l = line.split()
if len(l) != 10:
continue
if l[1] == 'DEV':
continue
if l[0] == 'Average:':
continue
l.append(self.nodename)
l[0] = '%s %s'%(d, l[0])
lines.append(l)
return cols, lines
def netdev(self, d, day, start, end):
f = self.sarfile(day)
cols = ['date',
'dev',
'rxpckps',
'txpckps',
'rxkBps',
'txkBps',
'nodename']
if f is None:
return [], []
cmd = ['sar', '-t', '-n', 'DEV', '-f', f, '-s', start, '-e', end]
(buff, err, ret) = justcall(cmd)
if "%ifutil" in buff:
n = 10
else:
n = 9
lines = []
div = 1
for line in buff.split('\n'):
l = line.split()
if len(l) != n:
continue
if l[1] in ['IFACE', 'lo'] :
if 'rxbyt/s' in l:
div = 1024
continue
if 'dummy' in l[1] or 'vnet' in l[1] or 'veth' in l[1] or \
'pan' in l[1] or 'sit' in l[1]:
continue
if l[0] == 'Average:':
continue
m = []
m.append('%s %s'%(d, l[0]))
m.append(l[1])
m.append(str(float(l[4])/div))
m.append(str(float(l[5])/div))
m.append(l[2])
m.append(l[3])
m.append(self.nodename)
lines.append(m)
return cols, lines
def netdev_err(self, d, day, start, end):
f = self.sarfile(day)
cols = ['date',
'dev',
'rxerrps',
'txerrps',
'collps',
'rxdropps',
'txdropps',
'nodename']
if f is None:
return [], []
cmd = ['sar', '-t', '-n', 'EDEV', '-f', f, '-s', start, '-e', end]
(buff, err, ret) = justcall(cmd)
lines = []
for line in buff.split('\n'):
l = line.split()
if len(l) != 11:
continue
if l[1] in ['IFACE', 'lo'] :
continue
if 'dummy' in l[1] or 'vnet' in l[1] or 'veth' in l[1] or \
'pan' in l[1] or 'sit' in l[1]:
continue
if l[0] == 'Average:':
continue
l = l[0:7]
l.append(self.nodename)
l[0] = '%s %s'%(d, l[0])
lines.append(l)
return cols, lines
if __name__ == "__main__":
    # ad-hoc test: dump today's memory usage samples
    sp = StatsProvider(interval=200)
    print(sp.get('mem_u'))
opensvc-1.8~20170412/lib/checkMpathLinux.py 0000644 0001750 0001750 00000010271 13073467726 020473 0 ustar jkelbert jkelbert import checks
from rcUtilities import justcall, printplus
class check(checks.check):
    """
    Multipath path-count checker.

    Reports, for each multipath map WWID, the number of active paths
    and the opensvc service using the device (if any).
    """
    chk_type = "mpath"
    # cache of svc -> disklist(), so each service is probed only once
    svcdevs = {}

    def find_svc(self, dev):
        """Return the svcname of the service using <dev>, or ''."""
        for svc in self.svcs:
            if svc not in self.svcdevs:
                try:
                    devs = svc.disklist()
                except Exception as e:
                    devs = []
                self.svcdevs[svc] = devs
            if dev in self.svcdevs[svc]:
                return svc.svcname
        return ''

    def do_check_old(self):
        """
        Parse 'multipath -l' output (fallback for multipathd without
        'show topo'), e.g.:

        mpath1 (3600508b4000971ca0000f00010650000)
        [size=404 GB][features="1 queue_if_no_path"][hwhandler="0"]
        \_ round-robin 0 [active]
         \_ 0:0:0:2 sda 8:0  [active]
         \_ 1:0:0:2 sde 8:64 [active]
        """
        cmd = ['multipath', '-l']
        (out, err, ret) = justcall(cmd)
        lines = out.split('\n')
        if len(lines) < 1:
            return self.undef
        r = []
        wwid = None
        dev = None
        # initialize the path counter: guards against a NameError if a
        # path line shows up before any map header line
        n = 0
        for line in lines:
            if len(line) > 0 and not '\_ ' in line and not line.startswith('['):
                # new mpath map line:
                # - store the previous map's result
                # - reset the path counter
                if wwid is not None:
                    r.append({'chk_instance': wwid,
                              'chk_value': str(n),
                              'chk_svcname': self.find_svc(dev),
                             })
                n = 0
                l = line.split()
                if len(l) == 2:
                    # "<alias> (<wwid>)" form: strip the parentheses
                    wwid = l[1][1:-1]
                elif len(l) == 1:
                    wwid = l[0]
                else:
                    wwid = None
                if wwid is not None and len(wwid) in (17, 33) and wwid[0] in ('2', '3', '5'):
                    # drop the NAA type digit prefix
                    wwid = wwid[1:]
            if "[active]" in line and line.startswith(' '):
                n += 1
                dev = "/dev/"+line.split()[2]
        # flush the last map
        if wwid is not None:
            r.append({'chk_instance': wwid,
                      'chk_value': str(n),
                      'chk_svcname': self.find_svc(dev),
                     })
        return r

    def do_check(self):
        """Parse 'multipathd -kshow topo'; fall back to do_check_old()
        when the daemon does not support 'show topo'."""
        cmd = ['multipathd', '-kshow topo']
        (out, err, ret) = justcall(cmd)
        if 'list|show' in out:
            # multipathd printed its usage: no 'show topo' support,
            # try parsing 'multipath -l' output instead
            return self.do_check_old()
        if ret != 0:
            return self.undef
        lines = out.split('\n')
        if len(lines) < 1:
            return self.undef
        r = []
        wwid = None
        dev = None
        # initialize the path counter: guards against a NameError if a
        # path line shows up before any map header line
        n = 0
        for line in lines:
            if ' dm-' in line:
                # new mpath map line:
                # - store the previous map's result
                # - reset the path counter
                if wwid is not None:
                    r.append({'chk_instance': wwid,
                              'chk_value': str(n),
                              'chk_svcname': self.find_svc(dev),
                             })
                n = 0
                if line.startswith(": "):
                    line = line.replace(": ", "")
                l = line.split()
                if len(l) == 0:
                    # defensive: nothing to parse on this line
                    continue
                if l[0].endswith(":"):
                    # skip event prefix: create, switchpg, reload, ...
                    l = l[1:]
                if len(l) < 2:
                    continue
                if l[1].startswith('('):
                    wwid = l[1][1:-1]
                else:
                    wwid = l[0]
                if wwid is not None and len(wwid) in (17, 33) and wwid[0] in ('2', '3', '5'):
                    # drop the NAA type digit prefix
                    wwid = wwid[1:]
            if "[active][ready]" in line or \
               "active ready" in line:
                n += 1
                dev = "/dev/"+line.split()[2]
        # flush the last map
        if wwid is not None:
            r.append({'chk_instance': wwid,
                      'chk_value': str(n),
                      'chk_svcname': self.find_svc(dev),
                     })
        return r
if __name__ == "__main__":
    # ad-hoc test: run the multipath checker and pretty-print results
    paths = check()
    tab = paths.do_check()
    printplus(tab)
opensvc-1.8~20170412/lib/rcAssetHP-UX.py 0000644 0001750 0001750 00000020304 13073467726 017570 0 ustar jkelbert jkelbert import os
from rcUtilities import justcall, which
from rcGlobalEnv import rcEnv
from subprocess import *
import rcAsset
import datetime
# make the ignite and propplus tools (print_manifest, cprop) resolvable
os.environ['PATH'] += ":/opt/ignite/bin:/opt/propplus/bin"
class Asset(rcAsset.Asset):
    """HP-UX asset discovery.

    Probes print_manifest, cprop, swapinfo, swlist, adb, ioscan and
    fcmsutil, with /var/tombstones/ts99 as a fallback source on older
    hardware. All getters return strings; 'Unknown' or '0' on failure.
    """
    def __init__(self, node):
        rcAsset.Asset.__init__(self, node)
        # cache the full print_manifest output: several getters scan it
        out, err, ret = justcall(['print_manifest'])
        if ret != 0:
            self.manifest = []
        else:
            self.manifest = out.split('\n')
        self.parse_memory()
    def _get_mem_bytes(self):
        # swapinfo -Mq prints total memory in KB; convert to MB
        cmd = ['swapinfo', '-Mq']
        (out, err, ret) = justcall(cmd)
        if ret != 0:
            return '0'
        return str(int(out)//1024)
    def parse_memory(self):
        # count populated banks and total slots from cprop's Memory
        # summary: 'Instance' lines before 'Empty Slots' are populated
        self.banks = 0
        self.slots = 0
        cmd = ['cprop', '-summary', '-c', 'Memory']
        out, err, ret = justcall(cmd)
        if ret != 0:
            return '0'
        in_banks = True
        for line in out.split('\n'):
            if 'Empty Slots' in line:
                in_banks = False
            elif 'Instance' in line:
                self.slots += 1
                if in_banks:
                    self.banks += 1
    def _get_mem_banks(self):
        # fall back to the ts99 tombstone when cprop reported nothing
        s = str(self.banks)
        if s == '0':
            s = self._get_mem_banks_ts99()
        return s
    def _get_mem_slots(self):
        return str(self.slots)
    def _get_os_vendor(self):
        return 'HP'
    def _get_os_release(self):
        (out, err, ret) = justcall(['uname', '-r'])
        if ret != 0:
            return 'Unknown'
        return out.split('\n')[0].strip()
    def _get_os_kernel(self):
        # report the QPKBASE patch bundle version as the kernel level
        (out, err, ret) = justcall(['swlist', '-l', 'bundle', 'QPKBASE'])
        if ret != 0:
            return 'Unknown'
        for line in out.split('\n'):
            if 'QPKBASE' in line:
                return line.split()[1]
        return 'Unknown'
    def _get_os_arch(self):
        (out, err, ret) = justcall(['uname', '-m'])
        if ret != 0:
            return 'Unknown'
        return out.split('\n')[0].strip()
    def _get_cpu_freq(self):
        # prefer the manifest; fall back to probing the kernel with adb
        freq = self._get_cpu_freq_manifest()
        if freq == "Unknown":
            freq = self._get_cpu_freq_adb()
        return freq
    def _get_cpu_freq_manifest(self):
        # extract "(N GHz, ...)" from the cpu model string; returns MHz
        m = self._get_cpu_model()
        if '(' not in m:
            return "Unknown"
        s = m.split('(')[-1]
        s = s.split(',')[0]
        freq, unit = s.split()
        if unit == 'GHz':
            try:
                freq = float(freq)
            except:
                return "Unknown"
            freq = str(int(freq * 1000))
        return freq
    def _get_cpu_freq_adb(self):
        # query itick_per_usec from the live kernel; the second form
        # covers older adb/kernel layouts
        # NOTE(review): communicate(input=str) requires bytes on py3 —
        # confirm this module only runs under py2 semantics
        process = Popen(['adb', '/stand/vmunix', '/dev/kmem'], stdin=PIPE, stdout=PIPE, stderr=None)
        (out, err) = process.communicate(input='itick_per_usec/2d')
        if process.returncode != 0:
            process = Popen(['adb', '-k', '/stand/vmunix', '/dev/mem'], stdin=PIPE, stdout=PIPE, stderr=None)
            (out, err) = process.communicate(input='itick_per_usec/D')
            if process.returncode != 0:
                return 'Unknown'
        lines = out.split('\n')
        if len(lines) < 2:
            return 'Unknown'
        return lines[1].split()[-1]
    def _get_cpu_cores(self):
        for line in self.manifest:
            if 'Processors:' in line:
                return line.split()[-1]
        return '0'
    def _get_cpu_dies(self):
        # dies = cores / cores-per-die; ts99 fallback if undetectable
        n = self._get_cpu_cores_per_die()
        if n == 0:
            return str(self._get_cpu_dies_ts99())
        cores = int(self._get_cpu_cores())
        return str(cores // n)
    def _get_cpu_cores_per_die(self):
        # scan up to 3 lines after 'Processors:' for "N per socket" or
        # "N core" wording in the manifest
        n = 0
        i = 0
        for line in self.manifest:
            line = line.replace('(', '').replace(')', '')
            if 'Processors:' in line:
                i = 1
                continue
            if i > 0 and i < 4:
                i += 1
                if "core" not in line and "socket" not in line:
                    continue
                words = line.split()
                for j, w in enumerate(words):
                    if w == "socket":
                        try:
                            n = int(words[j-2])
                        except:
                            break
                    if 'core' in w:
                        try:
                            n = int(words[j-1])
                        except:
                            break
            elif i >= 4:
                break
        return n
    def _get_mem_banks_ts99(self):
        # one 'DIMM Error Information' section per populated bank
        p = '/var/tombstones/ts99'
        if not os.path.exists(p):
            return '0'
        with open(p, 'r') as f:
            buff = f.read()
        lines = buff.split('\n')
        c = 0
        for line in lines:
            if "DIMM Error Information" in line:
                c += 1
        return str(c)
    def _get_cpu_dies_ts99(self):
        # count different serial numbers
        p = '/var/tombstones/ts99'
        if not os.path.exists(p):
            return 1
        with open(p, 'r') as f:
            buff = f.read()
        lines = buff.split('\n')
        serials = set([])
        for line in lines:
            if "Cpu Serial Number" in line:
                serials.add(line.split()[-1])
        if len(serials) == 0:
            return 1
        return len(serials)
    def _get_cpu_model(self):
        s = self._get_cpu_model_manifest()
        if s == 'Unknown':
            s = self._get_cpu_model_ts99()
        return s
    def _get_cpu_model_ts99(self):
        p = '/var/tombstones/ts99'
        if not os.path.exists(p):
            return 'Unknown'
        with open(p, 'r') as f:
            buff = f.read()
        lines = buff.split('\n')
        for line in lines:
            if "CPU Module" in line:
                # NOTE(review): the final replace looks like a collapsed
                # double-space -> single-space substitution — verify
                return line.strip().replace("CPU Module", "rev").replace(' ', ' ')
        return 'Unknown'
    def _get_cpu_model_manifest(self):
        # the model is on the line following 'Processors:'
        marker = False
        for line in self.manifest:
            if 'Processors:' in line:
                marker = True
                continue
            if marker:
                if "processor" not in line:
                    return 'Unknown'
                e = line.split()
                return ' '.join(e[1:]).replace('processors','').replace('processor','')
        return 'Unknown'
    def _get_serial(self):
        (out, err, ret) = justcall(['getconf', 'MACHINE_SERIAL'])
        if ret != 0:
            return 'Unknown'
        return out.strip()
    def _get_model(self):
        (out, err, ret) = justcall(['getconf', 'MACHINE_MODEL'])
        if ret != 0:
            return 'Unknown'
        return out.strip()
    def __get_hba(self):
        # discover fc HBAs via ioscan and, for each, its port WWN and
        # remote target WWNs via fcmsutil; memoized in self.hba as a
        # list of (hba_id, hba_type, targets) tuples
        if hasattr(self, "hba"):
            return self.hba
        self.hba = []
        cmd = ['/usr/sbin/ioscan', '-FunC', 'fc']
        out, err, ret = justcall(cmd)
        if ret != 0:
            return self.hba
        lines = out.split('\n')
        if len(lines) < 2:
            return self.hba
        for line in lines:
            if '/dev/' not in line:
                continue
            dev = line.strip()
            hba_type = 'fc'
            cmd = ['/opt/fcms/bin/fcmsutil', dev]
            out, err, ret = justcall(cmd)
            if ret != 0:
                continue
            for _line in out.split('\n'):
                if not 'N_Port Port World Wide Name' in _line:
                    continue
                # NOTE(review): lstrip("0x") strips any leading '0'/'x'
                # chars, not just the literal '0x' prefix — could eat
                # leading zeros of the WWN; confirm against real output
                hba_id = _line.split('=')[-1].strip().lstrip("0x")
            cmd = ['/opt/fcms/bin/fcmsutil', dev, 'get', 'remote', 'all']
            out, err, ret = justcall(cmd)
            if ret != 0:
                continue
            targets = []
            for _line in out.split('\n'):
                if not 'Target Port World Wide Name' in _line:
                    continue
                targets.append(_line.split('=')[-1].strip().lstrip("0x"))
            self.hba.append((hba_id, hba_type, targets))
        return self.hba
    def _get_hba(self):
        # flatten to (hba_id, hba_type) pairs
        hba = self.__get_hba()
        l = []
        for hba_id, hba_type, targets in hba:
            l.append((hba_id, hba_type))
        return l
    def _get_targets(self):
        # flatten to (hba_id, target_wwn) pairs
        hba = self.__get_hba()
        l = []
        for hba_id, hba_type, targets in hba:
            for target in targets:
                l.append((hba_id, target))
        return l
opensvc-1.8~20170412/lib/rcPasswdAIX.py 0000644 0001750 0001750 00000000533 13073467726 017534 0 ustar jkelbert jkelbert from subprocess import *
from rcUtilities import which
def change_root_pw(pw):
    """Change the root password on AIX.

    Feeds the new password over stdin to 'chpasswd' (as "root:<pw>")
    when available, else to 'passwd -stdin root'. Passing the secret
    via stdin rather than argv keeps it out of 'ps' output.

    Returns the command's exit code (0 on success).
    """
    if which('chpasswd') is not None:
        cmd = ['chpasswd']
        _input = "root:"+pw
    else:
        cmd = ['passwd', '-stdin', 'root']
        _input = pw
    # Popen pipes carry bytes on python3; encode if needed (no-op on
    # python2 where str is already bytes).
    if not isinstance(_input, bytes):
        _input = _input.encode("utf8")
    p = Popen(cmd, stdin=PIPE)
    # communicate() writes, closes stdin and waits; a bare
    # p.stdin.write() followed by communicate() risks a deadlock and
    # leaves stdin open so the tool never sees EOF.
    p.communicate(input=_input)
    return p.returncode
opensvc-1.8~20170412/lib/snapZfsSunOS.py 0000644 0001750 0001750 00000002540 13073467726 017760 0 ustar jkelbert jkelbert from rcUtilities import justcall
import rcExceptions as ex
import snap
from rcZfs import dataset_exists
class Snap(snap.Snap):
    """Defines a snap object with ZFS
    """
    def snapcreate(self, m):
        """ create a snapshot for m
        add self.snaps[m] with
        dict(snapinfo key val)
        """
        dataset = m.device
        if not dataset_exists(dataset, 'filesystem'):
            raise ex.syncNotSnapable
        # fixed snapshot name: <dataset>@osvc_sync, exposed read-only
        # under <mnt>/.zfs/snapshot/osvc_sync/
        snapdev = dataset +'@osvc_sync'
        mount_point = m.mount_point
        snap_mount_point= mount_point + '/.zfs/snapshot/osvc_sync/'
        # a leftover snapshot from a previous run is destroyed first
        if dataset_exists(snapdev, 'snapshot'):
            (ret, buff, err) = self.vcall(['zfs', 'destroy', snapdev ])
            if ret != 0:
                raise ex.syncSnapDestroyError
        (ret, buff, err) = self.vcall(['zfs', 'snapshot', snapdev ])
        if ret != 0:
            raise ex.syncSnapCreateError
        # keyed by mount point, consumed by snapdestroykey()
        self.snaps[mount_point]={'snap_mnt' : snap_mount_point, \
                                 'snapdev' : snapdev }
    def snapdestroykey(self, snap_key):
        """ destroy a snapshot for a mount_point
        """
        snapdev = self.snaps[snap_key]['snapdev']
        if not dataset_exists(snapdev, 'snapshot'):
            # already gone: nothing to do
            return
        (ret, buff, err) = self.vcall(['zfs', 'destroy', snapdev ])
        if ret != 0:
            raise ex.syncSnapDestroyError
opensvc-1.8~20170412/lib/rcSysReportSunOS.py 0000644 0001750 0001750 00000000230 13073467726 020625 0 ustar jkelbert jkelbert import rcSysReport
class SysReport(rcSysReport.SysReport):
    # SunOS sysreport: no OS-specific overrides, the generic
    # rcSysReport implementation is used as-is.
    def __init__(self, node=None):
        rcSysReport.SysReport.__init__(self, node=node)
opensvc-1.8~20170412/lib/resSyncZfs.py 0000644 0001750 0001750 00000032277 13073467726 017527 0 ustar jkelbert jkelbert import os
from rcGlobalEnv import rcEnv
import datetime
from subprocess import *
import rcExceptions as ex
import rcStatus
import resSync
from rcZfs import a2pool_dataset, Dataset
class SyncZfs(resSync.Sync):
    """define zfs sync resource to be zfs send/zfs receive between nodes

    Two rolling snapshots are used on each side: <ds>@tosend (the new
    state) and <ds>@sent (the last replicated state). An incremental
    send streams the delta between them; after a successful send the
    snapshots are rotated and a timestamped state file is written and
    pushed to the targets.
    """
    def __init__(self,
                 rid=None,
                 target=None,
                 src=None,
                 dst=None,
                 delta_store=None,
                 sender=None,
                 recursive = True,
                 snap_size=0,
                 **kwargs):
        resSync.Sync.__init__(self,
                              rid=rid,
                              type="sync.zfs",
                              **kwargs)
        self.label = "zfs of %s to %s"%(src, ",".join(target))
        self.target = target
        self.sender = sender
        self.recursive = recursive
        self.src = src
        self.dst = dst
        # split "pool/dataset" notation into its pool and dataset parts
        (self.src_pool, self.src_ds) = a2pool_dataset(src)
        (self.dst_pool, self.dst_ds) = a2pool_dataset(dst)
        if delta_store is None:
            self.delta_store = rcEnv.pathvar
        else:
            self.delta_store = delta_store
    def info(self):
        # resource description for 'svcmgr print resinfo'
        data = [
          ["src", self.src],
          ["dst", self.dst],
          ["sender", self.sender if self.sender else ""],
          ["target", " ".join(self.target) if self.target else ""],
          ["recursive", str(self.recursive).lower()],
        ]
        return self.fmt_info(data)
    def pre_action(self, action):
        """Prepare dataset snapshots
        Don't sync PRD services when running on !PRD node
        skip snapshot creation if delay_snap in tags
        delay_snap should be used for oracle archive datasets
        """
        resources = [ r for r in self.rset.resources if not r.skip and not r.is_disabled() ]
        if len(resources) == 0:
            return
        self.pre_sync_check_prd_svc_on_non_prd_node()
        for i, r in enumerate(resources):
            if 'delay_snap' in r.tags:
                continue
            r.get_info()
            if action in ['sync_update', 'sync_resync', 'sync_drp', 'sync_nodes']:
                # only snapshot when the action matches the resource's
                # configured replication target set
                if action == 'sync_nodes' and self.target != ['nodes']:
                    return
                if action == 'sync_drp' and self.target != ['drpnodes']:
                    return
                nb = 0
                tgts = r.targets.copy()
                if len(tgts) == 0 :
                    continue
                r.get_info()
                if not r.snap_exists(r.src_snap_tosend):
                    r.create_snap(r.src_snap_tosend)
    def __str__(self):
        return "%s target=%s src=%s" % (resSync.Sync.__str__(self),\
                self.target, self.src)
    def snap_exists(self, snapname, node=None):
        # test snapshot existence, locally or via rsh on <node>
        cmd = ['env', 'PATH=/usr/sbin:/sbin', 'zfs', 'list', '-t', 'snapshot', snapname]
        if node is not None:
            cmd = rcEnv.rsh.split() + [node] + cmd
        (ret, out, err) = self.call(cmd, errlog=False)
        if ret == 0:
            return True
        else:
            return False
    def create_snap(self, snap):
        # create <snap>, recursively if configured; refuses to clobber
        snapds=Dataset(snap)
        if snapds.exists():
            self.log.error('%s should not exist'%snap)
            raise ex.excError
        if self.recursive :
            cmd = ['zfs', 'snapshot' , '-r' , snap ]
        else:
            cmd = ['zfs', 'snapshot' , snap ]
        (ret, out, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError
    def get_src_info(self):
        self.src_snap_sent = self.src_ds + '@sent'
        self.src_snap_tosend = self.src_ds + '@tosend'
        self.tosend = "tosend"
    def get_dst_info(self):
        self.dst_snap_sent = self.dst_ds + '@sent'
        self.dst_snap_tosend = self.dst_ds + '@tosend'
    def get_peersenders(self):
        # other nodes that may also act as sender (statefile fan-out)
        self.peersenders = set([])
        if 'nodes' == self.sender:
            self.peersenders |= self.svc.nodes
            self.peersenders -= set([rcEnv.nodename])
    def get_targets(self):
        # expand 'nodes'/'drpnodes' keywords, excluding the local node
        self.targets = set()
        if 'nodes' in self.target:
            self.targets |= self.svc.nodes
        if 'drpnodes' in self.target:
            self.targets |= self.svc.drpnodes
        self.targets -= set([rcEnv.nodename])
    def get_info(self):
        self.get_targets()
        self.get_src_info()
        self.get_dst_info()
    def sync_nodes(self):
        """alias to sync_update"""
        self.sync_update()
    def sync_full(self):
        """
        Purge all local and remote snaps, and call sync_update, which
        will do a non-incremental send/recv.
        """
        self.destroy_all_snaps()
        self.sync_update()
    def destroy_all_snaps(self):
        self.force_remove_snap(self.src_snap_tosend)
        self.force_remove_snap(self.src_snap_sent)
        for node in self.targets:
            self.force_remove_snap(self.dst_snap_tosend, node)
            self.force_remove_snap(self.dst_snap_sent, node)
    def zfs_send_incremental(self, node):
        # stream the sent->tosend delta into 'zfs receive' on <node>
        if self.recursive:
            send_cmd = ['zfs', 'send', '-R', '-i',
                        self.src_snap_sent, self.src_snap_tosend]
        else:
            send_cmd = ['zfs', 'send', '-i',
                        self.src_snap_sent, self.src_snap_tosend]
        receive_cmd = ['env', 'PATH=/usr/sbin:/sbin', 'zfs', 'receive', '-dF', self.dst_pool]
        if node is not None:
            # NOTE(review): strip(' -n') removes any of the chars
            # ' ', '-', 'n' from both ends of rcEnv.rsh, not the literal
            # ' -n' option — verify against the rsh values in use
            receive_cmd = rcEnv.rsh.strip(' -n').split() + [node] + receive_cmd
        self.log.info(' '.join(send_cmd + ["|"] + receive_cmd))
        p1 = Popen(send_cmd, stdout=PIPE)
        p2 = Popen(receive_cmd, stdin=p1.stdout, stdout=PIPE)
        buff = p2.communicate()
        if p2.returncode != 0:
            # NOTE(review): stderr is not piped, so buff[1] is always
            # None here — confirm whether stderr=PIPE was intended
            if buff[1] is not None and len(buff[1]) > 0:
                self.log.error(buff[1])
            self.log.error("sync update failed")
            raise ex.excError
        if buff[0] is not None and len(buff[0]) > 0:
            self.log.info(buff[0])
    def zfs_send_initial(self, node=None):
        # full (non-incremental) stream of @tosend into <node>'s pool
        if self.recursive:
            send_cmd = ['zfs', 'send', '-R', self.src_snap_tosend]
        else:
            send_cmd = ['zfs', 'send', self.src_snap_tosend]
        receive_cmd = ['env', 'PATH=/usr/sbin:/sbin', 'zfs', 'receive', '-dF', self.dst_pool ]
        if node is not None:
            receive_cmd = rcEnv.rsh.strip(' -n').split() + [node] + receive_cmd
        self.log.info(' '.join(send_cmd + ["|"] + receive_cmd))
        p1 = Popen(send_cmd, stdout=PIPE)
        p2 = Popen(receive_cmd, stdin=p1.stdout, stdout=PIPE)
        buff = p2.communicate()
        if p2.returncode != 0:
            if buff[1] is not None and len(buff[1]) > 0:
                self.log.error(buff[1])
            self.log.error("full sync failed")
            raise ex.excError
        if buff[0] is not None and len(buff[0]) > 0:
            self.log.info(buff[0])
    def force_remove_snap(self, snap, node=None):
        # best-effort removal: swallow errors (snapshot may not exist)
        try:
            self.remove_snap(snap, node=node, check_exists=False)
        except ex.excError:
            pass
    def remove_snap(self, snap, node=None, check_exists=True):
        if check_exists and not self.snap_exists(snap, node=node):
            return
        if self.recursive :
            cmd = ['zfs', 'destroy', '-r', snap]
        else:
            cmd = ['zfs', 'destroy', snap]
        if node is not None:
            cmd = rcEnv.rsh.split() + [node, 'env', 'PATH=/usr/sbin:/sbin'] + cmd
        if check_exists:
            err_to_info = False
        else:
            # unchecked removal: demote expected failures to info level
            err_to_info = True
        (ret, out, err) = self.vcall(cmd, err_to_info=err_to_info)
        if ret != 0:
            raise ex.excError
    def rename_snap(self, src, dst, node=None):
        if self.snap_exists(dst, node):
            self.log.error("%s should not exist"%dst)
            raise ex.excError
        if self.recursive :
            cmd = ['zfs', 'rename', '-r', src, dst]
        else:
            cmd = ['zfs', 'rename', src, dst]
        if node is not None:
            cmd = rcEnv.rsh.split() + [node, 'env', 'PATH=/usr/sbin:/sbin'] + cmd
        (ret, out, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError
    def rotate_snaps(self, src, dst, node=None):
        # promote @tosend to @sent: drop the old @sent, rename
        self.remove_snap(dst, node)
        self.rename_snap(src, dst, node)
    def sync_update(self):
        """
        test if service status is UP else return
        create the snap_tosend if not already created (during pre_action)
        if a snap has already been sent
            then for all targets
                zfs_send_incremental
                rotate snap
        else for all targets
                zfs_send_initial
                rotate snap
        rotate snap on local node
        """
        self.pre_sync_check_svc_not_up()
        self.pre_sync_check_flex_primary()
        self.get_info()
        if not self.snap_exists(self.src_snap_tosend):
            self.create_snap(self.src_snap_tosend)
        if self.snap_exists(self.src_snap_sent):
            for n in self.targets:
                self.zfs_send_incremental(n)
                self.rotate_snaps(self.dst_snap_tosend, self.dst_snap_sent, n)
        else:
            for n in self.targets:
                self.zfs_send_initial(n)
                self.rotate_snaps(self.dst_snap_tosend, self.dst_snap_sent, n)
        self.rotate_snaps(self.src_snap_tosend, self.src_snap_sent)
        self.write_statefile()
        for n in self.targets:
            self.push_statefile(n)
    def start(self):
        pass
    def stop(self):
        pass
    def can_sync(self, target=None):
        # a sync is allowed when the state file exists and the last
        # sync is older than sync_max_delay
        try:
            ls = self.get_local_state()
            ts = datetime.datetime.strptime(ls['date'], "%Y-%m-%d %H:%M:%S.%f")
        except IOError:
            self.log.error("zfs state file not found")
            return True
        except:
            import sys
            import traceback
            e = sys.exc_info()
            print(e[0], e[1], traceback.print_tb(e[2]))
            return False
        if self.skip_sync(ts):
            self.status_log("Last sync on %s older than %i minutes"%(ts, self.sync_max_delay))
            return False
        return True
    def _status(self, verbose=False):
        # UP when the last sync recorded in the state file is fresh
        # enough, WARN otherwise
        try:
            ls = self.get_local_state()
            now = datetime.datetime.now()
            last = datetime.datetime.strptime(ls['date'], "%Y-%m-%d %H:%M:%S.%f")
            delay = datetime.timedelta(minutes=self.sync_max_delay)
        except IOError:
            self.status_log("zfs state file not found")
            return rcStatus.WARN
        except:
            import sys
            import traceback
            e = sys.exc_info()
            print(e[0], e[1], traceback.print_tb(e[2]))
            return rcStatus.WARN
        if last < now - delay:
            self.status_log("Last sync on %s older than %i minutes"%(last, self.sync_max_delay))
            return rcStatus.WARN
        return rcStatus.UP
    def check_remote(self, node):
        # NOTE(review): self.snap1_uuid is not set anywhere in this
        # class (write_statefile sets snap_uuid) — verify this method's
        # callers before relying on it
        rs = self.get_remote_state(node)
        if self.snap1_uuid != rs['uuid']:
            self.log.error("%s last update uuid doesn't match snap1 uuid"%(node))
            raise ex.excError
    def get_remote_state(self, node):
        # fetch and parse the peer's copy of the state file
        self.set_statefile()
        cmd1 = ['env', 'LANG=C', 'cat', self.statefile]
        cmd = rcEnv.rsh.split() + [node] + cmd1
        (ret, out, err) = self.call(cmd)
        if ret != 0:
            self.log.error("could not fetch %s last update uuid"%node)
            raise ex.excError
        return self.parse_statefile(out, node=node)
    def get_local_state(self):
        self.set_statefile()
        with open(self.statefile, 'r') as f:
            out = f.read()
        return self.parse_statefile(out)
    def get_snap_uuid(self, snap):
        # the snapshot's creation time serves as its identity
        cmd = ['zfs', 'list', '-H', '-o', 'creation', '-t', 'snapshot', snap]
        (ret, out, err) = self.call(cmd)
        if ret != 0:
            raise ex.excError
        self.snap_uuid = out.strip()
    def set_statefile(self):
        self.statefile = os.path.join(rcEnv.pathvar,
                                      self.svc.svcname+'_'+self.rid+'_zfs_state')
    def write_statefile(self):
        # record "<timestamp>;<snap uuid>" for the freshly rotated @sent
        self.set_statefile()
        self.get_snap_uuid(self.src_snap_sent)
        self.log.info("update state file with snap uuid %s"%self.snap_uuid)
        with open(self.statefile, 'w') as f:
            f.write(str(datetime.datetime.now())+';'+self.snap_uuid+'\n')
    def _push_statefile(self, node):
        # '#' in rids must be escaped for the remote copy command
        cmd = rcEnv.rcp.split() + [self.statefile, node+':'+self.statefile.replace('#', '\#')]
        (ret, out, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError
    def push_statefile(self, node):
        self.set_statefile()
        self._push_statefile(node)
        self.get_peersenders()
        for s in self.peersenders:
            self._push_statefile(s)
    def parse_statefile(self, out, node=None):
        # parse "<timestamp>;<uuid>" into a dict; raise on corruption
        self.set_statefile()
        if node is None:
            node = rcEnv.nodename
        lines = out.strip().split('\n')
        if len(lines) != 1:
            self.log.error("%s:%s is corrupted"%(node, self.statefile))
            raise ex.excError
        fields = lines[0].split(';')
        if len(fields) != 2:
            self.log.error("%s:%s is corrupted"%(node, self.statefile))
            raise ex.excError
        return dict(date=fields[0], uuid=fields[1])
opensvc-1.8~20170412/lib/rcPrintTable.py 0000644 0001750 0001750 00000010420 13073467726 017771 0 ustar jkelbert jkelbert from __future__ import print_function
from __future__ import unicode_literals
import sys
from textwrap import wrap
from rcColor import color, colorize
from rcUtilities import term_width
import rcExceptions as ex
if sys.version_info[0] >= 3:
from functools import reduce
unicode = str
def parse_data(data):
try:
lines = data.splitlines()
except AttributeError:
raise ex.excError
if len(lines) < 2:
return []
labels = list(map(lambda x: x.split('.')[-1], lines[0].split(',')))
lines = lines[1:]
rows = []
for line in lines:
row = []
incell = False
cell_begin = 0
l = len(line)
for i, c in enumerate(line):
if c != ',' and i < l-1:
continue
if incell and ((i>1 and line[i-1] == '"') or i == l-1):
incell = False
if not incell:
if i > 0:
if i < l-1:
cell = line[cell_begin:i].replace('""', '"')
else:
cell = line[cell_begin:].replace('""', '"')
else:
cell = ""
if len(cell) > 1 and cell[0] == '"' and cell[-1] == '"':
if len(cell) > 2:
cell = cell[1:-1]
else:
cell = ""
row.append(cell)
cell_begin = i+1
if i tw or cont:
rpad = tw - (length % tw)
line += " " * rpad
print(rcColor.colorize(line, color))
def print_table_default(data):
    """Render rows as '-'-separated records, one field per line in
    'label: value' form, labels padded to a common width and long
    values wrapped at 78 columns with a hanging indent."""
    try:
        data = validate_format(data)
    except Exception:
        return
    labels = data[0]
    rows = data[1:]
    # widest label, plus one, drives the padding of the label column
    label_width = reduce(lambda acc, lbl: max(acc, len(lbl)), labels, 0) + 1
    hang_indent = " " * (label_width + 3)
    fmt = " %-" + str(label_width) + "s "
    for row in rows:
        print("-")
        for idx, label in enumerate(labels):
            wrapped = wrap(convert(row[idx]),
                           initial_indent="",
                           subsequent_indent=hang_indent,
                           width=78)
            val = '\n'.join(wrapped)
            try:
                print(colorize(fmt % (label+":"), color.LIGHTBLUE), val)
            except UnicodeEncodeError:
                # terminal encoding can't take the raw text: emit utf-8
                print(colorize(fmt % (label+":"), color.LIGHTBLUE), val.encode("utf-8"))
def print_table_csv(data):
    """Print rows as semicolon-separated values, each cell wrapped in
    single quotes. Raises ex.excError when the data cannot be coerced
    into tabular form."""
    try:
        data = validate_format(data)
    except ex.excError:
        raise ex.excError("unsupported format for this action")
    for row in data:
        cells = ["'%s'" % unicode(cell) for cell in row]
        print(";".join(cells))
opensvc-1.8~20170412/lib/resScsiReservSunOS.py 0000777 0001750 0001750 00000000000 13073467726 025326 2resScsiReservLinux.py ustar jkelbert jkelbert opensvc-1.8~20170412/lib/resScsiReservFreeBSD.py 0000777 0001750 0001750 00000000000 13073467726 025531 2resScsiReservLinux.py ustar jkelbert jkelbert opensvc-1.8~20170412/lib/rcPkgAIX.py 0000644 0001750 0001750 00000000774 13073467726 017023 0 ustar jkelbert jkelbert import os
from rcUtilities import justcall, which
from rcGlobalEnv import rcEnv
def listpkg():
    """List installed AIX filesets via 'lslpp -Lc'.

    Returns a list of [nodename, package, version, ''] entries, or an
    empty list when lslpp fails.
    """
    out, err, ret = justcall(['lslpp', '-Lc'])
    if ret != 0:
        return []
    pkgs = []
    for line in out.split('\n'):
        fields = line.split(':')
        if len(fields) < 5:
            # header or malformed line
            continue
        version = fields[2]
        # the fileset name may embed "-<version>"; strip it
        name = fields[1].replace('-'+version, '')
        pkgs.append([rcEnv.nodename, name, version, ''])
    return pkgs
def listpatch():
    """AIX does not distinguish patches from filesets; the inventory
    from listpkg() covers everything, so report none."""
    return []
opensvc-1.8~20170412/lib/checkBtrfsDevStatsLinux.py 0000644 0001750 0001750 00000004711 13073467726 022162 0 ustar jkelbert jkelbert import checks
from rcUtilities import justcall
import os
import re
from rcGlobalEnv import rcEnv
class check(checks.check):
    """
    Btrfs per-device error counter checker.

    Parses 'btrfs dev stats <mntpt>' for every mounted btrfs
    filesystem, e.g.:

    # btrfs dev stats /mnt
    [/dev/loop0].write_io_errs   0
    [/dev/loop0].read_io_errs    0
    [/dev/loop0].flush_io_errs   0
    [/dev/loop0].corruption_errs 0
    [/dev/loop0].generation_errs 0
    [/dev/loop1].write_io_errs   0
    [/dev/loop1].read_io_errs    0
    [/dev/loop1].flush_io_errs   0
    [/dev/loop1].corruption_errs 0
    [/dev/loop1].generation_errs 0
    """
    chk_type = "btrfs"
    # cache of svc -> disklist(), so each service is probed only once
    # (consistent with checkMpathLinux.check)
    svcdevs = {}

    def _get_dev_stats(self, mntpt, data):
        """Merge '<dev> -> {err_type: count}' entries parsed from
        'btrfs dev stats <mntpt>' into <data> and return it."""
        cmd = ['btrfs', 'dev', 'stats', mntpt]
        out, err, ret = justcall(cmd)
        if ret != 0:
            return data
        for line in out.split('\n'):
            l = line.split()
            if len(l) != 2:
                continue
            key, val = l
            l = key.split('.')
            if len(l) != 2:
                continue
            dev, err_type = l
            dev = dev.lstrip('[').rstrip(']')
            if dev not in data:
                data[dev] = {}
            data[dev][err_type] = val
        return data

    def get_dev_stats(self):
        """Collect error counters for all mounted btrfs filesystems."""
        mntpts = self.get_btrfs_mounts()
        data = {}
        if mntpts is None:
            return data
        for mntpt in mntpts:
            data = self._get_dev_stats(mntpt, data)
        return data

    def get_btrfs_mounts(self):
        """Return the btrfs mount points from /proc/mounts, or None
        when /proc/mounts is unavailable."""
        mntpts = []
        p = '/proc/mounts'
        if not os.path.exists(p):
            return
        with open(p, 'r') as f:
            buff = f.read()
        for line in buff.split('\n'):
            if 'btrfs' not in line:
                continue
            l = line.split()
            if len(l) < 2:
                continue
            mntpt = l[1]
            if not os.path.exists(mntpt):
                continue
            mntpts.append(mntpt)
        return mntpts

    def find_svc(self, dev):
        """Return the svcname of the service using <dev>, or ''.
        disklist() results are cached and exceptions tolerated, like in
        the other Linux checkers."""
        for svc in self.svcs:
            if svc not in self.svcdevs:
                try:
                    devs = svc.disklist()
                except Exception as e:
                    devs = []
                self.svcdevs[svc] = devs
            if dev in self.svcdevs[svc]:
                return svc.svcname
        return ''

    def do_check(self):
        """Flatten the collected counters into the checker result
        format: one entry per (device, error type)."""
        r = []
        data = self.get_dev_stats()
        if data is None:
            # defensive: get_dev_stats() currently always returns a dict
            return r
        for dev, _data in data.items():
            for err_type, val in _data.items():
                r.append({'chk_instance': dev+'.'+err_type,
                          'chk_value': val,
                          'chk_svcname': self.find_svc(dev),
                         })
        return r
if __name__ == "__main__":
    # ad-hoc test: run the checker and dump its findings
    checker = check()
    results = checker.do_check()
    print(results)
opensvc-1.8~20170412/lib/resContainerHpVm.py 0000644 0001750 0001750 00000010061 13073467726 020630 0 ustar jkelbert jkelbert import rcStatus
import resources as Res
import time
import os
import rcExceptions as ex
from rcUtilities import qcall
from rcGlobalEnv import rcEnv
import resContainer
u = __import__('rcUtilitiesHP-UX')
import resDiskHpVm
class HpVm(resContainer.Container):
    """
    HP Integrity Virtual Machines container resource driver.

    Wraps the /opt/hpvm/bin/hpvm* commands to start, stop and inspect a
    guest, and delegates disk handling to a companion vg resource.
    """
    def __init__(self,
                 rid,
                 name,
                 guestos="HP-UX",
                 osvc_root_path=None,
                 **kwargs):
        resContainer.Container.__init__(self,
                                        rid=rid,
                                        name=name,
                                        type="container.hpvm",
                                        guestos=guestos,
                                        osvc_root_path=osvc_root_path,
                                        **kwargs)
        # NOTE(review): only resDiskHpVm is imported above; resVgHpVm is
        # unresolved here and raises NameError as written -- confirm the
        # intended module/class name
        self.vg = resVgHpVm.Vg(
            rid = 'vmdg#'+self.rid,
            name = 'vmdg_'+self.name,
            container_name = self.name
        )

    def on_add(self):
        # Propagate the owning service to the companion vg resource.
        self.vg.svc = self.svc

    def __str__(self):
        return "%s name=%s" % (Res.Resource.__str__(self), self.name)

    def files_to_sync(self):
        # Files to replicate to peer nodes: the vg files, the guest
        # directory and its realpath target, plus the share files kept
        # under pathvar.
        import glob
        a = self.vg.files_to_sync()
        guest = os.path.join(os.sep, 'var', 'opt', 'hpvm', 'guests', self.name)
        uuid = os.path.realpath(guest)
        share = os.path.join(rcEnv.pathvar, 'vg_'+self.name+'_*.share')
        if os.path.exists(guest):
            a.append(guest)
        if os.path.exists(uuid):
            a.append(uuid)
        files = glob.glob(share)
        if len(files) > 0:
            a += files
        return a

    def ping(self):
        # ICMP reachability of the guest address (1s timeout, 1 packet).
        return u.check_ping(self.addr, timeout=1, count=1)

    def container_start(self):
        cmd = ['/opt/hpvm/bin/hpvmstart', '-P', self.name]
        (ret, buff, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError

    def container_stop(self):
        # graceful (-g) forced (-F) stop
        cmd = ['/opt/hpvm/bin/hpvmstop', '-g', '-F', '-P', self.name]
        (ret, buff, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError

    def container_forcestop(self):
        cmd = ['/opt/hpvm/bin/hpvmstop', '-F', '-P', self.name]
        (ret, buff, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError

    def check_manual_boot(self):
        # Field 11 of the machine-readable (-M) status is the boot mode;
        # anything but "Manual" means autoboot is on.
        cmd = ['/opt/hpvm/bin/hpvmstatus', '-M', '-P', self.name]
        (ret, out, err) = self.call(cmd, cache=True)
        if ret != 0:
            return False
        if out.split(":")[11] == "Manual":
            return True
        self.log.info("Auto boot should be turned off")
        return False

    def get_container_info(self):
        # Extract vcpus (field 19) and vmem (field 20) from the
        # machine-readable status; a vmem expressed in GB is converted
        # with a 1024*1024 multiplier.
        cmd = ['/opt/hpvm/bin/hpvmstatus', '-M', '-P', self.name]
        (ret, out, err) = self.call(cmd, cache=True)
        self.info = {'vcpus': '0', 'vmem': '0'}
        if ret != 0:
            return self.info
        self.info['vcpus'] = out.split(':')[19].split(';')[0]
        self.info['vmem'] = out.split(':')[20].split(';')[0]
        if 'GB' in self.info['vmem']:
            self.info['vmem'] = str(1024*1024*int(self.info['vmem'].replace('GB','')))
        return self.info

    def is_up_on(self, nodename):
        return self.is_up(nodename)

    def is_up(self, nodename=None):
        # Run hpvmstatus locally, or through rcEnv.rsh on a peer node;
        # field 10 of the -M output carries the run state.
        cmd = ['/opt/hpvm/bin/hpvmstatus', '-M', '-P', self.name]
        if nodename is not None:
            cmd = rcEnv.rsh.split() + [nodename] + cmd
        (ret, out, err) = self.call(cmd)
        if ret != 0:
            return False
        if out.split(":")[10] == "On":
            return True
        return False

    def check_capabilities(self):
        # The node can host HPVM guests when the hpvmstatus binary exists.
        if os.path.exists('/opt/hpvm/bin/hpvmstatus'):
            return True
        return False

    def _migrate(self):
        # Online (-o) migration to the destination node from the cmdline.
        cmd = ['hpvmmigrate', '-o', '-P', self.name, '-h', self.svc.options.destination_node]
        (ret, buff, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError

    def disklist(self):
        # Disk-related queries are delegated to the companion vg resource.
        return self.vg.disklist()

    def devlist(self):
        return self.vg.devlist()

    def presync(self):
        return self.vg.presync()

    def postsync(self):
        return self.vg.postsync()
opensvc-1.8~20170412/lib/rcStatsCollectDarwin.py 0000644 0001750 0001750 00000005247 13073467726 021511 0 ustar jkelbert jkelbert import os
import sys
import datetime
from rcUtilities import justcall
from rcGlobalEnv import rcEnv
def collect(node):
    """
    Collect Darwin (OSX) node statistics:

    - push filesystem usage (df -lkP) to the collector
    - append a memory usage sample (vm_stat) to a daily file under
      <pathvar>/stats

    node is the Node object providing nodename and the collector client.
    """
    now = datetime.datetime.now()

    def fs_u():
        # Build the (vars, vals) dataset for the collector's
        # push_stats_fs_u call from 'df -lkP' output.
        cols = ['date',
                'nodename',
                'mntpt',
                'size',
                'used']
        cmd = ['df', '-lkP']
        (out, err, ret) = justcall(cmd)
        if ret != 0:
            # bugfix: the original returned self.undef, which is a
            # NameError in this plain nested function. Push an empty
            # dataset instead.
            return (cols, [])
        lines = out.split('\n')
        if len(lines) < 2:
            return (cols, [])
        vals = []
        for line in lines[1:]:
            l = line.split()
            if len(l) != 6:
                continue
            if l[5].startswith('/Volumes'):
                # Darwin automount package files under /Volumes
                continue
            vals.append([str(now), node.nodename, l[5], l[1], l[4].replace('%', '')])
        return (cols, vals)

    def mem_u():
        # Append one memory sample to the per-day stats file, recreating
        # the file when it is a leftover from a previous month.
        basedir = os.path.join(rcEnv.pathvar, 'stats')
        if not os.path.exists(basedir):
            os.makedirs(basedir)
        fname = os.path.join(basedir, 'mem_u%0.2d' % now.day)
        try:
            if not os.path.exists(fname):
                f = open(fname, 'w')
            else:
                mtime = os.stat(fname).st_mtime
                if datetime.datetime.fromtimestamp(mtime) < now - datetime.timedelta(days=1):
                    # stale file from a previous month's same day
                    os.unlink(fname)
                    f = open(fname, 'w')
                else:
                    f = open(fname, 'a')
        except:
            # best effort: silently skip the sample on open errors
            return
        try:
            cmd = ['/usr/sbin/sysctl', '-n', 'hw.pagesize']
            (out, err, ret) = justcall(cmd)
            if ret != 0:
                return
            pagesize = int(out.split()[0])
            cmd = ['vm_stat']
            (out, err, ret) = justcall(cmd)
            if ret != 0:
                return
            h = {}
            for line in out.split('\n'):
                l = line.split(':')
                if len(l) != 2:
                    continue
                key = l[0]
                try:
                    val = int(l[1].strip(' .'))
                except:
                    continue
                h[key] = val
            # '//' keeps the python2 integer-division semantics of the
            # original '/' when running under python3
            f.write(' '.join((now.strftime('%H:%M:%S'),
                              str(h['Pages free']*pagesize//1024),
                              str(h['Pages active']*pagesize//1024),
                              str(h['Pages inactive']*pagesize//1024),
                              str(h['Pages speculative']*pagesize//1024),
                              str(h['Pages wired down']*pagesize//1024)
                             ))+'\n')
        finally:
            # bugfix: the original leaked the file descriptor on every
            # early return
            f.close()

    node.collector.call('push_stats_fs_u', fs_u())
    mem_u()
opensvc-1.8~20170412/lib/checkZpoolLinux.py 0000777 0001750 0001750 00000000000 13073467726 024124 2checkZpoolSunOS.py ustar jkelbert jkelbert opensvc-1.8~20170412/lib/resSyncS3.py 0000644 0001750 0001750 00000022164 13073467726 017244 0 ustar jkelbert jkelbert import os
from rcGlobalEnv import rcEnv
from rcUtilities import which, justcall
from subprocess import *
import rcExceptions as ex
import rcStatus
import time
import datetime
import resSync
import glob
class syncS3(resSync.Sync):
    """
    Sync resource pushing tar backups (one full, then incrementals) of
    local paths to an Amazon S3 bucket, streaming through gof3r and using
    tar's listed-incremental snar file to track increments.
    """
    def __init__(self,
                 rid=None,
                 src=[],
                 options=[],
                 bucket=None,
                 snar=None,
                 full_schedule="* sun",
                 **kwargs):
        # NOTE(review): src and options are mutable default arguments,
        # shared across instances -- confirm callers always pass values
        resSync.Sync.__init__(self, rid=rid, type="sync.s3", **kwargs)
        self.label = "s3 backup"
        self.src = src
        self.bucket = bucket
        self.options = options
        self.full_schedule = full_schedule
        self.snar = snar

    def on_add(self):
        # Finalize attributes that depend on the owning service.
        self.prefix = "/" + self.svc.svcname + "/" + self.rid.replace("#",".")
        dst = "s3://"+self.bucket + self.prefix
        self.label += " to " + dst
        if self.snar is None:
            self.snar = os.path.join(rcEnv.pathvar, self.svc.svcname, self.rid.replace("#", "."))+".snar"

    def sync_basename(self, n):
        # Basename of the S3 key for backup number n.
        return os.path.basename(self.sync_fullname(n))

    def sync_fullname(self, n):
        # Full S3 key: <prefix>[.incr<n>].tar.gz
        # NOTE(review): n may be None (full backup path); "None > 0" is a
        # TypeError on python 3 -- confirm python2-only usage
        s = self.prefix
        if n > 0:
            s += ".incr"+str(n)
        s += ".tar.gz"
        return s

    def sync_date(self, n):
        # Datetime of backup number n, from the bucket listing.
        key = self.sync_basename(n)
        try:
            e = [ d for d in self.ls() if d["key"] == key ][0]
        except:
            raise ex.excError("key %s not found in bucket" % key)
        try:
            _d = datetime.datetime.strptime(e["date"], "%Y-%m-%d %H:%M:%S")
        except:
            raise ex.excError("undecodable date %s" % e["date"])
        return _d

    def _status(self, verbose=False):
        # Resource status: WARN when binaries, backups or the snar file
        # are missing, or when the last backup is older than
        # sync_max_delay minutes. UP otherwise.
        try:
            self.check_bin()
        except ex.excError as e:
            self.status_log(str(e))
            return rcStatus.WARN
        try:
            l = self.ls(refresh=True)
            n = self.get_n_incr()
        except Exception as e:
            self.status_log(str(e))
            return rcStatus.WARN
        if n is None:
            self.status_log("no backup found")
            return rcStatus.WARN
        if n > 0 and not os.path.exists(self.snar):
            self.status_log("snar file not found at %s" % self.snar)
            return rcStatus.WARN
        try:
            last = self.sync_date(n)
        except Exception as e:
            self.status_log(str(e))
            return rcStatus.WARN
        if self.sync_date(n) < datetime.datetime.now() - datetime.timedelta(minutes=self.sync_max_delay):
            self.status_log("last backup too old (%s)" % last.strftime("%Y-%m-%d %H:%M:%S"))
            return rcStatus.WARN
        self.status_log("last backup on %s" % last.strftime("%Y-%m-%d %H:%M:%S"))
        return rcStatus.UP

    def check_bin(self):
        # Ensure the archive and streaming-upload tools are installed.
        if not which("gof3r"):
            raise ex.excError("could not find gof3r binary")
        if not which("tar"):
            raise ex.excError("could not find tar binary")

    def sync_full(self):
        # Action entry point: force a full backup.
        self.check_bin()
        self.tar_full()

    def sync_update(self):
        # Action entry point: full or incremental, decided by tar().
        self.check_bin()
        self.tar()

    def ls(self, refresh=False):
        """
        list all saves in S3 for this resource
        """
        if not refresh and hasattr(self, "ls_cache"):
            return self.ls_cache
        cmd = ["aws", "s3", "ls", "s3://"+self.bucket+"/"+self.svc.svcname+"/"]
        out, err, ret = justcall(cmd)
        if ret != 0:
            return []
        l = []
        for line in out.split("\n"):
            v = line.split()
            if len(v) != 4:
                continue
            if v[2] == "PRE":
                # directory-like prefix entry, not an object
                continue
            if not v[-1].startswith(self.rid.replace("#", ".")):
                continue
            d = {
                'date': " ".join(v[:2]),
                'key': v[-1],
            }
            l.append(d)
        self.ls_cache = l
        return self.ls_cache

    def get_creds_from_aws(self):
        # Read the key pair from the aws cli configuration, honoring the
        # service-level aws_profile when set.
        # NOTE(review): ConfigParser is the python2 module name -- confirm
        # python2-only usage or switch to configparser
        import ConfigParser
        aws_cf_f = "/root/.aws/config"
        try:
            aws_cf = ConfigParser.RawConfigParser()
            aws_cf.read(aws_cf_f)
        except:
            raise ex.excError("failed to load aws config at %s" % aws_cf_f)
        if hasattr(self.svc, "aws_profile"):
            profile = self.svc.aws_profile
        else:
            profile = "default"
        try:
            key = aws_cf.get(profile, "aws_access_key_id")
        except:
            raise ex.excError("aws_access_key_id not found in section %s of %s" % (profile, aws_cf_f))
        try:
            secret = aws_cf.get(profile, "aws_secret_access_key")
        except:
            raise ex.excError("aws_secret_access_key not found in section %s of %s" % (profile, aws_cf_f))
        return key, secret

    def set_creds(self):
        # Export the credentials for gof3r through the environment.
        key, secret = self.get_creds_from_aws()
        os.environ["AWS_ACCESS_KEY_ID"] = key
        os.environ["AWS_SECRET_ACCESS_KEY"] = secret

    def unset_creds(self):
        if "AWS_ACCESS_KEY_ID" in os.environ:
            del(os.environ["AWS_ACCESS_KEY_ID"])
        if "AWS_SECRET_ACCESS_KEY" in os.environ:
            del(os.environ["AWS_SECRET_ACCESS_KEY"])

    def get_n_incr(self):
        # Number of the most recent backup: 0 when only the full exists,
        # N for the latest incremental, None when no backup exists.
        l = self.ls()
        keys = sorted([d["key"] for d in l])
        n_incr = None
        full_found = False
        for i in range(len(keys)):
            last = keys[-(i+1)]
            if last == self.rid.replace("#", ".") + ".tar.gz":
                full_found = True
            v = last.split(".")
            if len(v) < 3:
                continue
            # locate the "incr<N>" token right before the tar suffix
            if v[-1] == "tar":
                incr = v[-2]
            elif v[-2] == "tar":
                incr = v[-3]
            else:
                continue
            if not incr.startswith("incr"):
                continue
            incr = incr.replace("incr", "")
            try:
                n_incr = int(incr)
                return n_incr
            except:
                continue
        if full_found:
            return 0
        return n_incr

    def remove_incr(self):
        # Delete all incremental objects (after a new full backup).
        cmd = ["aws", "s3", "rm"]
        keys = [ d["key"] for d in self.ls() ]
        for key in keys:
            if not key.startswith(os.path.basename(self.prefix) + ".incr"):
                continue
            self.vcall(cmd + ["s3://"+self.bucket+os.path.dirname(self.prefix)+"/"+key])

    def in_full_schedule(self):
        # True when "now" falls in the full-backup schedule expression.
        from rcScheduler import Scheduler, SchedNotAllowed, SchedSyntaxError
        sched = Scheduler()
        schedule = sched.sched_get_schedule("dummy", "dummy", schedules=self.full_schedule)
        try:
            sched.in_schedule(schedule, now=datetime.datetime.now())
        except SchedNotAllowed:
            return False
        except SchedSyntaxError as e:
            raise ex.excError(str(e))
        return True

    def tar(self):
        # Decide between a full and an incremental backup.
        n_incr = self.get_n_incr()
        if n_incr is None:
            self.log.info("first backup")
            self.tar_full()
        elif self.in_full_schedule():
            self.log.info("in schedule for a full backup")
            self.tar_full()
        else:
            self.tar_incr(n_incr+1)

    def tar_full(self):
        # Full backup: reset the snar file, upload, drop old incrementals.
        if os.path.exists(self.snar):
            self.log.info("full backup, removing snar file")
            self.log.info("rm " + self.snar)
            os.unlink(self.snar)
        else:
            self.log.info("full backup, no snar file found at %s" % self.snar)
        self.do_tar()
        self.remove_incr()

    def tar_incr(self, n):
        if os.path.exists(self.snar):
            self.log.info("incremental backup, using snar file")
        else:
            self.log.info("full backup, no snar file found at %s" % self.snar)
        self.do_tar(n=n)

    def do_tar(self, n=None):
        # Stream "tar czf - -g snar <paths>" into "gof3r put".
        self.set_creds()
        paths = []
        for e in self.src:
            paths += glob.glob(e)
        cmd1 = ["tar", "czf", "-", "-g", self.snar] + self.options + paths
        p1 = Popen(cmd1, stdout=PIPE, stderr=PIPE)
        cmd2 = ["gof3r", "put", "-b", self.bucket, "-k", self.sync_fullname(n)]
        p2 = Popen(cmd2, stdin=p1.stdout, stdout=PIPE, stderr=PIPE)
        self.log.info(" ".join(cmd1) + " | " + " ".join(cmd2))
        out, err = p2.communicate()
        self.unset_creds()
        if len(out) > 0:
            self.log.info(out)
        if p2.returncode != 0:
            if len(err) > 0:
                self.log.error(err)

    def do_tar_x(self, n=None):
        # Stream "gof3r get" into "tar xzf - -g snar -C /" (restore).
        self.set_creds()
        paths = []
        cmd1 = ["gof3r", "get", "-b", self.bucket, "-k", self.sync_fullname(n)]
        p1 = Popen(cmd1, stdout=PIPE, stderr=PIPE)
        cmd2 = ["tar", "xzf", "-", "-g", self.snar, "-C", "/"]
        p2 = Popen(cmd2, stdin=p1.stdout, stdout=PIPE, stderr=PIPE)
        self.log.info(" ".join(cmd1) + " | " + " ".join(cmd2))
        out, err = p2.communicate()
        self.unset_creds()
        if len(out) > 0:
            self.log.info(out)
        if p2.returncode != 0:
            if len(err) > 0:
                self.log.error(err)

    def sync_restore(self):
        # Replay the full backup then each incremental in order.
        # NOTE(review): range(n) restores backups 0..n-1 and seems to skip
        # the latest incremental <n> -- confirm intended behavior
        n = self.get_n_incr()
        for i in range(n):
            self.do_tar_x(i)

    def __str__(self):
        return "%s src=%s bucket=%s" % (resSync.Sync.__str__(self), str(self.src), str(self.bucket))
opensvc-1.8~20170412/lib/checkVgUsageAIX.py 0000644 0001750 0001750 00000002347 13073467726 020312 0 ustar jkelbert jkelbert import checks
from rcUtilities import justcall
class check(checks.check):
    """
    AIX volume group usage checker.

    Computes the used percentage of each volume group from the total and
    free PP counts reported by 'lsvg -p'.
    """
    chk_type = "vg_u"

    def find_svc(self, vgname):
        # Map a vg name to the service owning a disk.vg resource of that
        # name, or '' when no service claims it.
        for svc in self.svcs:
            for resource in svc.get_resources('disk.vg'):
                if resource.name == vgname:
                    return svc.svcname
        return ''

    def do_check(self):
        # Entry point: one check instance per vg.
        r = []
        cmd = ['lsvg']
        out, err, ret = justcall(cmd)
        if ret != 0:
            return self.undef
        for vg in out.split('\n'):
            vg = vg.strip()
            if not vg:
                # bugfix: 'lsvg' output ends with a newline; the empty
                # trailing element used to trigger a failing lsvg -p ''
                # call, polluting the result with undef entries
                continue
            r += self._do_check(vg)
        return r

    def _do_check(self, vg):
        # Parse 'lsvg -p <vg>': one line per physical volume with
        # columns [pv, state, total_pps, free_pps, distribution].
        cmd = ['lsvg', '-p', vg]
        out, err, ret = justcall(cmd)
        if ret != 0:
            return self.undef
        lines = out.split('\n')
        if len(lines) < 3:
            return self.undef
        r = []
        for line in lines[2:]:
            l = line.split()
            if len(l) != 5:
                continue
            size = int(l[2])
            free = int(l[3])
            if size == 0:
                # guard against a zero total PP count
                continue
            val = int(100*(size-free)/size)
            r.append({'chk_instance': vg,
                      'chk_value': str(val),
                      # bugfix: look up the service by vg name; l[0] is
                      # the physical volume (hdisk) name and never matches
                      # a disk.vg resource name
                      'chk_svcname': self.find_svc(vg),
                     }
                    )
        return r
class Provisioning(object):
    """
    Base class for resource provisioners.

    Holds a reference to the provisioned resource and offers default
    no-op provision/unprovision implementations, plus helpers to comment
    out provisioning keywords from the service configuration.
    """

    def __init__(self, r):
        # r is the resource object this provisioner operates on
        self.r = r

    def validate(self):
        """Default validation: always succeeds."""
        return True

    def unprovisioner(self):
        """Default unprovision: stop the resource."""
        self.r.stop()
        return True

    def provisioner(self):
        """Default provision: nothing to do."""
        return True

    def remove_keywords(self, keywords=[]):
        """Comment out several keywords, writing the config once."""
        for keyword in keywords:
            self.remove_keyword(keyword, write=False)
        self.r.svc.write_config()

    def remove_keyword(self, keyword, write=True):
        """Comment out a keyword (and its @scope variants) in the
        resource section of the service configuration."""
        for option in self.r.svc.config.options(self.r.rid):
            if option != keyword and not option.startswith(keyword+"@"):
                continue
            self.r.log.info("comment out provisioning keyword: %s" % option)
            value = self.r.svc.config.get(self.r.rid, option)
            self.r.svc.config.remove_option(self.r.rid, option)
            self.r.svc.config.set(self.r.rid, "#"+option, value)
        if write:
            self.r.svc.write_config()
opensvc-1.8~20170412/lib/rcColor.py 0000644 0001750 0001750 00000017064 13073467726 017016 0 ustar jkelbert jkelbert from __future__ import print_function
import os
import sys
import platform
import rcExceptions as ex
from rcUtilities import is_string
use_color = "auto"
class color:
    # ANSI SGR escape sequences used by the colorize helpers.
    END = '\033[0m'           # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'

    # foreground colors
    BLACK = '\033[30m'
    RED = '\033[31m'
    GREEN = '\033[32m'
    BROWN = '\033[33m'
    BLUE = '\033[34m'
    PURPLE = '\033[35m'
    CYAN = '\033[36m'
    GRAY = '\033[37m'

    # bright foreground colors
    DARKGRAY = '\033[90m'
    LIGHTRED = '\033[91m'
    LIGHTGREEN = '\033[92m'
    YELLOW = '\033[93m'
    LIGHTBLUE = '\033[94m'
    LIGHTPURPLE = '\033[95m'
    LIGHTCYAN = '\033[96m'
    WHITE = '\033[97m'

    # background colors
    BGBLACK = '\033[40m'
    BGRED = '\033[41m'
    BGGREEN = '\033[42m'
    BGYELLOW = '\033[43m'
    BGBLUE = '\033[44m'
    BGPURPLE = '\033[45m'
    BGCYAN = '\033[46m'
    BGWHITE = '\033[47m'
    BGDEFAULT = '\033[49m'
    BGGRAY = '\033[100m'

    # 24-bit background colors (SGR 48;2;r;g;b)
    E_BGODD = '\033[48;2;240;240;205m'
    E_BGCYAN = '\033[48;2;125;205;205m'
def ansi_colorize(s, c=None):
    """Wrap s in the ANSI sequence c, honoring the global use_color
    policy ("never"/"no" disable, "auto" requires stdout to be a tty)."""
    global use_color
    if c is None:
        return s
    disabled = use_color in ("never", "no")
    auto_off = use_color == "auto" and not os.isatty(1)
    if disabled or auto_off:
        return s
    return c + s + color.END
def win_colorize(s, c=None):
    """No-op colorizer for Windows terminals: the color argument is
    ignored and s is returned unchanged."""
    return s
# Pick the platform-appropriate colorizer once at import time.
if platform.system() == 'Windows':
    colorize = win_colorize
else:
    colorize = ansi_colorize
def colorize_json(s):
    """
    Colorize a serialized JSON string for terminal display: status words,
    keys, the DEFAULT section, rid-like sections and @scope suffixes.
    """
    import re
    from rcStatus import colorize_status
    # quoted status words ("up", "down", "warn", ...)
    s = re.sub(r'(")(error|ok|err|up|down|warn|n/a|stdby up|stdby down)(")', lambda m: m.group(1)+colorize_status(m.group(2), lpad=0)+m.group(3), s)
    # generic quoted keys (except "DEFAULT")
    s = re.sub(r'((?!"DEFAULT":)("[\w: ,@-]+":))', colorize(r'\1', color.LIGHTBLUE), s)
    # the "DEFAULT" section key
    s = re.sub(r'("DEFAULT":)( {)', colorize(r'\1', color.BROWN)+r'\2', s)
    # resource-id section keys like "fs#1"
    s = re.sub(r'("[\w:-]+#[\w:-]+":)( {)', colorize(r'\1', color.BROWN)+r'\2', s)
    # @scope suffixes inside keys
    s = re.sub(r'(@[\w-]+)(":)', colorize(r'\1', color.RED)+colorize(r'\2', color.LIGHTBLUE), s)
    # brace-enclosed references
    s = re.sub(r'({.+})', colorize(r'\1', color.GREEN), s)
    return s
def format_json(d):
    """Dump d as indented, colorized JSON on stdout."""
    import json
    dump_opts = dict(ensure_ascii=False, indent=4, separators=(',', ': '))
    if sys.version_info[0] < 3:
        # python2 json.dumps needs an explicit output encoding
        dump_opts["encoding"] = "utf8"
    print(colorize_json(json.dumps(d, **dump_opts)))
def format_table(d):
    # Render dataset d through the tabulate-based table printer.
    from rcPrintTable import print_table_tabulate
    print_table_tabulate(d)
def format_default(d):
    # Default renderer: print any string "error" entry on stderr first,
    # then the dataset as a plain table.
    from rcPrintTable import print_table_default
    if "error" in d and is_string(d["error"]):
        print(d["error"], file=sys.stderr)
    print_table_default(d)
def format_csv(d):
    # Render dataset d as CSV on stdout.
    from rcPrintTable import print_table_csv
    print_table_csv(d)
def is_list_of_list(d):
    """Return True when d looks like a table: a non-empty list whose
    first element is a list."""
    if type(d) != list:
        return False
    if len(d) == 2 and type(d[0]) == list and type(d[1]) == list:
        return True
    return len(d) > 0 and type(d[0]) == list
def is_list_of_dict(d):
    """Return True when d is a non-empty list containing only dicts."""
    if type(d) != list or len(d) == 0:
        return False
    return all(type(e) == dict for e in d)
def is_dict_of_list(d):
    """Return True when d is a dict whose every value is a
    list-of-list table."""
    if type(d) != dict:
        return False
    return all(is_list_of_list(v) for v in d.values())
def is_dict_of_list_of_dict(d):
    """Return True when d is a dict whose every value is a list of
    dicts."""
    if type(d) != dict:
        return False
    return all(is_list_of_dict(v) for v in d.values())
def is_dict_of_list_of_list(d):
    """Return True when d is a dict whose every value is a
    list-of-list table."""
    if type(d) != dict:
        return False
    return all(is_list_of_list(v) for v in d.values())
def flatten_list(data):
    """
    Flatten one level of dict nesting inside a list of dicts, in place.

    For each dict entry, every value that is itself a dict is replaced by
    "<key>.<subkey>" entries holding the sub-values, and the original key
    is removed. Non-dict entries are left untouched. Returns data.
    """
    for idx, entry in enumerate(data):
        if not isinstance(entry, dict):
            continue
        # bugfix: iterate over a snapshot -- the loop body both adds
        # (data[idx][agg_key] = ...) and deletes (del data[idx][key])
        # keys of 'entry', which raises RuntimeError on python 3 when
        # iterating the live items() view
        for key, val in list(entry.items()):
            if not isinstance(val, dict):
                continue
            for _key, _val in val.items():
                agg_key = key + "." + _key
                data[idx][agg_key] = _val
            del data[idx][key]
    return data
def xform_data_for_tabular(d):
    """Normalize any supported dataset shape into a list-of-list table;
    unrecognized shapes pass through unchanged."""
    dispatch = (
        (is_list_of_dict, _xform_ld_data_for_tabular),
        (is_dict_of_list_of_dict, _xform_dld_data_for_tabular),
        (is_dict_of_list_of_list, _xform_dll_data_for_tabular),
    )
    for predicate, transform in dispatch:
        if predicate(d):
            return transform(d)
    return d
def _xform_dll_data_for_tabular(d):
l = []
for k, v in d.items():
if len(v) == 0:
continue
v[0].insert(0, "service")
for i, e in enumerate(v[1:]):
v[i+1].insert(0, k)
if len(l) == 0:
l += v
else:
l += v[1:]
return l
def _xform_dld_data_for_tabular(d):
    """Merge a dict of list-of-dict datasets into one table, prepending
    a "service" column holding each dict key. Only the first dataset
    contributes the header row."""
    merged = []
    for svcname, entries in d.items():
        first = len(merged) == 0
        merged += _xform_ld_data_for_tabular(entries,
                                             include_header=first,
                                             prepend=("service", svcname))
    return merged
def _xform_ld_data_for_tabular(d, include_header=True, prepend=None):
    """
    Convert a list of dicts into a list-of-list table.

    include_header adds a first row built from the first entry's keys.
    prepend, when set, is a (header_label, value) pair inserted as the
    first column of the header and of every data row.
    """
    d = flatten_list(d)
    rows = []
    if include_header:
        # bugfix: list() is required -- on python 3, dict.keys() returns
        # a view without an insert() method
        header = list(d[0].keys())
        if prepend:
            header.insert(0, prepend[0])
        rows += [header]
    for e in d:
        # bugfix: same list() requirement for dict.values()
        values = list(e.values())
        if prepend:
            values.insert(0, prepend[1])
        rows.append(values)
    return rows
def xform_data_for_json(d):
    """Convert tabular list-of-list payloads into lists of dicts keyed
    by the header row; other shapes pass through unchanged."""
    if is_list_of_list(d):
        return _xform_data_for_json(d)
    if is_dict_of_list(d):
        for key in d:
            d[key] = _xform_data_for_json(d[key])
    return d
def _xform_data_for_json(d):
if len(d) < 2:
return []
l = []
titles = d[0]
for _d in d[1:]:
h = {}
for a, b in zip(titles, _d):
h[a] = b
l.append(h)
return l
def formatter(fn):
    """
    Decorator for methods returning a dataset to display.

    Reads the requested output format from the object's options.format
    (json, table, csv, None for default, or a custom callable),
    transforms the dataset to the matching shape and prints it.
    Returns 1 when the printed dataset carries an "error" entry,
    None otherwise.
    """
    def decorator(*args, **kwargs):
        # args[0] is the decorated method's 'self'
        fmt = args[0].options.format
        if fmt == "json":
            _fmt = format_json
        elif fmt == "table":
            _fmt = format_table
        elif fmt == "csv":
            _fmt = format_csv
        elif fmt is None:
            _fmt = format_default
        elif hasattr(fmt, "__call__"):
            # a callable may be passed directly as a custom renderer
            _fmt = fmt
        else:
            raise ex.excError("unsupported output format: %s" % str(fmt))
        data = fn(*args, **kwargs)
        # reshape the dataset for the chosen renderer
        if fmt == "json":
            data = xform_data_for_json(data)
        elif fmt in ("table", "csv", None):
            data = xform_data_for_tabular(data)
        # nothing to print for empty or scalar numeric results
        if data is None:
            return
        if type(data) in (int, float):
            return
        if len(data) == 0:
            return
        if not isinstance(data, (dict, list)):
            print(data)
            return
        _fmt(data)
        if "error" in data:
            return 1
    return decorator
def print_color_config(fpath):
    """
    Colorize and print the content of the file passed as argument.
    """
    from rcColor import colorize, color
    import re

    def highlighter(line):
        """
        Colorize interesting parts to help readability
        """
        line = line.rstrip("\n")
        # section headers like [DEFAULT] or [fs#1]
        if re.match(r'\[.+\]', line):
            return colorize(line, color.BROWN)
        # {reference} expressions
        line = re.sub(
            r"({[\.\w\-_#{}\[\]()\$\+]+})",
            colorize(r"\1", color.GREEN),
            line
        )
        # "keyword =" assignments
        line = re.sub(
            r"^(\s*\w+\s*)=",
            colorize(r"\1", color.LIGHTBLUE)+"=",
            line
        )
        # "keyword@scope =" assignments
        line = re.sub(
            r"^(\s*\w+)(@\w+\s*)=",
            colorize(r"\1", color.LIGHTBLUE)+colorize(r"\2", color.RED)+"=",
            line
        )
        return line

    try:
        with open(fpath, 'r') as ofile:
            for line in ofile.readlines():
                print(highlighter(line))
    except Exception as exc:
        raise ex.excError(exc)
opensvc-1.8~20170412/lib/checkRaidSmartArrayLinux.py 0000777 0001750 0001750 00000000000 13073467726 026540 2checkRaidSmartArray.py ustar jkelbert jkelbert opensvc-1.8~20170412/lib/resContainerKvm.py 0000644 0001750 0001750 00000014144 13073467726 020521 0 ustar jkelbert jkelbert import rcStatus
import resources as Res
import time
import os
import rcExceptions as ex
from rcGlobalEnv import rcEnv
from rcUtilities import qcall
from rcUtilitiesLinux import check_ping
import resContainer
class Kvm(resContainer.Container):
    """
    KVM container resource driver, based on the virsh command and the
    libvirt domain xml definition under /etc/libvirt/qemu.
    """
    # seconds granted to the guest to boot / shut down
    startup_timeout = 180
    shutdown_timeout = 120

    def __init__(self,
                 rid,
                 name,
                 guestos=None,
                 osvc_root_path=None,
                 **kwargs):
        resContainer.Container.__init__(self,
                                        rid=rid,
                                        name=name,
                                        type="container.kvm",
                                        guestos=guestos,
                                        osvc_root_path=osvc_root_path,
                                        **kwargs)
        # libvirt domain definition file
        self.cf = os.path.join(os.sep, 'etc', 'libvirt', 'qemu', name+'.xml')

    def __str__(self):
        return "%s name=%s" % (Res.Resource.__str__(self), self.name)

    def list_kvmconffiles(self):
        # The domain xml, when it exists.
        if os.path.exists(self.cf):
            return [self.cf]
        return []

    def files_to_sync(self):
        # Only the domain definition needs replication to peer nodes.
        return self.list_kvmconffiles()

    def check_capabilities(self):
        # The host must advertise hvm support in virsh capabilities.
        cmd = ['virsh', 'capabilities']
        (ret, out, err) = self.call(cmd, errlog=False)
        if ret != 0:
            self.status_log("can not fetch capabilities")
            return False
        if 'hvm' not in out:
            self.status_log("hvm not supported by host")
            return False
        return True

    def ping(self):
        # ICMP reachability of the guest address (1s timeout, 1 packet).
        return check_ping(self.addr, timeout=1, count=1)

    def container_start(self):
        # (Re)define the domain from its xml, then start it.
        if not os.path.exists(self.cf):
            self.log.error("%s not found"%self.cf)
            raise ex.excError
        cmd = ['virsh', 'define', self.cf]
        (ret, buff, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError
        cmd = ['virsh', 'start', self.name]
        (ret, buff, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError

    def container_stop(self):
        # graceful acpi shutdown
        cmd = ['virsh', 'shutdown', self.name]
        (ret, buff, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError

    def container_forcestop(self):
        # hard stop
        cmd = ['virsh', 'destroy', self.name]
        (ret, buff, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.excError

    def is_up_on(self, nodename):
        return self.is_up(nodename)

    def is_up(self, nodename=None):
        # Run virsh dominfo locally, or through rcEnv.rsh on a peer node.
        cmd = ['virsh', 'dominfo', self.name]
        if nodename is not None:
            cmd = rcEnv.rsh.split() + [nodename] + cmd
        (ret, out, err) = self.call(cmd, errlog=False)
        if ret != 0:
            return False
        if "running" in out.split():
            return True
        return False

    def get_container_info(self):
        # vcpus and vmem from virsh dominfo, defaulting to '0'.
        cmd = ['virsh', 'dominfo', self.name]
        (ret, out, err) = self.call(cmd, errlog=False, cache=True)
        self.info = {'vcpus': '0', 'vmem': '0'}
        if ret != 0:
            return self.info
        for line in out.split('\n'):
            if "CPU(s):" in line: self.info['vcpus'] = line.split(':')[1].strip()
            if "Used memory:" in line: self.info['vmem'] = line.split(':')[1].strip()
        return self.info

    def check_manual_boot(self):
        # Manual boot means no libvirt autostart symlink for the domain.
        cf = os.path.join(os.sep, 'etc', 'libvirt', 'qemu', 'autostart', self.name+'.xml')
        if os.path.exists(cf):
            return False
        return True

    def install_drp_flag(self):
        # Attach a dedicated flag virtual disk to the domain xml, so the
        # guest can detect it runs on the DRP node.
        flag_disk_path = os.path.join(rcEnv.pathvar, 'drp_flag.vdisk')
        from xml.etree.ElementTree import ElementTree, SubElement
        tree = ElementTree()
        tree.parse(self.cf)
        """ create the vdisk if it does not exist yet
        """
        if not os.path.exists(flag_disk_path):
            with open(flag_disk_path, 'w') as f:
                f.write('')
                f.close()
        """ check if drp flag is already set up
        """
        # NOTE(review): getiterator() is deprecated and removed in
        # python 3.9; Element.iter() is the modern equivalent
        for disk in tree.getiterator("disk"):
            e = disk.find('source')
            if e is None:
                continue
            (dev, path) = e.items()[0]
            if path == flag_disk_path:
                self.log.info("flag virtual disk already exists")
                return
        """ add vdisk to the vm xml config
        """
        self.log.info("install drp flag virtual disk")
        devices = tree.find("devices")
        e = SubElement(devices, "disk", {'device': 'disk', 'type': 'file'})
        SubElement(e, "driver", {'name': 'qemu'})
        SubElement(e, "source", {'file': flag_disk_path})
        SubElement(e, "target", {'bus': 'virtio', 'dev': 'vdosvc'})
        tree.write(self.cf)

    def provision(self):
        # Delegate provisioning to the provKvm module.
        m = __import__("provKvm")
        prov = m.ProvisioningKvm(self)
        prov.provisioner()

    def devlist(self):
        # Source devices of the domain's disks, cached in self.devs.
        if hasattr(self, 'devs') and self.devs != set():
            return self.devs
        devmapping = self.devmap()
        self.devs = set(map(lambda x: x[0], devmapping))
        return self.devs

    def disklist(self):
        # Translate devices to disks through the platform helper when
        # available, cached in self.disks.
        if hasattr(self, 'disks'):
            return self.disks
        if not hasattr(self, 'devs'):
            self.devlist()
        devps = self.devs
        try:
            u = __import__('rcUtilities'+rcEnv.sysname)
            self.disks = u.devs_to_disks(self, devps)
        except:
            self.disks = devps
        return self.disks

    def devmap(self):
        # (source dev, target dev) pairs parsed from the domain xml,
        # cached in self.devmapping.
        if hasattr(self, "devmapping"):
            return self.devmapping
        self.devmapping = []
        from xml.etree.ElementTree import ElementTree, SubElement
        tree = ElementTree()
        tree.parse(self.cf)
        for dev in tree.getiterator('disk'):
            s = dev.find('source')
            if s is None:
                continue
            if 'dev' not in s.attrib:
                continue
            src = s.attrib['dev']
            s = dev.find('target')
            if s is None:
                continue
            if 'dev' not in s.attrib:
                continue
            dst = s.attrib['dev']
            self.devmapping.append((src, dst))
        return self.devmapping
opensvc-1.8~20170412/lib/svc.py 0000644 0001750 0001750 00000515030 13073467726 016202 0 ustar jkelbert jkelbert """
The module defining the Svc class.
"""
from __future__ import print_function
from __future__ import unicode_literals
import sys
import os
import signal
import logging
import datetime
import lock
from resources import Resource
from resourceset import ResourceSet
from freezer import Freezer
import rcStatus
from rcGlobalEnv import rcEnv, get_osvc_paths, Storage
from rcUtilities import justcall, lazy, vcall, is_string, try_decode
from rcConfigParser import RawConfigParser
from svcBuilder import conf_get_string_scope, conf_get_boolean_scope, get_pg_settings
import rcExceptions as ex
import rcLogger
import node
from rcScheduler import scheduler_fork, Scheduler, SchedOpts
if sys.version_info[0] < 3:
    # python2 has no BrokenPipeError builtin; alias it to IOError so
    # except clauses work on both versions
    BrokenPipeError = IOError
def signal_handler(*args):
    """
    A signal handler raising the excSignal exception.
    Args can be signum and frame, but we don't use them.
    """
    raise ex.excSignal
# Default number of seconds to wait for the service action lock.
DEFAULT_WAITLOCK = 60

# Resource driver groups composing the default service status overview.
DEFAULT_STATUS_GROUPS = [
    "container",
    "ip",
    "disk",
    "fs",
    "share",
    "sync",
    "app",
    "hb",
    "stonith",
    "task",
]

# Default schedule expressions for the service scheduled tasks.
CONFIG_DEFAULTS = {
    'push_schedule': '00:00-06:00@361',
    'sync_schedule': '04:00-06:00@121',
    'comp_schedule': '00:00-06:00@361',
    'status_schedule': '@9',
    'monitor_schedule': '@1',
    'resinfo_schedule': '@60',
    'no_schedule': '',
}

# Actions still permitted while the service is frozen.
ACTIONS_ALLOW_ON_FROZEN = [
    "autopush",
    "delete",
    "disable",
    "edit_config",
    "enable",
    "freeze",
    "frozen",
    "get",
    "json_config",
    "json_status",
    "json_disklist",
    "json_devlist",
    "logs",
    "print_config",
    "print_devlist",
    "print_disklist",
    "print_config_mtime",
    "print_resource_status",
    "print_schedule",
    "print_status",
    "push",
    "push_resinfo",
    "push_config",
    "push_service_status",
    "prstatus",
    "scheduler",
    "set",
    "status",
    "thaw",
    "update",
    "unset",
    "validate_config",
]

# Actions permitted on a node not part of the service nodes.
ACTIONS_ALLOW_ON_INVALID_NODE = [
    "delete",
    "edit_config",
    "frozen",
    "get",
    "logs",
    "print_config",
    "set",
    "unset",
    "update",
    "validate_config",
]

# Actions permitted on cluster-managed services: the frozen-safe set
# plus cluster orchestration actions.
ACTIONS_ALLOW_ON_CLUSTER = ACTIONS_ALLOW_ON_FROZEN + [
    "boot",
    "docker",
    "dns_update",
    "postsync",
    "presync",
    "resource_monitor",
    "startstandby",
    "sync_all",
    "sync_drp",
    "sync_nodes",
    "toc",
    "validate_config",
]

# Actions whose execution is not recorded in the service log.
ACTIONS_NO_LOG = [
    "delete",
    "edit_config",
    "get",
    "group_status",
    "logs",
    "push",
    "push_resinfo",
    "push_config",
    "push_service_status",
    "resource_monitor",
    "scheduler",
    "set",
    "status",
    "unset",
    "validate_config",
]

# Actions that do not fire pre/post triggers.
ACTIONS_NO_TRIGGER = [
    "delete",
    "dns_update",
    "enable",
    "disable",
    "status",
    "scheduler",
    "pg_freeze",
    "pg_thaw",
    "pg_kill",
    "logs",
    "edit_config",
    "push_resinfo",
    "push",
    "group_status",
    "presync",
    "postsync",
    "freezestop",
    "resource_monitor",
]

# Actions that do not take the service action lock.
ACTIONS_NO_LOCK = [
    "docker",
    "edit_config",
    "freeze",
    "freezestop",
    "frozen",
    "get",
    "logs",
    "push",
    "push_resinfo",
    "push_config",
    "push_service_status",
    "run",
    "scheduler",
    "status",
    "thaw",
    "toc",
    "validate_config",
]

# Resource types belonging to the disk driver group.
DISK_TYPES = [
    "disk.drbd",
    "disk.gandi",
    "disk.gce",
    "disk.lock",
    "disk.loop",
    "disk.md",
    "disk.rados",
    "disk.raw",
    "disk.vg",
    "disk.zpool",
]

# All resource types contributing to the service status.
STATUS_TYPES = [
    "app",
    "container.amazon",
    "container.docker",
    "container.esx",
    "container.hpvm",
    "container.jail",
    "container.kvm",
    "container.lxc",
    "container.ldom",
    "container.openstack",
    "container.ovm",
    "container.srp",
    "container.vbox",
    "container.vcloud",
    "container.vz",
    "container.xen",
    "container.zone",
    "disk.drbd",
    "disk.gandi",
    "disk.gce",
    "disk.lock",
    "disk.loop",
    "disk.md",
    "disk.lv",
    "disk.raw",
    "disk.rados",
    "disk.scsireserv",
    "disk.vg",
    "disk.zpool",
    "fs",
    "hb.linuxha",
    "hb.openha",
    "hb.ovm",
    "hb.rhcs",
    "hb.sg",
    "hb.vcs",
    "ip",
    "ip.docker",
    "share.nfs",
    "sync.btrfs",
    "sync.btrfssnap",
    "sync.dcsckpt",
    "sync.dcssnap",
    "sync.dds",
    "sync.docker",
    "sync.evasnap",
    "sync.hp3par",
    "sync.hp3parsnap",
    "sync.ibmdssnap",
    "sync.necismsnap",
    "sync.netapp",
    "sync.nexenta",
    "sync.rados",
    "sync.rsync",
    "sync.symclone",
    "sync.symsnap",
    "sync.symsrdfs",
    "sync.s3",
    "sync.zfs",
    "stonith.callout",
    "stonith.ilo",
    "task",
]

# Actions natively applied to both the master and the encapsulated
# (slave) containers, without needing --master/--slave specifiers.
ACTIONS_DO_MASTER_AND_SLAVE = [
    "boot",
    "migrate",
    "prstart",
    "prstop",
    "restart",
    "shutdown",
    "start",
    "startstandby",
    "stop",
    "stopstandby",
    "switch",
]

# Sync actions honoring the pre/post snapshot triggers.
ACTIONS_NEED_SNAP_TRIGGER = [
    "sync_drp",
    "sync_nodes",
    "sync_resync",
    "sync_update",
]

# Supported service topologies.
CLUSTER_TYPES = [
    "failover",
    "flex",
    "autoflex",
]

# Force a stable locale so command outputs are parseable.
os.environ['LANG'] = 'C'
def _slave_action(func):
    """
    Decorator for service actions that must also run on the encapsulated
    (slave) containers, honoring the --master/--slave(s) options.
    """
    def need_specifier(self):
        """
        Raise an exception if --master or --slave(s) need to be set
        """
        if self.command_is_scoped():
            return
        if self.running_action in ACTIONS_DO_MASTER_AND_SLAVE:
            return
        if self.options.master or self.options.slaves or self.options.slave is not None:
            return
        raise ex.excError("specify either --master, --slave(s) or both (%s)" % func.__name__)

    def _func(self):
        # nothing to do on slaves when running inside the encap container
        # or when the service has no encap resources
        if self.encap or not self.has_encap_resources:
            return
        if self.command_is_scoped() and \
           len(set(self.action_rid) & set(self.encap_resources.keys())) == 0:
            self.log.info("skip action on slaves: no encap resources are selected")
            return
        need_specifier(self)
        # run when slaves are explicitly requested, or when no explicit
        # master/slave selection was made at all
        if self.options.slaves or \
           self.options.slave is not None or \
           (not self.options.master and not self.options.slaves and self.options.slave is None):
            try:
                func(self)
            except Exception as exc:
                raise ex.excError(str(exc))
    return _func
def _master_action(func):
    """
    Decorator for service actions that must run on the master node,
    honoring the --master/--slave(s) options.
    """
    def need_specifier(self):
        """
        Raise an exception if --master or --slave(s) need to be set
        """
        if self.encap:
            return
        if not self.has_encap_resources:
            return
        if self.command_is_scoped():
            return
        if self.running_action in ACTIONS_DO_MASTER_AND_SLAVE:
            return
        if self.options.master or self.options.slaves or self.options.slave is not None:
            return
        raise ex.excError("specify either --master, --slave(s) or both (%s)" % func.__name__)

    def _func(self):
        need_specifier(self)
        # run when --master is requested, or when no explicit
        # master/slave selection was made at all
        if self.options.master or \
           (not self.options.master and not self.options.slaves and self.options.slave is None):
            func(self)
    return _func
class Svc(object):
"""
A OpenSVC service class.
A service is a collection of resources.
It exposes operations methods like provision, unprovision, stop, start,
and sync.
"""
    def __init__(self, svcname=None):
        """
        Initialize a service object with its default attribute values.
        Most attributes are overridden later by the service builder and
        by the command line options parser.
        """
        self.type = "hosted"
        self.svcname = svcname
        self.hostid = rcEnv.nodename
        # well-known file paths for this service
        self.paths = Storage(
            cf=os.path.join(rcEnv.pathetc, self.svcname+'.conf'),
            push_flag=os.path.join(rcEnv.pathvar, self.svcname, 'last_pushed_config'),
            run_flag=os.path.join(os.sep, "var", "run", "opensvc."+self.svcname),
        )
        # resource and resourceset indexes
        self.resources_by_id = {}
        self.encap_resources = {}
        self.resourcesets = []
        self.resourcesets_by_type = {}
        self.disks = set()
        self.devs = set()
        # caches, filled lazily and reset on refresh
        self.ref_cache = {}
        self.encap_json_status_cache = {}
        self.rset_status_cache = None
        self.lockfd = None
        self.group_status_cache = None
        self.abort_start_done = False
        self.action_start_date = datetime.datetime.now()
        # set to True when a heartbeat (hb) resource is added (__iadd__)
        self.ha = False
        self.has_encap_resources = False
        # presumably True when this agent runs inside a container
        # (encapsulated context) — TODO confirm against the builder
        self.encap = False
        self.action_rid = []
        self.action_rid_before_depends = []
        self.action_rid_depends = []
        # per-action rid dependency graph (see register_dependency())
        self.dependencies = {}
        self.running_action = None
        self.config = None
        self.need_postsync = set()
        # set by the builder
        self.conf = os.path.join(rcEnv.pathetc, svcname+".conf")
        self.node = None
        self.comment = ""
        self.drp_type = ""
        self.app = ""
        self.drnoaction = False
        self.clustertype = "failover"
        self.show_disabled = False
        self.svc_env = rcEnv.node_env
        self.nodes = set([rcEnv.nodename])
        self.drpnodes = set()
        self.drpnode = ""
        self.encapnodes = set()
        self.flex_primary = ""
        self.drp_flex_primary = ""
        self.sync_dblogger = False
        self.create_pg = False
        self.disable_rollback = False
        self.presync_done = False
        self.presnap_trigger = None
        self.postsnap_trigger = None
        self.monitor_action = None
        self.disabled = False
        self.anti_affinity = None
        self.autostart_node = []
        self.lock_timeout = DEFAULT_WAITLOCK
        # merged by the cmdline parser
        self.options = Storage(
            color="auto",
            slaves=False,
            slave=None,
            master=False,
            cron=False,
            force=False,
            remote=False,
            ignore_affinity=False,
            debug=False,
            disable_rollback=False,
            show_disabled=None,
            moduleset="",
            module="",
            ruleset_date="",
            dry_run=False,
            refresh=False,
            rid=None,
            tags=None,
            subsets=None,
            discard=False,
            recover=False,
            waitlock=DEFAULT_WAITLOCK,
        )
        self.log = rcLogger.initLogger(self.svcname)
        # aliases for the scsi persistent reservation actions
        self.scsirelease = self.prstop
        self.scsireserv = self.prstart
        self.scsicheckreserv = self.prstatus
    @lazy
    def sched(self):
        """
        Lazy init of the service scheduler, preloaded with the actions
        every service schedules. Resource-dependent actions are added
        later by post_build().
        """
        return Scheduler(
            name=self.svcname,
            config_defaults=CONFIG_DEFAULTS,
            options=self.options,
            config=self.config,
            log=self.log,
            svc=self,
            scheduler_actions={
                # periodic compliance check, schedule from comp_schedule
                "compliance_auto": SchedOpts(
                    "DEFAULT",
                    fname=self.svcname+os.sep+"last_comp_check",
                    schedule_option="comp_schedule"
                ),
                # periodic status push, schedule from status_schedule
                "push_service_status": SchedOpts(
                    "DEFAULT",
                    fname=self.svcname+os.sep+"last_push_service_status",
                    schedule_option="status_schedule"
                ),
            },
        )
@lazy
def dockerlib(self):
"""
Lazy allocator for the dockerlib object.
"""
import rcDocker
return rcDocker.DockerLib(self)
@lazy
def freezer(self):
"""
Lazy allocator for the freezer object.
"""
return Freezer(self.svcname)
@lazy
def compliance(self):
from compliance import Compliance
comp = Compliance(self)
return comp
def __lt__(self, other):
"""
Order by service name
"""
return self.svcname < other.svcname
def register_dependency(self, action, _from, _to):
if action not in self.dependencies:
self.dependencies[action] = {}
if _from not in self.dependencies[action]:
self.dependencies[action][_from] = set()
self.dependencies[action][_from].add(_to)
def action_rid_dependencies(self, action, rid):
if action in ("boot", "provision", "start"):
action = "start"
elif action in ("shutdown", "unprovision", "stop"):
action = "stop"
else:
return set()
if action not in self.dependencies:
return set()
if rid not in self.dependencies[action]:
return set()
return self.dependencies[action][rid]
def action_rid_dependency_of(self, action, rid):
if action in ("boot", "provision", "start"):
action = "start"
elif action in ("shutdown", "unprovision", "stop"):
action = "stop"
else:
return set()
if action not in self.dependencies:
return set()
dependency_of = set()
for _rid, dependencies in self.dependencies[action].items():
if rid in dependencies:
dependency_of.add(_rid)
return dependency_of
    def print_schedule(self):
        """
        The 'print schedule' node and service action entrypoint.
        Delegate to the service scheduler.
        """
        return self.sched.print_schedule()
    def scheduler(self):
        """
        The service scheduler action entrypoint. Run every registered
        scheduled action, logging errors instead of propagating them so
        one failing action does not prevent the others from running.
        """
        self.options.cron = True
        self.sync_dblogger = True
        if not self.has_run_flag():
            # presumably the run flag is created at service/node init —
            # don't run scheduled tasks before that — TODO confirm
            self.log.info("the scheduler is off during init")
            return
        for action in self.sched.scheduler_actions:
            try:
                if action == "sync_all":
                    # save the action logging to the collector if sync_all
                    # is not needed
                    self.sched_sync_all()
                elif action.startswith("task#"):
                    self.run_task(action)
                elif action == "compliance_auto":
                    self.compliance_auto()
                else:
                    self.action(action)
            except ex.excError as exc:
                self.log.error(exc)
            except:
                # unexpected error: record the traceback and proceed
                # with the next scheduled action
                self.save_exc()
    def post_build(self):
        """
        A method run after the service is done building.
        Add resource-dependent tasks to the scheduler.
        """
        if not self.encap:
            # only the master pushes the service configuration
            self.sched.scheduler_actions["push_config"] = SchedOpts(
                "DEFAULT",
                fname=self.svcname+os.sep+"last_push_config",
                schedule_option="push_schedule"
            )
        try:
            monitor_schedule = conf_get_string_scope(self, self.config, 'DEFAULT', 'monitor_schedule')
        except ex.OptNotFound:
            monitor_schedule = None
        # schedule the resource monitor for non-flex ha services, or
        # whenever an explicit monitor_schedule is configured
        if (self.ha and "flex" not in self.clustertype) or monitor_schedule is not None:
            self.sched.scheduler_actions["resource_monitor"] = SchedOpts(
                "DEFAULT",
                fname=self.svcname+os.sep+"last_resource_monitor",
                schedule_option="monitor_schedule"
            )
        # one sync_all schedule entry per sync resource
        syncs = []
        for resource in self.get_resources("sync"):
            syncs += [SchedOpts(
                resource.rid,
                fname=self.svcname+os.sep+"last_syncall_"+resource.rid,
                schedule_option="sync_schedule"
            )]
        if len(syncs) > 0:
            self.sched.scheduler_actions["sync_all"] = syncs
        # one schedule entry per task resource
        for resource in self.get_resources("task"):
            self.sched.scheduler_actions[resource.rid] = SchedOpts(
                resource.rid,
                fname=self.svcname+os.sep+"last_"+resource.rid,
                schedule_option="no_schedule"
            )
        self.sched.scheduler_actions["push_resinfo"] = SchedOpts(
            "DEFAULT",
            fname=self.svcname+os.sep+"last_push_resinfo",
            schedule_option="resinfo_schedule"
        )
def purge_status_last(self):
"""
Purge all service resources on-disk status caches.
"""
for rset in self.resourcesets:
rset.purge_status_last()
def get_subset_parallel(self, rtype):
"""
Return True if the resources of a resourceset can run an action in
parallel executing per-resource workers.
"""
rtype = rtype.split(".")[0]
subset_section = 'subset#' + rtype
if self.config is None:
self.load_config()
if not self.config.has_section(subset_section):
return False
try:
return conf_get_boolean_scope(self, self.config, subset_section, "parallel")
except ex.OptNotFound:
return False
    def __iadd__(self, other):
        """
        Svc += ResourceSet
        Svc += Resource

        Register a resource or resourceset in the service indexes,
        creating the hosting resourceset on the fly when needed.
        """
        if hasattr(other, 'resources'):
            # new ResourceSet or ResourceSet-derived class
            self.resourcesets.append(other)
            self.resourcesets_by_type[other.type] = other
            other.svc = self
            return self
        if other.subset is not None:
            # the resource wants to be added to a specific resourceset
            # for action grouping, parallel execution or sub-resource
            # triggers
            base_type = other.type.split(".")[0]
            rtype = "%s:%s" % (base_type, other.subset)
        else:
            rtype = other.type
        if rtype in self.resourcesets_by_type:
            # the resource set already exists. add resource or resourceset.
            self.resourcesets_by_type[rtype] += other
        elif isinstance(other, Resource):
            # create the resourceset hosting this resource, honoring the
            # per-subset parallel setting
            parallel = self.get_subset_parallel(rtype)
            if hasattr(other, 'rset_class'):
                rset = other.rset_class(type=rtype, resources=[other], parallel=parallel)
            else:
                rset = ResourceSet(type=rtype, resources=[other], parallel=parallel)
            rset.rid = rtype
            rset.svc = self
            rset.pg_settings = get_pg_settings(self, "subset#"+rtype)
            self.__iadd__(rset)
        else:
            self.log.debug("unexpected object addition to the service: %s",
                           str(other))
        # index resources that carry a proper rid ("<group>#<index>")
        if isinstance(other, Resource) and other.rid and "#" in other.rid:
            self.resources_by_id[other.rid] = other
        other.svc = self
        # a heartbeat resource flips the service in ha mode
        if other.type.startswith("hb"):
            self.ha = True
        if not other.disabled and hasattr(other, "on_add"):
            other.on_add()
        return self
    def dblogger(self, action, begin, end, actionlogfile):
        """
        Send to the collector the service status after an action, and
        the action log. The action log file is deleted afterwards.
        """
        self.node.collector.call(
            'end_action', self, action, begin, end, actionlogfile,
            sync=self.sync_dblogger
        )
        g_vars, g_vals, r_vars, r_vals = self.svcmon_push_lists()
        self.node.collector.call(
            'svcmon_update_combo', g_vars, g_vals, r_vars, r_vals,
            sync=self.sync_dblogger
        )
        os.unlink(actionlogfile)
        try:
            # best-effort flush/close of the logging handlers at end of
            # action: ignore any shutdown error
            logging.shutdown()
        except:
            pass
    def svclock(self, action=None, timeout=30, delay=1):
        """
        Acquire the service action lock, retrying every <delay> seconds
        for at most <timeout> seconds. On success, store the lock file
        descriptor in self.lockfd. Raise excError on failure.
        """
        suffix = None
        if action in ACTIONS_NO_LOCK or \
           action.startswith("collector") or \
           self.lockfd is not None:
            # explicitly blacklisted or
            # no need to serialize requests or
            # already acquired
            return
        if action.startswith("compliance"):
            # compliance modules are allowed to execute actions on the service
            # so give them their own lock
            suffix = "compliance"
        elif action.startswith("sync"):
            suffix = "sync"
        lockfile = os.path.join(rcEnv.pathlock, self.svcname)
        if suffix is not None:
            lockfile = ".".join((lockfile, suffix))
        details = "(timeout %d, delay %d, action %s, lockfile %s)" % \
                  (timeout, delay, action, lockfile)
        self.log.debug("acquire service lock %s", details)
        # map every known locking failure to an excError with context
        try:
            lockfd = lock.lock(
                timeout=timeout,
                delay=delay,
                lockfile=lockfile,
                intent=action
            )
        except lock.lockTimeout as exc:
            raise ex.excError("timed out waiting for lock %s: %s" % (details, str(exc)))
        except lock.lockNoLockFile:
            raise ex.excError("lock_nowait: set the 'lockfile' param %s" % details)
        except lock.lockCreateError:
            raise ex.excError("can not create lock file %s" % details)
        except lock.lockAcquire as exc:
            raise ex.excError("another action is currently running %s: %s" % (details, str(exc)))
        except ex.excSignal:
            raise ex.excError("interrupted by signal %s" % details)
        except Exception as exc:
            # unexpected failure: save the traceback before raising
            self.save_exc()
            raise ex.excError("unexpected locking error %s: %s" % (details, str(exc)))
        if lockfd is not None:
            self.lockfd = lockfd
    def svcunlock(self):
        """
        Release the service action lock and forget its file descriptor.
        """
        lock.unlock(self.lockfd)
        self.lockfd = None
    @staticmethod
    def setup_signal_handlers():
        """
        Install the module's signal_handler for SIGINT and SIGTERM.
        """
        signal.signal(signal.SIGINT, signal_handler)
        signal.signal(signal.SIGTERM, signal_handler)
def get_resources(self, _type=None, strict=False, discard_disabled=True):
"""
Return the list of resources matching criteria.
"""
if _type is None:
rsets = self.resourcesets
else:
rsets = self.get_resourcesets(_type, strict=strict)
resources = []
for rset in rsets:
for resource in rset.resources:
if not self.encap and 'encap' in resource.tags:
continue
if discard_disabled and resource.disabled:
continue
resources.append(resource)
return resources
def get_resourcesets(self, _type, strict=False):
"""
Return the list of resourceset matching the specified types.
"""
if not isinstance(_type, list):
_types = [_type]
else:
_types = _type
rsets_by_type = {}
for rset in self.resourcesets:
if ':' in rset.type and rset.has_resource_with_types(_types, strict=strict):
# subset
rsets_by_type[rset.type] = rset
continue
rs_base_type = rset.type.split(".")[0]
if rset.type in _types:
# exact match
if rs_base_type not in rsets_by_type:
rsets_by_type[rs_base_type] = type(rset)(type=rs_base_type)
rsets_by_type[rs_base_type].svc = self
rsets_by_type[rs_base_type] += rset
elif rs_base_type in _types and not strict:
# group match
if rs_base_type not in rsets_by_type:
rsets_by_type[rs_base_type] = type(rset)(type=rs_base_type)
rsets_by_type[rs_base_type].svc = self
rsets_by_type[rs_base_type] += rset
rsets = list(rsets_by_type.values())
rsets.sort()
return rsets
def has_resourceset(self, _type, strict=False):
"""
Return True if the service has a resource set of the specified type.
"""
return len(self.get_resourcesets(_type, strict=strict)) > 0
    def all_set_action(self, action=None, tags=None):
        """
        Execute an action on all resources of all resource sets.
        """
        self.set_action(self.resourcesets, action=action, tags=tags)
def sub_set_action(self, _type=None, action=None, tags=None, xtags=None,
strict=False):
"""
Execute an action on all resources of the resource sets of the
specified type.
"""
rsets = self.get_resourcesets(_type, strict=strict)
self.set_action(rsets, action=action, tags=tags, xtags=xtags)
def need_snap_trigger(self, rsets, action):
"""
Return True if the action is a sync action and at least one of the
specified resource sets has a resource requiring a snapshot.
"""
if action not in ACTIONS_NEED_SNAP_TRIGGER:
return False
for rset in rsets:
for resource in rset.resources:
# avoid to run pre/post snap triggers when there is no
# resource flagged for snap and on drpnodes
if hasattr(resource, "snap") and resource.snap is True and \
rcEnv.nodename in self.nodes:
return True
return False
    def set_action(self, rsets=None, action=None, tags=None, xtags=None):
        """
        Call the action on all sets sorted resources.
        If the sets define a pre_snap trigger run that before the action.
        If the sets define a pre_<action> trigger run that before the action.
        If the sets define a post_<action> trigger run that after the action.
        """
        if rsets is None:
            rsets = []
        if tags is None:
            tags = set()
        if xtags is None:
            xtags = set()
        def do_trigger(when):
            """
            Excecute a trigger on each resource of the set,
            if the action allows triggers.
            If a trigger raises,
            * excError, stop looping over the resources and propagate up
              to the caller.
            * excAbortAction, continue looping over the resources
            * any other exception, save the traceback in the debug log
              and stop looping over the resources and raise an excError
            """
            for rset in rsets:
                if action in ACTIONS_NO_TRIGGER or rset.all_skip(action):
                    break
                try:
                    rset.log.debug("start %s %s_action", rset.type, when)
                    getattr(rset, when + "_action")(action)
                except ex.excError:
                    raise
                except ex.excAbortAction:
                    continue
                except:
                    self.save_exc()
                    raise ex.excError
        def do_snap_trigger(when):
            """
            Execute the pre or post snap trigger command, if the action
            needs snapshots and a trigger is configured. Raise excError
            on non-zero return code.
            """
            if not need_snap:
                return
            trigger = getattr(self, when + "snap_trigger")
            if trigger is None:
                return
            results = self.vcall(trigger)
            if results[0] != 0:
                raise ex.excError(results[2])
        need_snap = self.need_snap_trigger(rsets, action)
        # Multiple resourcesets of the same type need to be sorted
        # so that the start and stop action happen in a predictible order.
        # Sort alphanumerically on reseourceset type.
        #
        # Example, on start:
        #  app
        #  app.1
        #  app.2
        # on stop:
        #  app.2
        #  app.1
        #  app
        reverse = "stop" in action or action in ("rollback", "shutdown", "unprovision")
        rsets = sorted(rsets, key=lambda x: x.type, reverse=reverse)
        # snapshots are created in pre_action and destroyed in post_action
        # place presnap and postsnap triggers around pre_action
        do_snap_trigger("pre")
        do_trigger("pre")
        do_snap_trigger("post")
        for rset in rsets:
            self.log.debug('set_action: action=%s rset=%s', action, rset.type)
            rset.action(action, tags=tags, xtags=xtags)
        do_trigger("post")
def __str__(self):
"""
The Svc class print formatter.
"""
output = "Service %s available resources:" % self.svcname
for key in self.resourcesets_by_type:
output += " %s" % key
output += "\n"
for rset in self.resourcesets:
output += " [%s]" % str(rset)
return output
    def status(self):
        """
        Return the aggregate 'overall' status of the service.
        """
        group_status = self.group_status()
        return group_status["overall"].status
    def print_status_data(self):
        """
        Return a structure containing hierarchical status of
        the service: per-resource status entries, per-container encap
        status, frozen state and aggregated group statuses.
        """
        data = {
            "resources": {},
            "frozen": self.frozen(),
        }
        containers = self.get_resources('container')
        if len(containers) > 0:
            data['encap'] = {}
            for container in containers:
                if container.name is None or len(container.name) == 0:
                    continue
                try:
                    data['encap'][container.name] = self.encap_json_status(container)
                except:
                    # encap status unavailable: report an empty resource set
                    data['encap'][container.name] = {'resources': {}}
        for rset in self.get_resourcesets(STATUS_TYPES, strict=True):
            for resource in rset.resources:
                (
                    rid,
                    rtype,
                    status,
                    label,
                    log,
                    monitor,
                    disable,
                    optional,
                    encap
                ) = resource.status_quad(color=False)
                data['resources'][rid] = {
                    'status': str(status),
                    'type': rtype,
                    'label': label,
                    'log': log,
                    'tags': sorted(list(resource.tags)),
                    'monitor':monitor,
                    'disable': disable,
                    'optional': optional,
                    'encap': encap,
                }
        # one top-level key per status group (avail, overall, ...)
        group_status = self.group_status()
        for group in group_status:
            data[group] = str(group_status[group])
        return data
    def env_section_keys_evaluated(self):
        """
        Return the dict of key/val pairs in the [env] section of the
        service configuration, after dereferencing.
        """
        return self.env_section_keys(evaluate=True)
def env_section_keys(self, evaluate=False):
"""
Return the dict of key/val pairs in the [env] section of the
service configuration, without dereferencing.
"""
config = self.print_config_data()
try:
from collections import OrderedDict
data = OrderedDict()
except ImportError:
data = {}
for key in config.get("env", {}).keys():
if evaluate:
data[key] = conf_get_string_scope(self, self.config, 'env', key)
else:
data[key] = config["env"][key]
return data
    def print_config_data(self):
        """
        Return a simple dict (OrderedDict if possible), fed with the
        service configuration sections and keys
        """
        try:
            from collections import OrderedDict
            best_dict = OrderedDict
        except ImportError:
            # python < 2.7 fallback: key order is lost
            best_dict = dict
        svc_config = best_dict()
        tmp = best_dict()
        self.load_config()
        config = self.config
        defaults = config.defaults()
        for key in defaults.keys():
            tmp[key] = defaults[key]
        svc_config['DEFAULT'] = tmp
        # purge the parser defaults so the per-section loop below does
        # not duplicate every DEFAULT key in each section; presumably the
        # final load_config() restores a pristine parser — TODO confirm
        config._defaults = {}
        sections = config.sections()
        for section in sections:
            options = config.options(section)
            tmpsection = best_dict()
            for option in options:
                if config.has_option(section, option):
                    tmpsection[option] = config.get(section, option)
            svc_config[section] = tmpsection
        self.load_config()
        return svc_config
    def logs(self):
        """
        Extract and display the service logs through a pager, honoring
        --color and --debug.
        """
        if len(self.log.handlers) == 0:
            return
        # assumes the first handler is the file handler — TODO confirm
        logfile = self.log.handlers[0].stream.name
        if not os.path.exists(logfile):
            return
        from rcColor import color, colorize
        class Shared(object):
            """
            A bare class to store a persistent flag.
            """
            # True while skipping continuation lines of a filtered-out
            # (DEBUG) log entry
            skip = False
        def fmt(line):
            """
            Format a log line, colorizing the log level.
            Return the line as a string buffer.
            """
            line = line.rstrip("\n")
            elements = line.split(" - ")
            if len(elements) < 3 or elements[2] not in ("DEBUG", "INFO", "WARNING", "ERROR"):
                # this is a log line continuation (command output for ex.)
                if Shared.skip:
                    return
                else:
                    return line
            if not self.options.debug and elements[2] == "DEBUG":
                Shared.skip = True
                return
            else:
                Shared.skip = False
            if not rcLogger.include_svcname:
                elements[1] = elements[1].replace(self.svcname, "").lstrip(".")
            if len(elements[1]) > rcLogger.namelen:
                elements[1] = "*"+elements[1][-(rcLogger.namelen-1):]
            elements[1] = rcLogger.namefmt % elements[1]
            elements[1] = colorize(elements[1], color.BOLD)
            elements[2] = "%-7s" % elements[2]
            elements[2] = elements[2].replace("ERROR", colorize("ERROR", color.RED))
            elements[2] = elements[2].replace("WARNING", colorize("WARNING", color.BROWN))
            elements[2] = elements[2].replace("INFO", colorize("INFO", color.LIGHTBLUE))
            return " ".join(elements)
        try:
            # page the output; less -R preserves the color escapes
            pipe = os.popen('TERM=xterm less -R', 'w')
        except:
            pipe = sys.stdout
        try:
            with open(logfile, "r") as ofile:
                for line in ofile.readlines():
                    buff = fmt(line)
                    if buff:
                        pipe.write(buff+"\n")
        except BrokenPipeError:
            # the pager was quit before all output was written
            # NOTE(review): BrokenPipeError is a python3-only name —
            # confirm python2 is not a target for this code path
            try:
                sys.stdout = os.fdopen(1)
            except (AttributeError, OSError, IOError):
                pass
        finally:
            if pipe != sys.stdout:
                pipe.close()
def print_resource_status(self):
"""
Print a single resource status string.
"""
if len(self.action_rid) != 1:
print("action 'print_resource_status' is not allowed on mutiple "
"resources", file=sys.stderr)
return 1
for rid in self.action_rid:
if rid not in self.resources_by_id:
print("resource not found")
continue
resource = self.resources_by_id[rid]
print(rcStatus.colorize_status(str(resource.status())))
return 0
    def print_status(self):
        """
        Display in human-readable format the hierarchical service
        status, rendered as a forest: overall, then the 'avail' subtree
        (with encap resources nested under their container), then the
        'accessory' subtree. Honors --format and --show-disabled.
        """
        if self.options.format is not None:
            # structured output requested: return the data and let the
            # formatter render it
            return self.print_status_data()
        from textwrap import wrap
        from rcUtilities import term_width
        from rcColor import color, colorize
        width = term_width()
        def print_res(squad, fmt, pfx, subpfx=None):
            """
            Print a resource line, with forest markers, rid, flags, label and
            resource log.
            """
            if subpfx is None:
                subpfx = pfx
            rid, status, label, log, monitor, disabled, optional, encap = squad
            # flags column: Monitored, Disabled, Optional, Encap
            flags = ''
            flags += 'M' if monitor else '.'
            flags += 'D' if disabled else '.'
            flags += 'O' if optional else '.'
            flags += 'E' if encap else '.'
            print(fmt % (rid, flags, rcStatus.colorize_status(status), label))
            for msg in log.split("\n"):
                if len(msg) > 0:
                    if subpfx and not subpfx.startswith(color.END):
                        subpfx = color.END + subpfx
                    print('\n'.join(wrap(msg,
                                         initial_indent=subpfx,
                                         subsequent_indent=subpfx,
                                         width=width
                                        )
                                   )
                         )
        if self.options.show_disabled is not None:
            discard_disabled = not self.options.show_disabled
        else:
            discard_disabled = not self.show_disabled
        def get_res(group):
            """
            Wrap get_resources() with discard_disable relaying and sorted
            resultset.
            """
            resources = self.get_resources(
                group,
                discard_disabled=discard_disabled,
            )
            return sorted(resources)
        # resources driving the service availability
        avail_resources = get_res("ip")
        avail_resources += get_res("disk")
        avail_resources += get_res("fs")
        avail_resources += get_res("container")
        avail_resources += get_res("share")
        avail_resources += get_res("app")
        # resources not driving the service availability
        accessory_resources = get_res("hb")
        accessory_resources += get_res("stonith")
        accessory_resources += get_res("sync")
        accessory_resources += get_res("task")
        n_accessory_resources = len(accessory_resources)
        print(colorize(self.svcname, color.BOLD))
        frozen = 'frozen' if self.frozen() else ''
        fmt = "%-20s %4s %-10s %s"
        color_status = rcStatus.colorize_status(self.group_status()['overall'])
        print(fmt % ("overall", '', color_status, frozen))
        if n_accessory_resources == 0:
            # no accessory subtree: 'avail' is the last top-level branch
            fmt = "'- %-17s %4s %-10s %s"
            head_c = " "
        else:
            fmt = "|- %-17s %4s %-10s %s"
            head_c = "|"
        color_status = rcStatus.colorize_status(self.group_status()['avail'])
        print(fmt % ("avail", '', color_status, ''))
        # collect the encap resources status per container
        ers = {}
        for container in self.get_resources('container'):
            try:
                ejs = self.encap_json_status(container)
                ers[container.rid] = ejs["resources"]
                if ejs.get("frozen", False):
                    container.status_log("frozen", "info")
            except ex.excNotAvailable:
                ers[container.rid] = {}
            except Exception as exc:
                print(exc)
                ers[container.rid] = {}
        lines = []
        encap_squad = {}
        for resource in avail_resources:
            (
                rid,
                rtype,
                status,
                label,
                log,
                monitor,
                disable,
                optional,
                encap
            ) = resource.status_quad()
            lines.append((rid, status, label, log, monitor, disable, optional, encap))
            if rid.startswith("container") and rid in ers:
                # prepare the encap resource lines to nest under their
                # hosting container line
                squad = []
                for _rid, val in ers[rid].items():
                    squad.append((
                        _rid,
                        val['status'],
                        val['label'],
                        val['log'],
                        val['monitor'],
                        val['disable'],
                        val['optional'],
                        val['encap'],
                    ))
                encap_squad[rid] = squad
        last = len(lines) - 1
        if last >= 0:
            for idx, line in enumerate(lines):
                if idx == last:
                    # last branch of the avail subtree
                    fmt = head_c+" '- %-14s %4s %-10s %s"
                    pfx = head_c+" %-14s %4s %-10s " % ('', '', '')
                    subpfx = head_c+" %-11s %4s %-10s " % ('', '', '')
                    print_res(line, fmt, pfx, subpfx=subpfx)
                    subresbar = " "
                else:
                    fmt = head_c+" |- %-14s %4s %-10s %s"
                    pfx = head_c+" | %-14s %4s %-10s " % ('', '', '')
                    if line[0] in encap_squad and len(encap_squad[line[0]]) > 0:
                        subpfx = head_c+" | | %-11s %4s %-10s " % ('', '', '')
                    else:
                        subpfx = None
                    print_res(line, fmt, pfx, subpfx=subpfx)
                    subresbar = "|"
                if line[0] in encap_squad:
                    # nested encap resources of this container
                    _last = len(encap_squad[line[0]]) - 1
                    if _last >= 0:
                        for _idx, _line in enumerate(encap_squad[line[0]]):
                            if _idx == _last:
                                fmt = head_c+" "+subresbar+" '- %-11s %4s %-10s %s"
                                pfx = head_c+" "+subresbar+" %-11s %4s %-10s " % ('', '', '')
                                print_res(_line, fmt, pfx)
                            else:
                                fmt = head_c+" "+subresbar+" |- %-11s %4s %-10s %s"
                                pfx = head_c+" "+subresbar+" | %-11s %4s %-10s " % ('', '', '')
                                print_res(_line, fmt, pfx)
        if n_accessory_resources > 0:
            fmt = "'- %-17s %4s %-10s %s"
            print(fmt%("accessory", '', '', ''))
            lines = []
            for resource in accessory_resources:
                rid, rtype, status, label, log, monitor, disable, optional, encap = resource.status_quad()
                if rid in ers:
                    status = rcStatus.Status(rcStatus.status_value(ers[rid]['status']))
                lines.append((rid, status, label, log, monitor, disable, optional, encap))
            last = len(lines) - 1
            if last >= 0:
                for idx, line in enumerate(lines):
                    if idx == last:
                        fmt = " '- %-14s %4s %-10s %s"
                        pfx = " %-14s %4s %-10s " % ('', '', '')
                        print_res(line, fmt, pfx)
                    else:
                        fmt = " |- %-14s %4s %-10s %s"
                        pfx = " | %-14s %4s %-10s " % ('', '', '')
                        print_res(line, fmt, pfx)
    def svcmon_push_lists(self, status=None):
        """
        Return the list of resource status in a format adequate for
        collector feeding: (g_vars, g_vals, r_vars, r_vals), where the
        *_vars lists name the columns and the *_vals lists carry one row
        per service instance (g_*) or per resource (r_*).
        """
        if status is None:
            status = self.group_status()
        if self.frozen():
            frozen = "1"
        else:
            frozen = "0"
        # resource-level columns
        r_vars = [
            "svcname",
            "nodename",
            "vmname",
            "rid",
            "res_type",
            "res_desc",
            "res_status",
            "res_monitor",
            "res_optional",
            "res_disable",
            "updated",
            "res_log",
        ]
        r_vals = []
        now = datetime.datetime.now()
        for rset in self.resourcesets:
            for resource in rset.resources:
                if 'encap' in resource.tags:
                    # encap resources are reported by the container loop
                    continue
                rstatus = str(rcStatus.Status(resource.rstatus))
                r_vals.append([
                    self.svcname,
                    rcEnv.nodename,
                    "",
                    resource.rid,
                    resource.type,
                    resource.label,
                    str(rstatus),
                    "1" if resource.monitor else "0",
                    "1" if resource.optional else "0",
                    "1" if resource.disabled else "0",
                    str(now),
                    resource.status_logs_str(),
                ])
        # service-level columns
        g_vars = [
            "mon_svcname",
            "mon_svctype",
            "mon_nodname",
            "mon_vmname",
            "mon_vmtype",
            "mon_nodtype",
            "mon_ipstatus",
            "mon_diskstatus",
            "mon_syncstatus",
            "mon_hbstatus",
            "mon_containerstatus",
            "mon_fsstatus",
            "mon_sharestatus",
            "mon_appstatus",
            "mon_availstatus",
            "mon_overallstatus",
            "mon_updated",
            "mon_prinodes",
            "mon_frozen",
        ]
        containers = self.get_resources('container')
        containers = [container for container in containers \
                      if container.type != "container.docker"]
        if len(containers) == 0:
            # no hypervised containers: one service-level row
            g_vals = [
                self.svcname,
                self.svc_env,
                rcEnv.nodename,
                "",
                "hosted",
                rcEnv.node_env,
                str(status["ip"]),
                str(status["disk"]),
                str(status["sync"]),
                str(status["hb"]),
                str(status["container"]),
                str(status["fs"]),
                str(status["share"]),
                str(status["app"]),
                str(status["avail"]),
                str(status["overall"]),
                str(now),
                ' '.join(self.nodes),
                frozen,
            ]
        else:
            # one service-level row per container, merging the master
            # and encap statuses
            g_vals = []
            for container in containers:
                ers = {}
                try:
                    ers = self.encap_json_status(container)
                except ex.excNotAvailable:
                    # container not joinable: report n/a statuses
                    ers = {
                        'resources': [],
                        'ip': 'n/a',
                        'disk': 'n/a',
                        'sync': 'n/a',
                        'hb': 'n/a',
                        'container': 'n/a',
                        'fs': 'n/a',
                        'share': 'n/a',
                        'app': 'n/a',
                        'avail': 'n/a',
                        'overall': 'n/a',
                    }
                except Exception as exc:
                    print(exc)
                    continue
                vhostname = container.vm_hostname()
                for rid in ers['resources']:
                    rstatus = ers['resources'][rid]['status']
                    r_vals.append([
                        self.svcname,
                        rcEnv.nodename,
                        vhostname,
                        str(rid),
                        ers['resources'][rid].get('type', ''),
                        str(ers['resources'][rid]['label']),
                        str(rstatus),
                        "1" if ers['resources'][rid].get('monitor', False) else "0",
                        "1" if ers['resources'][rid].get('optional', False) else "0",
                        "1" if ers['resources'][rid].get('disabled', False) else "0",
                        str(now),
                        str(ers['resources'][rid]['log']),
                    ])
                if 'avail' not in status or 'avail' not in ers:
                    continue
                g_vals.append([
                    self.svcname,
                    self.svc_env,
                    rcEnv.nodename,
                    vhostname,
                    container.type.replace('container.', ''),
                    rcEnv.node_env,
                    str(rcStatus.Status(status["ip"])+rcStatus.Status(ers['ip'])),
                    str(rcStatus.Status(status["disk"])+rcStatus.Status(ers['disk'])),
                    str(rcStatus.Status(status["sync"])+rcStatus.Status(ers['sync'])),
                    str(rcStatus.Status(status["hb"])+rcStatus.Status(ers['hb'])),
                    str(rcStatus.Status(status["container"])+rcStatus.Status(ers['container'])),
                    str(rcStatus.Status(status["fs"])+rcStatus.Status(ers['fs'])),
                    str(rcStatus.Status(status["share"])+rcStatus.Status(ers['share'] if 'share' in ers else 'n/a')),
                    str(rcStatus.Status(status["app"])+rcStatus.Status(ers['app'])),
                    str(rcStatus.Status(status["avail"])+rcStatus.Status(ers['avail'])),
                    str(rcStatus.Status(status["overall"])+rcStatus.Status(ers['overall'])),
                    str(now),
                    ' '.join(self.nodes),
                    frozen,
                ])
        return g_vars, g_vals, r_vars, r_vals
def get_rset_status(self, groups):
"""
Return the aggregated status of all resources of the specified resource
sets, as a dict of status indexed by resourceset type.
"""
self.setup_environ()
rsets_status = {}
for status_type in STATUS_TYPES:
group = status_type.split('.')[0]
if group not in groups:
continue
for rset in self.get_resourcesets(status_type, strict=True):
if rset.type not in rsets_status:
rsets_status[rset.type] = rset.status()
else:
rsets_status[rset.type] += rset.status()
return rsets_status
    def resource_monitor(self):
        """
        The resource monitor action entrypoint.
        Skip if the schedule is not due, else run the monitor task.
        """
        if self.sched.skip_action("resource_monitor"):
            return
        self.task_resource_monitor()
    @scheduler_fork
    def task_resource_monitor(self):
        """
        The resource monitor action.
        Trigger the service defined monitor_action if the hb resource is up
        but a monitored resource (local or encap) is down and not
        restartable.
        """
        # always recompute statuses, don't trust caches here
        self.options.refresh = True
        if self.group_status_cache is None:
            self.group_status(excluded_groups=set(['sync']))
        if not self.ha:
            self.log.debug("no active heartbeat resource. no need to check "
                           "monitored resources.")
            return
        hb_status = self.group_status_cache['hb']
        if hb_status.status != rcStatus.UP:
            self.log.debug("heartbeat status is not up. no need to check "
                           "monitored resources.")
            return
        # local monitored resources
        monitored_resources = []
        for resource in self.get_resources():
            if resource.monitor:
                monitored_resources.append(resource)
        for resource in monitored_resources:
            if resource.rstatus not in (rcStatus.UP, rcStatus.STDBY_UP, rcStatus.NA):
                if len(resource.status_logs) > 0:
                    rstatus_log = " (%s)" % resource.status_logs_str().strip().strip("# ")
                else:
                    rstatus_log = ''
                self.log.info("monitored resource %s is in state %s%s",
                              resource.rid,
                              str(resource.rstatus),
                              rstatus_log)
                if self.monitor_action is not None and \
                   hasattr(self, self.monitor_action):
                    # let the action() machinery run the monitor action
                    raise ex.MonitorAction
                else:
                    self.log.info("Would TOC but no (or unknown) resource "
                                  "monitor action set.")
                return
        # encap monitored resources, fetched from each container
        for container in self.get_resources('container'):
            try:
                encap_status = self.encap_json_status(container)
                res = encap_status["resources"]
            except Exception:
                encap_status = {}
                res = {}
            if encap_status.get("frozen"):
                # a frozen slave is not monitored
                continue
            for rid, rdata in res.items():
                if not rdata.get("monitor"):
                    continue
                erid = rid+"@"+container.name
                monitored_resources.append(erid)
                if rdata.get("status") not in ("up", "n/a"):
                    if len(rdata.get("log")) > 0:
                        rstatus_log = " (%s)" % rdata.get("log").strip().strip("# ")
                    else:
                        rstatus_log = ""
                    self.log.info("monitored resource %s is in state %s%s",
                                  erid, rdata.get("status"), rstatus_log)
                    if self.monitor_action is not None and \
                       hasattr(self, self.monitor_action):
                        raise ex.MonitorAction
                    else:
                        self.log.info("Would TOC but no (or unknown) resource "
                                      "monitor action set.")
                    return
        if len(monitored_resources) == 0:
            self.log.debug("no monitored resource")
        else:
            # mixed list: Resource objects for local, rid strings for encap
            rids = ','.join([res if is_string(res) else res.rid \
                             for res in monitored_resources])
            self.log.debug("monitored resources are up (%s)", rids)
    def reboot(self):
        """
        Wrap the node reboot method.
        """
        self.node.system._reboot()
    def crash(self):
        """
        Wrap the node crash method.
        """
        self.node.system.crash()
    def _pg_freeze(self):
        """
        Wrapper function for the process group freeze method.
        """
        return self._pg_freezer("freeze")
    def _pg_thaw(self):
        """
        Wrapper function for the process group thaw method.
        """
        return self._pg_freezer("thaw")
    def _pg_kill(self):
        """
        Wrapper function for the process group kill method.
        """
        return self._pg_freezer("kill")
def _pg_freezer(self, action):
"""
Wrapper function for the process group methods.
"""
if not self.create_pg:
return
if self.pg is None:
return
if action == "freeze":
self.pg.freeze(self)
elif action == "thaw":
self.pg.thaw(self)
elif action == "kill":
self.pg.kill(self)
    @lazy
    def pg(self):
        """
        A lazy property to import the system-specific process group module
        on-demand and expose it as self.pg. Return None when the platform
        has no such module.
        """
        try:
            mod = __import__('rcPg'+rcEnv.sysname)
        except ImportError:
            self.log.info("process group are not supported on this platform")
            return
        except Exception as exc:
            # unexpected import failure: surface it
            print(exc)
            raise
        return mod
def pg_freeze(self):
    """
    Freeze all process of the process groups of the service.
    When the action is scoped (--rid/--tags/--subsets), delegate to the
    app and container resource sets; otherwise act on the service-level
    process group.
    """
    if self.command_is_scoped():
        self.sub_set_action('app', '_pg_freeze')
        self.sub_set_action('container', '_pg_freeze')
    else:
        self._pg_freeze()
        for resource in self.get_resources(["app", "container"]):
            resource.status(refresh=True, restart=False)

def pg_thaw(self):
    """
    Thaw all process of the process groups of the service.
    Scoping behaves as in pg_freeze().
    """
    if self.command_is_scoped():
        self.sub_set_action('app', '_pg_thaw')
        self.sub_set_action('container', '_pg_thaw')
    else:
        self._pg_thaw()
        for resource in self.get_resources(["app", "container"]):
            resource.status(refresh=True, restart=False)

def pg_kill(self):
    """
    Kill all process of the process groups of the service.
    Scoping behaves as in pg_freeze().
    """
    if self.command_is_scoped():
        self.sub_set_action('app', '_pg_kill')
        self.sub_set_action('container', '_pg_kill')
    else:
        self._pg_kill()
        for resource in self.get_resources(["app", "container"]):
            resource.status(refresh=True, restart=False)
def freezestop(self):
    """
    The 'freezestop' action entrypoint.
    Call the freezestop method of resources implementing it.
    """
    self.sub_set_action('hb.openha', 'freezestop')

def stonith(self):
    """
    The 'stonith' action entrypoint.
    Call the stonith method of resources implementing it.
    """
    self.sub_set_action('stonith.ilo', 'start')
    self.sub_set_action('stonith.callout', 'start')

def toc(self):
    """
    Call the resource monitor action (self.monitor_action, e.g. reboot
    or crash).
    """
    self.log.info("start monitor action '%s'", self.monitor_action)
    getattr(self, self.monitor_action)()
def encap_cmd(self, cmd, verbose=False, error="raise"):
    """
    Execute a command in all service containers.
    If error is set to "raise", stop iterating at first error.
    If error is set to "continue", log errors and proceed to the next
    container.
    """
    for container in self.get_resources('container'):
        try:
            self._encap_cmd(cmd, container, verbose=verbose)
        except ex.excEncapUnjoignable:
            if error != "continue":
                self.log.error("container %s is not joinable to execute "
                               "action '%s'", container.name, ' '.join(cmd))
                raise
            elif verbose:
                self.log.warning("container %s is not joinable to execute "
                                 "action '%s'", container.name, ' '.join(cmd))
def _encap_cmd(self, cmd, container, verbose=False):
    """
    Execute a command in a service container.

    Returns an (out, err, ret) tuple. Several pre-conditions short-circuit
    with ('', '', 0): service already started on container boot, no encap
    resource, container not running here, container not selected by
    --slave, or encap start delegated to container boot.

    Raises excError on frozen container or non-zero command exit, and
    excEncapUnjoignable when no way to run commands in the container is
    defined.
    """
    if container.pg_frozen():
        raise ex.excError("can't join a frozen container. abort encap "
                          "command.")
    vmhostname = container.vm_hostname()
    try:
        autostart_node = conf_get_string_scope(self, self.config,
                                               'DEFAULT', 'autostart_node',
                                               impersonate=vmhostname).split()
    except ex.OptNotFound:
        autostart_node = []
    if cmd == ["start"] and container.booted and vmhostname in autostart_node:
        self.log.info("skip encap service start in container %s: already "
                      "started on boot", vmhostname)
        return '', '', 0
    if not self.has_encap_resources:
        self.log.debug("skip encap %s: no encap resource", ' '.join(cmd))
        return '', '', 0
    if not container.is_up():
        msg = "skip encap %s: the container is not running here" % ' '.join(cmd)
        if verbose:
            self.log.info(msg)
        else:
            self.log.debug(msg)
        return '', '', 0

    if self.options.slave is not None and not \
       (container.name in self.options.slave or \
        container.rid in self.options.slave):
        # no need to run encap cmd (container not specified in --slave)
        return '', '', 0

    if cmd == ['start'] and not self.need_start_encap(container):
        self.log.info("skip start in container %s: the encap service is "
                      "configured to start on container boot.",
                      container.name)
        return '', '', 0

    # now we known we'll execute a command in the slave, so purge the
    # encap cache
    self.purge_cache_encap_json_status(container.rid)

    # wait for the container multi-user state
    if cmd[0] in ["start", "boot"] and hasattr(container, "wait_multi_user"):
        container.wait_multi_user()

    # forward relevant command line options to the encap agent
    options = ['--daemon']
    if self.options.dry_run:
        options.append('--dry-run')
    if self.options.refresh:
        options.append('--refresh')
    if self.options.disable_rollback:
        options.append('--disable-rollback')
    if self.options.rid:
        options.append('--rid')
        options.append(self.options.rid)
    if self.options.tags:
        options.append('--tags')
        options.append(self.options.tags)
    if self.options.subsets:
        options.append('--subsets')
        options.append(self.options.subsets)

    paths = get_osvc_paths(osvc_root_path=container.osvc_root_path,
                           sysname=container.guestos)
    cmd = [paths.svcmgr, '-s', self.svcname] + options + cmd

    if container is not None and hasattr(container, "rcmd"):
        out, err, ret = container.rcmd(cmd)
    elif hasattr(container, "runmethod"):
        cmd = container.runmethod + cmd
        out, err, ret = justcall(cmd)
    else:
        raise ex.excEncapUnjoignable("undefined rcmd/runmethod in "
                                     "resource %s"%container.rid)

    if verbose:
        self.log.info('logs from %s child service:', container.name)
        print(out)
        if len(err) > 0:
            print(err)
    if ret != 0:
        raise ex.excError("error from encap service command '%s': "
                          "%d\n%s\n%s"%(' '.join(cmd), ret, out, err))
    return out, err, ret
def get_encap_json_status_path(self, rid):
    """
    Return the path of the file where the status data of the service
    encapsulated in the container identified by <rid> will be written
    for caching.
    """
    return os.path.join(rcEnv.pathvar, self.svcname, "encap.status."+rid)
def purge_cache_encap_json_status(self, rid):
    """
    Delete the in-memory and on-disk caches of status of the service
    encapsulated in the container identified by <rid>.
    """
    if rid in self.encap_json_status_cache:
        del self.encap_json_status_cache[rid]
    path = self.get_encap_json_status_path(rid)
    try:
        # unlink directly instead of testing existence first, to avoid
        # a race with a concurrent purge; a missing file is not an error
        # for a best-effort cache invalidation
        os.unlink(path)
    except OSError:
        pass
def put_cache_encap_json_status(self, rid, data):
    """
    Write the on-disk cache of status of the service encapsulated in
    the container identified by <rid>, and refresh the in-memory cache.
    The on-disk write is best-effort: on failure, any partial cache file
    is removed.
    """
    import json
    self.encap_json_status_cache[rid] = data
    path = self.get_encap_json_status_path(rid)
    directory = os.path.dirname(path)
    if not os.path.exists(directory):
        try:
            os.makedirs(directory)
        except OSError:
            # tolerate a concurrent creator; re-raise genuine failures
            if not os.path.isdir(directory):
                raise
    try:
        with open(path, 'w') as ofile:
            ofile.write(json.dumps(data))
    except (IOError, OSError, ValueError):
        # don't leave a partial cache file behind. The file may not
        # exist at all if open() itself failed, so guard the unlink.
        try:
            os.unlink(path)
        except OSError:
            pass
def get_cache_encap_json_status(self, rid):
    """
    Return the cached status of the service encapsulated in the
    container identified by <rid>. The in-memory cache is consulted
    first, then the on-disk cache. Return None on cache miss or on an
    unreadable/invalid cache file.
    """
    import json
    try:
        return self.encap_json_status_cache[rid]
    except KeyError:
        pass
    cache_file = self.get_encap_json_status_path(rid)
    try:
        with open(cache_file, 'r') as cfile:
            return json.loads(cfile.read())
    except (IOError, OSError, ValueError):
        return None
def encap_json_status(self, container, refresh=False):
    """
    Return the status data from the agent runnning the encapsulated part
    of the service.

    When the container is down, forge a status structure locally (encap
    sync resources n/a, other encap resources down) instead of querying.
    Otherwise query the encap agent with 'print status --format json',
    using the status cache unless refresh is requested.
    """
    if container.guestos == 'windows':
        # no encap agent command channel on windows guests
        raise ex.excNotAvailable

    if container.status(ignore_nostatus=True) == rcStatus.DOWN:
        #
        # passive node for the vservice => forge encap resource status
        # - encap sync are n/a
        # - other encap res are down
        #
        group_status = {
            "avail": "down",
            "overall": "down",
            "resources": {},
        }
        groups = set(["container", "ip", "disk", "fs", "share", "hb"])
        for group in groups:
            group_status[group] = 'down'
        for rset in self.get_resourcesets(STATUS_TYPES, strict=True):
            group = rset.type.split('.')[0]
            if group not in groups:
                continue
            for resource in rset.resources:
                if not self.encap and 'encap' in resource.tags:
                    group_status['resources'][resource.rid] = {'status': 'down'}

        groups = set(["app", "sync"])
        for group in groups:
            group_status[group] = 'n/a'
        for rset in self.get_resourcesets(groups):
            group = rset.type.split('.')[0]
            if group not in groups:
                continue
            for resource in rset.resources:
                if not self.encap and 'encap' in resource.tags:
                    group_status['resources'][resource.rid] = {'status': 'n/a'}

        return group_status

    if not refresh and not self.options.refresh:
        group_status = self.get_cache_encap_json_status(container.rid)
        if group_status:
            return group_status

    # default skeleton, returned as-is if the encap agent can't be queried
    group_status = {
        "avail": "n/a",
        "overall": "n/a",
        "resources": {},
    }
    groups = set([
        "container",
        "ip",
        "disk",
        "fs",
        "share",
        "hb",
        "stonith",
        "task",
        "app",
        "sync"
    ])
    for group in groups:
        group_status[group] = 'n/a'

    cmd = ['print', 'status', '--format', 'json']
    try:
        results = self._encap_cmd(cmd, container)
    except ex.excError:
        return group_status
    except Exception as exc:
        print(exc)
        return group_status

    import json
    try:
        group_status = json.loads(results[0])
    except Exception:
        # was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt. Keep the n/a skeleton on invalid JSON.
        pass

    self.put_cache_encap_json_status(container.rid, group_status)
    return group_status
def group_status(self, groups=None, excluded_groups=None):
    """
    Return the status data of the service.

    Aggregates per-resource statuses into per-group statuses, then into
    "avail" (availability-relevant drivers only) and "overall" (avail
    plus accessory resources: stonith, hb, sync, task, and warn-level
    status logs).
    """
    if excluded_groups is None:
        excluded_groups = set()
    if groups is None:
        groups = set(DEFAULT_STATUS_GROUPS)

    status = {}
    moregroups = groups | set(["overall", "avail"])
    groups = groups - excluded_groups
    self.get_rset_status(groups)

    # initialise status of each group
    for group in moregroups:
        status[group] = rcStatus.Status(rcStatus.NA)

    # availability-relevant drivers: everything but sync/hb/task/stonith
    for driver in [_driver for _driver in STATUS_TYPES if \
                   not _driver.startswith('sync') and \
                   not _driver.startswith('hb') and \
                   not _driver.startswith('task') and \
                   not _driver.startswith('stonith')]:
        if driver in excluded_groups:
            continue
        group = driver.split('.')[0]
        if group not in groups:
            continue
        for resource in self.get_resources(driver):
            rstatus = resource.status()
            status[group] += rstatus
            status["avail"] += rstatus

    if status["avail"].status == rcStatus.STDBY_UP_WITH_UP:
        status["avail"].status = rcStatus.UP
        # now that we now the avail status we can promote
        # stdbyup to up
        for group in status:
            if status[group] == rcStatus.STDBY_UP:
                status[group] = rcStatus.UP
    elif status["avail"].status == rcStatus.STDBY_UP_WITH_DOWN:
        status["avail"].status = rcStatus.STDBY_UP

    # overall status is avail + accessory resources status
    # seed overall with avail
    status["overall"] = rcStatus.Status(status["avail"])

    # a single warn/error status log line degrades overall to warn
    for resource in self.get_resources():
        group = resource.type.split(".")[0]
        if group not in groups:
            continue
        if resource.status_logs_count(levels=["warn", "error"]) > 0:
            status["overall"] += rcStatus.WARN
            break

    for driver in [_driver for _driver in STATUS_TYPES if \
                   _driver.startswith('stonith')]:
        if 'stonith' not in groups:
            continue
        if driver in excluded_groups:
            continue
        for resource in self.get_resources(driver):
            rstatus = resource.status()
            status['stonith'] += rstatus
            status["overall"] += rstatus

    for driver in [_driver for _driver in STATUS_TYPES if \
                   _driver.startswith('hb')]:
        if 'hb' not in groups:
            continue
        if driver in excluded_groups:
            continue
        for resource in self.get_resources(driver):
            rstatus = resource.status()
            status['hb'] += rstatus
            status["overall"] += rstatus

    for driver in [_driver for _driver in STATUS_TYPES if \
                   _driver.startswith('sync')]:
        if 'sync' not in groups:
            continue
        if driver in excluded_groups:
            continue
        for resource in self.get_resources(driver):
            # sync resources are expected to be up
            rstatus = resource.status()
            status['sync'] += rstatus
            if rstatus == rcStatus.UP:
                status["overall"] += rcStatus.UNDEF
            elif rstatus in [rcStatus.NA, rcStatus.UNDEF]:
                status["overall"] += rstatus
            else:
                status["overall"] += rcStatus.WARN

    for driver in [_driver for _driver in STATUS_TYPES if \
                   _driver.startswith('task')]:
        if 'task' not in groups:
            continue
        if driver in excluded_groups:
            continue
        for resource in self.get_resources(driver):
            rstatus = resource.status()
            status['task'] += rstatus
            status["overall"] += rstatus

    self.group_status_cache = status
    return status
def print_disklist(self):
    """
    Print the list of disks the service handles.
    Delegate to the --format renderer when one is requested.
    """
    if self.options.format is not None:
        return self.print_disklist_data()
    disks = self.disklist()
    if len(disks) > 0:
        print('\n'.join(disks))

def print_devlist(self):
    """
    Print the list of devices the service handles.
    Delegate to the --format renderer when one is requested.
    """
    if self.options.format is not None:
        return self.print_devlist_data()
    devs = self.devlist()
    if len(devs) > 0:
        print('\n'.join(devs))

def print_disklist_data(self):
    """
    Return the list of disks the service handles.
    """
    return list(self.disklist())

def print_devlist_data(self):
    """
    Return the list of devices the service handles.
    """
    return list(self.devlist())
def disklist(self):
    """
    Return the set of disks the service handles, from cache if possible.
    NOTE(review): an empty result is not cached (len == 0 triggers a
    recompute on every call) — presumably intentional, confirm.
    """
    if len(self.disks) == 0:
        self.disks = self._disklist()
    return self.disks
def _disklist(self):
"""
Return the set of disks the service handles.
"""
disks = set()
for resource in self.get_resources():
if resource.skip:
continue
disks |= resource.disklist()
self.log.debug("found disks %s held by service", disks)
return disks
def devlist(self, filtered=True):
    """
    Return the set of devices the service handles, from cache if possible.
    NOTE(review): the cached value ignores the <filtered> argument of the
    call that populated it — confirm callers always use the same value.
    """
    if len(self.devs) == 0:
        self.devs = self._devlist(filtered=filtered)
    return self.devs
def _devlist(self, filtered=True):
"""
Return the set of devices the service handles.
"""
devs = set()
for resource in self.get_resources():
if filtered and resource.skip:
continue
devs |= resource.devlist()
self.log.debug("found devs %s held by service", devs)
return devs
def get_non_affine_svc(self):
    """
    Return the list services defined as anti-affine, filtered to retain
    only the running ones (those that will cause an actual affinity error
    on start for this service).
    """
    if not self.anti_affinity:
        return []
    self.log.debug("build anti-affine services %s", str(self.anti_affinity))
    self.node.build_services(svcnames=self.anti_affinity)
    running_af_svc = []
    for svc in self.node.svcs:
        if svc.svcname == self.svcname:
            continue
        avail = svc.group_status()['avail']
        # anything not strictly "down" counts as running for affinity
        if str(avail) != "down":
            running_af_svc.append(svc.svcname)
    return running_af_svc
def print_config_mtime(self):
    """
    Print the service configuration file last modified timestamp. Used by
    remote agents to determine which agent holds the most recent version.
    """
    mtime = os.stat(self.paths.cf).st_mtime
    print(mtime)
def need_start_encap(self, container):
    """
    Return False if the encapsulated part of the service is configured
    to autostart with the container (checked through the scoped and
    unscoped autostart_node settings), True otherwise.
    """
    self.load_config()
    defaults = self.config.defaults()
    autostart_values = (container.name, 'encapnodes')
    for key in ('autostart_node@' + container.name,
                'autostart_node@encapnodes',
                'autostart_node'):
        if defaults.get(key) in autostart_values:
            return False
    return True
def boot(self):
    """
    The 'boot' action entrypoint.
    A boot is a start if the running node is defined as autostart_node.
    A boot is a startstandby if the running node is not defined as autostart_node.
    A boot is a startstandby if the service is handled by a HA monitor.
    Start errors cause a fallback to startstandby as a best effort.
    """
    if rcEnv.nodename not in self.autostart_node:
        self.startstandby()
        return

    resources = self.get_resources('hb')
    if len(resources) > 0:
        # HA-monitored service: the monitor decides where to start
        self.log.warning("cluster nodes should not be in autostart_nodes for HA configuration")
        self.startstandby()
        return

    try:
        self.start()
    except ex.excError as exc:
        self.log.error(str(exc))
        self.log.info("start failed. try to start standby")
        self.startstandby()
def shutdown(self):
    """
    The 'shutdown' action entrypoint: forced, best-effort stop of all
    service parts, in reverse start order (hb, slaves, app, containers,
    share, fs, ip).
    """
    self.options.force = True
    self.master_shutdownhb()
    self.slave_shutdown()
    try:
        self.master_shutdownapp()
    except ex.excError:
        # best-effort: keep going down the stack even if app stop failed
        pass
    self.shutdowncontainer()
    self.master_shutdownshare()
    self.master_shutdownfs()
    self.master_shutdownip()
def command_is_scoped(self, options=None):
    """
    Return True if a resource filter has been setup through
    --rid, --subsets or --tags.
    """
    if options is None:
        options = self.options
    selectors = (options.rid, options.tags, options.subsets)
    return any(sel is not None and sel != "" for sel in selectors)
def run_task(self, rid):
    """
    Run the task resource identified by <rid>, unless the scheduler
    asks to skip it.
    """
    if self.sched.skip_action(rid):
        return
    self.resources_by_id[rid].run()

def run(self):
    """
    The 'run' action entrypoint: run master then slave task resources.
    """
    self.master_run()
    self.slave_run()

@_master_action
def master_run(self):
    """
    Run the task resources of the master part of the service.
    """
    self.sub_set_action("task", "run")

@_slave_action
def slave_run(self):
    """
    Run the task resources of the encapsulated part of the service.
    """
    self.encap_cmd(['run'], verbose=True)
def start(self):
    """
    The 'start' action entrypoint: start the service parts in order
    (hb, ip, fs, share, container, app, then the encapsulated part),
    after giving resources a chance to abort and enforcing
    anti-affinity constraints.
    """
    self.master_starthb()
    self.abort_start()
    af_svc = self.get_non_affine_svc()
    if len(af_svc) != 0:
        if self.options.ignore_affinity:
            self.log.error("force start of %s on the same node as %s "
                           "despite anti-affinity settings",
                           self.svcname, ', '.join(af_svc))
        else:
            self.log.error("refuse to start %s on the same node as %s",
                           self.svcname, ', '.join(af_svc))
            return
    self.master_startip()
    self.master_startfs()
    self.master_startshare()
    self.master_startcontainer()
    self.master_startapp()
    self.slave_start()

@_slave_action
def slave_start(self):
    """
    Start the encapsulated part of the service.
    """
    self.encap_cmd(['start'], verbose=True)
def rollback(self):
    """
    The 'rollback' action entrypoint: undo a partially started service,
    encapsulated part first, then in reverse start order.
    """
    self.encap_cmd(['rollback'], verbose=True)
    try:
        self.rollbackapp()
    except ex.excError:
        # best-effort: keep rolling back the lower layers
        pass
    self.rollbackcontainer()
    self.rollbackshare()
    self.rollbackfs()
    self.rollbackip()
def stop(self):
    """
    The 'stop' action entrypoint: stop the service parts in reverse
    start order (hb, slaves, app, containers, share, fs, ip).
    """
    self.master_stophb()
    self.slave_stop()
    try:
        self.master_stopapp()
    except ex.excError:
        # best-effort: keep going down the stack even if app stop failed
        pass
    self.stopcontainer()
    self.master_stopshare()
    self.master_stopfs()
    self.master_stopip()

@_slave_action
def slave_shutdown(self):
    """
    Shutdown the encapsulated part of the service in each container.
    """
    self.encap_cmd(['shutdown'], verbose=True, error="continue")

@_slave_action
def slave_stop(self):
    """
    Stop the encapsulated part of the service in each container.
    """
    self.encap_cmd(['stop'], verbose=True, error="continue")
def cluster_mode_safety_net(self, action, options):
    """
    Raise excError to bar actions executed without --cluster on monitored
    services.
    Raise excAbortAction to bar actions executed with --cluster on monitored
    services with disabled hb resources (maintenance mode).
    In any case, consider an action with --rid, --tags or --subset set is
    not to be blocked, as it is a surgical operation typical of maintenance
    operations.
    """
    if action in ACTIONS_ALLOW_ON_CLUSTER:
        return
    if self.command_is_scoped(options):
        self.log.debug("stop: called with --rid, --tags or --subset, allow "
                       "action on ha service.")
        return

    # count hb resources, enabled and total
    n_hb = 0
    n_hb_enabled = 0
    for resource in self.get_resources('hb', discard_disabled=False):
        n_hb += 1
        if not resource.disabled:
            n_hb_enabled += 1

    if n_hb == 0:
        # not an HA-monitored service: nothing to protect
        return

    if n_hb > 0 and n_hb_enabled == 0 and self.options.cluster:
        raise ex.excAbortAction("this service has heartbeat resources, "
                                "but all disabled. this state is "
                                "interpreted as a maintenance mode. "
                                "actions submitted with --cluster are not "
                                "allowed to inhibit actions triggered by "
                                "the heartbeat daemon.")
    if n_hb_enabled == 0:
        return

    if not self.options.cluster:
        # let the hb resources see the action before refusing it
        for resource in self.get_resources("hb"):
            if not resource.skip and hasattr(resource, action):
                self.running_action = action
                getattr(resource, action)()
        raise ex.excError("this service is managed by a clusterware, thus "
                          "direct service manipulation is disabled (%s). "
                          "the --cluster option circumvent this safety "
                          "net." % action)
def starthb(self):
    """
    Start the heartbeat resources, master part then encapsulated part.
    """
    self.master_starthb()
    self.slave_starthb()

@_slave_action
def slave_starthb(self):
    """
    Start the heartbeat resources of the encapsulated part.
    """
    self.encap_cmd(['starthb'], verbose=True)

@_master_action
def master_starthb(self):
    """
    Start the heartbeat resources of the master part.
    """
    self.master_hb('start')

@_master_action
def master_startstandbyhb(self):
    """
    Startstandby the heartbeat resources of the master part.
    """
    self.master_hb('startstandby')

@_master_action
def master_shutdownhb(self):
    """
    Shutdown the heartbeat resources of the master part.
    """
    self.master_hb('shutdown')

@_master_action
def master_stophb(self):
    """
    Stop the heartbeat resources of the master part.
    """
    self.master_hb('stop')

def master_hb(self, action):
    """
    Apply <action> to all hb resource sets.
    """
    self.sub_set_action("hb", action)

def stophb(self):
    """
    Stop the heartbeat resources, encapsulated part then master part.
    """
    self.slave_stophb()
    self.master_stophb()

@_slave_action
def slave_stophb(self):
    """
    Stop the heartbeat resources of the encapsulated part.
    """
    self.encap_cmd(['stophb'], verbose=True)
def startdisk(self):
    """
    Start the disk resources, master part then encapsulated part.
    """
    self.master_startdisk()
    self.slave_startdisk()

@_slave_action
def slave_startdisk(self):
    """
    Start the disk resources of the encapsulated part.
    """
    self.encap_cmd(['startdisk'], verbose=True)

@_master_action
def master_startstandbydisk(self):
    """
    Startstandby the disk and dependent sync resources of the master part.
    """
    self.sub_set_action("sync.netapp", "startstandby")
    self.sub_set_action("sync.dcsckpt", "startstandby")
    self.sub_set_action("sync.nexenta", "startstandby")
    self.sub_set_action("sync.symclone", "startstandby")
    self.sub_set_action("sync.symsnap", "startstandby")
    self.sub_set_action("sync.ibmdssnap", "startstandby")
    self.sub_set_action("disk.scsireserv", "startstandby", xtags=set(['zone']))
    self.sub_set_action(DISK_TYPES, "startstandby", xtags=set(['zone']))

@_master_action
def master_startdisk(self):
    """
    Start the disk and dependent sync resources of the master part.
    Sync resources start first so replicated devices are usable.
    """
    self.sub_set_action("sync.netapp", "start")
    self.sub_set_action("sync.dcsckpt", "start")
    self.sub_set_action("sync.nexenta", "start")
    self.sub_set_action("sync.symclone", "start")
    self.sub_set_action("sync.symsnap", "start")
    self.sub_set_action("sync.symsrdfs", "start")
    self.sub_set_action("sync.hp3par", "start")
    self.sub_set_action("sync.ibmdssnap", "start")
    self.sub_set_action("disk.scsireserv", "start", xtags=set(['zone']))
    self.sub_set_action(DISK_TYPES, "start", xtags=set(['zone']))

def stopdisk(self):
    """
    Stop the disk resources, encapsulated part then master part.
    """
    self.slave_stopdisk()
    self.master_stopdisk()

@_slave_action
def slave_stopdisk(self):
    """
    Stop the disk resources of the encapsulated part.
    """
    self.encap_cmd(['stopdisk'], verbose=True)

@_master_action
def master_stopdisk(self):
    """
    Stop the disk resources of the master part, releasing scsi
    reservations last.
    """
    self.sub_set_action("sync.btrfssnap", "stop")
    self.sub_set_action(DISK_TYPES, "stop", xtags=set(['zone']))
    self.sub_set_action("disk.scsireserv", "stop", xtags=set(['zone']))

@_master_action
def master_shutdowndisk(self):
    """
    Shutdown the disk resources of the master part.
    """
    self.sub_set_action("sync.btrfssnap", "shutdown")
    self.sub_set_action(DISK_TYPES, "shutdown", xtags=set(['zone']))
    self.sub_set_action("disk.scsireserv", "shutdown", xtags=set(['zone']))

def rollbackdisk(self):
    """
    Rollback the disk resources of the master part.
    """
    self.sub_set_action(DISK_TYPES, "rollback", xtags=set(['zone']))
    self.sub_set_action("disk.scsireserv", "rollback", xtags=set(['zone']))
def abort_start(self):
    """
    Give a chance to all resources concerned by the action to voice up
    their rebutal of the action before it begins.
    Runs the abort_start hooks in parallel when multiprocessing is
    available (not on Windows), serially otherwise.
    """
    self.abort_start_done = True
    if rcEnv.sysname == "Windows":
        parallel = False
    else:
        try:
            from multiprocessing import Process
            parallel = True
            def wrapper(func):
                # map the hook's boolean veto to a process exit code
                if func():
                    sys.exit(1)
        except ImportError:
            parallel = False

    procs = {}
    for resource in self.get_resources():
        if resource.skip or resource.disabled:
            continue
        if not hasattr(resource, 'abort_start'):
            continue
        if not parallel:
            if resource.abort_start():
                raise ex.excError("start aborted due to resource %s "
                                  "conflict" % resource.rid)
        else:
            proc = Process(target=wrapper, args=[resource.abort_start])
            proc.start()
            procs[resource.rid] = proc

    if parallel:
        err = []
        for rid, proc in procs.items():
            proc.join()
            if proc.exitcode > 0:
                err.append(rid)
        if len(err) > 0:
            raise ex.excError("start aborted due to resource %s "
                              "conflict" % ",".join(err))
def startip(self):
    """
    Start the ip resources, master part then encapsulated part.
    """
    self.master_startip()
    self.slave_startip()

@_slave_action
def slave_startip(self):
    """
    Start the ip resources of the encapsulated part.
    """
    self.encap_cmd(['startip'], verbose=True)

@_master_action
def master_startstandbyip(self):
    """
    Startstandby the ip resources of the master part.
    """
    self.sub_set_action("ip", "startstandby", xtags=set(['zone', 'docker']))

@_master_action
def master_startip(self):
    """
    Start the ip resources of the master part.
    """
    self.sub_set_action("ip", "start", xtags=set(['zone', 'docker']))

def stopip(self):
    """
    Stop the ip resources, encapsulated part then master part.
    """
    self.slave_stopip()
    self.master_stopip()

@_slave_action
def slave_stopip(self):
    """
    Stop the ip resources of the encapsulated part.
    """
    self.encap_cmd(['stopip'], verbose=True)

@_master_action
def master_stopip(self):
    """
    Stop the ip resources of the master part.
    """
    self.sub_set_action("ip", "stop", xtags=set(['zone', 'docker']))

@_master_action
def master_shutdownip(self):
    """
    Shutdown the ip resources of the master part.
    """
    self.sub_set_action("ip", "shutdown", xtags=set(['zone', 'docker']))

def rollbackip(self):
    """
    Rollback the ip resources of the master part.
    """
    self.sub_set_action("ip", "rollback", xtags=set(['zone', 'docker']))
def startshare(self):
    """
    Start the share resources, master part then encapsulated part.
    """
    self.master_startshare()
    self.slave_startshare()

@_master_action
def master_startshare(self):
    """
    Start the nfs share resources of the master part.
    """
    self.sub_set_action("share.nfs", "start")

@_master_action
def master_startstandbyshare(self):
    """
    Startstandby the share resources of the master part.
    """
    self.sub_set_action("share", "startstandby")

@_slave_action
def slave_startshare(self):
    """
    Start the share resources of the encapsulated part.
    """
    self.encap_cmd(['startshare'], verbose=True)

def stopshare(self):
    """
    Stop the share resources, encapsulated part then master part.
    """
    self.slave_stopshare()
    self.master_stopshare()

@_master_action
def master_stopshare(self):
    """
    Stop the share resources of the master part.
    """
    self.sub_set_action("share", "stop")

@_master_action
def master_shutdownshare(self):
    """
    Shutdown the share resources of the master part.
    """
    self.sub_set_action("share", "shutdown")

@_slave_action
def slave_stopshare(self):
    """
    Stop the share resources of the encapsulated part.
    """
    self.encap_cmd(['stopshare'], verbose=True)

def rollbackshare(self):
    """
    Rollback the share resources of the master part.
    """
    self.sub_set_action("share", "rollback")
def startfs(self):
    """
    Start the fs resources, master part then encapsulated part.
    """
    self.master_startfs()
    self.slave_startfs()

@_master_action
def master_startfs(self):
    """
    Start the fs resources of the master part, disks first.
    """
    self.master_startdisk()
    self.sub_set_action("fs", "start", xtags=set(['zone']))

@_master_action
def master_startstandbyfs(self):
    """
    Startstandby the fs resources of the master part, disks first.
    """
    self.master_startstandbydisk()
    self.sub_set_action("fs", "startstandby", xtags=set(['zone']))

@_slave_action
def slave_startfs(self):
    """
    Start the fs resources of the encapsulated part.
    """
    self.encap_cmd(['startfs'], verbose=True)

def stopfs(self):
    """
    Stop the fs resources, encapsulated part then master part.
    """
    self.slave_stopfs()
    self.master_stopfs()

@_master_action
def master_stopfs(self):
    """
    Stop the fs resources of the master part, disks last.
    """
    self.sub_set_action("fs", "stop", xtags=set(['zone']))
    self.master_stopdisk()

@_master_action
def master_shutdownfs(self):
    """
    Shutdown the fs resources of the master part, disks last.
    """
    self.sub_set_action("fs", "shutdown", xtags=set(['zone']))
    self.master_shutdowndisk()

@_slave_action
def slave_stopfs(self):
    """
    Stop the fs resources of the encapsulated part.
    """
    self.encap_cmd(['stopfs'], verbose=True)

def rollbackfs(self):
    """
    Rollback the fs resources of the master part, disks last.
    """
    self.sub_set_action("fs", "rollback", xtags=set(['zone']))
    self.rollbackdisk()
def startcontainer(self):
    """
    Start the container resources, after the abort_start safety check.
    """
    self.abort_start()
    self.master_startcontainer()

@_master_action
def master_startstandbycontainer(self):
    """
    Startstandby the container resources of the master part.
    """
    self.sub_set_action("container", "startstandby")
    self.refresh_ip_status()

@_master_action
def master_startcontainer(self):
    """
    Start the container resources of the master part.
    """
    self.sub_set_action("container", "start")
    self.refresh_ip_status()

def refresh_ip_status(self):
    """ Used after start/stop container because the ip resource
        status change after its own start/stop
    """
    for resource in self.get_resources("ip"):
        resource.status(refresh=True, restart=False)

@_master_action
def shutdowncontainer(self):
    """
    Shutdown the container resources of the master part.
    """
    self.sub_set_action("container", "shutdown")
    self.refresh_ip_status()

@_master_action
def stopcontainer(self):
    """
    Stop the container resources of the master part.
    """
    self.sub_set_action("container", "stop")
    self.refresh_ip_status()

def rollbackcontainer(self):
    """
    Rollback the container resources of the master part.
    """
    self.sub_set_action("container", "rollback")
    self.refresh_ip_status()
def unprovision(self):
    """
    The 'unprovision' action entrypoint: unprovision the resources in
    reverse provisioning order.
    """
    self.sub_set_action("container", "unprovision")
    self.sub_set_action("fs", "unprovision", xtags=set(['zone']))
    self.sub_set_action("disk", "unprovision", xtags=set(['zone']))
    self.sub_set_action("ip", "unprovision", xtags=set(['zone', 'docker']))

def provision(self):
    """
    The 'provision' action entrypoint: provision the resources bottom-up
    (ip, disk, fs, container), then push the service configuration.
    """
    self.sub_set_action("ip", "provision", xtags=set(['zone', 'docker']))
    self.sub_set_action("disk", "provision", xtags=set(['zone']))
    self.sub_set_action("fs", "provision", xtags=set(['zone']))
    self.sub_set_action("container", "provision")
    self.push()
def startapp(self):
    """
    Start the app resources, master part then encapsulated part.
    """
    self.master_startapp()
    self.slave_startapp()

@_slave_action
def slave_startapp(self):
    """
    Start the app resources of the encapsulated part.
    """
    self.encap_cmd(['startapp'], verbose=True)

@_master_action
def master_startstandbyapp(self):
    """
    Startstandby the app resources of the master part.
    """
    self.sub_set_action("app", "startstandby")

@_master_action
def master_startapp(self):
    """
    Start the app resources of the master part.
    """
    self.sub_set_action("app", "start")

def stopapp(self):
    """
    Stop the app resources, encapsulated part then master part.
    """
    self.slave_stopapp()
    self.master_stopapp()

@_slave_action
def slave_stopapp(self):
    """
    Stop the app resources of the encapsulated part.
    """
    self.encap_cmd(['stopapp'], verbose=True)

@_master_action
def master_stopapp(self):
    """
    Stop the app resources of the master part.
    """
    self.sub_set_action("app", "stop")

@_master_action
def master_shutdownapp(self):
    """
    Shutdown the app resources of the master part.
    """
    self.sub_set_action("app", "shutdown")

def rollbackapp(self):
    """
    Rollback the app resources of the master part.
    """
    self.sub_set_action("app", "rollback")
def prstop(self):
    """
    Release scsi reservations, encapsulated part then master part.
    """
    self.slave_prstop()
    self.master_prstop()

@_slave_action
def slave_prstop(self):
    """
    Release scsi reservations held by the encapsulated part.
    """
    self.encap_cmd(['prstop'], verbose=True)

@_master_action
def master_prstop(self):
    """
    Release scsi reservations held by the master part.
    """
    self.sub_set_action("disk.scsireserv", "scsirelease")

def prstart(self):
    """
    Acquire scsi reservations, master part then encapsulated part.
    """
    self.master_prstart()
    self.slave_prstart()

@_slave_action
def slave_prstart(self):
    """
    Acquire scsi reservations for the encapsulated part.
    """
    self.encap_cmd(['prstart'], verbose=True)

@_master_action
def master_prstart(self):
    """
    Acquire scsi reservations for the master part.
    """
    self.sub_set_action("disk.scsireserv", "scsireserv")

def prstatus(self):
    """
    Check the scsi reservations status.
    """
    self.sub_set_action("disk.scsireserv", "scsicheckreserv")
def startstandby(self):
    """
    The 'startstandby' action entrypoint: start the standby instances of
    the master part, then of the encapsulated part.
    """
    self.master_startstandby()
    self.slave_startstandby()

@_master_action
def master_startstandby(self):
    """
    Startstandby the resources of the master part, in start order.
    """
    self.master_startstandbyip()
    self.master_startstandbyfs()
    self.master_startstandbyshare()
    self.master_startstandbycontainer()
    self.master_startstandbyapp()

@_slave_action
def slave_startstandby(self):
    """
    Startstandby the encapsulated part in each relevant container.
    """
    cmd = ['startstandby']
    for container in self.get_resources('container'):
        if not container.is_up() and \
           rcEnv.nodename not in container.always_on:
            # no need to try to startstandby the encap service on a
            # container we not activated
            continue
        try:
            self._encap_cmd(cmd, container, verbose=True)
        except ex.excError:
            self.log.error("container %s is not joinable to execute "
                           "action '%s'", container.name, ' '.join(cmd))
            raise
def dns_update(self):
    """
    Call the dns update method of each resource.
    """
    self.all_set_action("dns_update")

def postsync(self):
    """ action triggered by a remote master node after
        sync_nodes and sync_drp. Typically make use of files
        received in var/
    """
    self.all_set_action("postsync")

def remote_postsync(self):
    """ Release the svc lock at this point because the
        waitlock timeout is long and we are done touching
        local data.

        Action triggered by a remote master node after
        sync_nodes and sync_drp. Typically make use of files
        received in var/.
        use a long waitlock timeout to give a chance to
        remote syncs to finish
    """
    self.svcunlock()
    for nodename in self.need_postsync:
        self.remote_action(nodename, 'postsync', waitlock=3600)

    self.need_postsync = set()
def remote_action(self, nodename, action, waitlock=DEFAULT_WAITLOCK,
                  sync=False, verbose=True, action_mode=True):
    """
    Execute a service action on a remote node through rsh.

    :param nodename: the remote node to execute on
    :param action: the action string, split into command words
    :param waitlock: lock wait timeout forwarded as --waitlock when
                     different from the default
    :param sync: run synchronously and return (out, err, ret); otherwise
                 enqueue on the node command worker and return None
    :param action_mode: forward --cluster only for real actions
    """
    if self.options.cron:
        # the scheduler action runs forked. don't use the cmdworker
        # in this context as it may hang
        sync = True

    rcmd = [os.path.join(rcEnv.pathetc, self.svcname)]
    if self.options.debug:
        rcmd += ['--debug']
    if self.options.cluster and action_mode:
        rcmd += ['--cluster']
    if self.options.cron:
        rcmd += ['--cron']
    if waitlock != DEFAULT_WAITLOCK:
        # fix: the original tested self.options.waitlock here, which
        # dropped the <waitlock> value explicitly passed by callers
        # such as remote_postsync(waitlock=3600)
        rcmd += ['--waitlock', str(waitlock)]
    rcmd += action.split()
    cmd = rcEnv.rsh.split() + [nodename] + rcmd
    if verbose:
        self.log.info("exec '%s' on node %s", ' '.join(rcmd), nodename)
    if sync:
        out, err, ret = justcall(cmd)
        return out, err, ret
    else:
        self.node.cmdworker.enqueue(cmd)
def presync(self):
    """ prepare files to send to slave nodes in var/.
        Each resource can prepare its own set of files.
    """
    self.need_postsync = set()
    if self.presync_done:
        # already ran in this agent invocation
        return
    self.all_set_action("presync")
    self.presync_done = True
def sync_nodes(self):
    """
    The 'sync nodes' action entrypoint: replicate service data to the
    cluster nodes through the file-level sync drivers, then trigger
    postsync on the peers.
    """
    rtypes = [
        "sync.rsync",
        "sync.zfs",
        "sync.btrfs",
        "sync.docker",
        "sync.dds",
    ]
    if not self.can_sync(rtypes, 'nodes'):
        return
    self.presync()
    for rtype in rtypes:
        self.sub_set_action(rtype, "sync_nodes")
    self.remote_postsync()

def sync_drp(self):
    """
    The 'sync drp' action entrypoint: replicate service data to the
    disaster recovery nodes through the file-level sync drivers, then
    trigger postsync on the peers.
    """
    rtypes = [
        "sync.rsync",
        "sync.zfs",
        "sync.btrfs",
        "sync.docker",
        "sync.dds",
    ]
    if not self.can_sync(rtypes, 'drpnodes'):
        return
    self.presync()
    for rtype in rtypes:
        self.sub_set_action(rtype, "sync_drp")
    self.remote_postsync()
def syncswap(self):
self.sub_set_action("sync.netapp", "syncswap")
self.sub_set_action("sync.symsrdfs", "syncswap")
self.sub_set_action("sync.hp3par", "syncswap")
self.sub_set_action("sync.nexenta", "syncswap")
def sync_revert(self):
self.sub_set_action("sync.hp3par", "sync_revert")
def sync_resume(self):
self.sub_set_action("sync.netapp", "sync_resume")
self.sub_set_action("sync.symsrdfs", "sync_resume")
self.sub_set_action("sync.hp3par", "sync_resume")
self.sub_set_action("sync.dcsckpt", "sync_resume")
self.sub_set_action("sync.nexenta", "sync_resume")
def sync_quiesce(self):
self.sub_set_action("sync.netapp", "sync_quiesce")
self.sub_set_action("sync.nexenta", "sync_quiesce")
def resync(self):
self.stop()
self.sync_resync()
self.start()
def sync_resync(self):
self.sub_set_action("sync.netapp", "sync_resync")
self.sub_set_action("sync.nexenta", "sync_resync")
self.sub_set_action("sync.rados", "sync_resync")
self.sub_set_action("sync.dds", "sync_resync")
self.sub_set_action("sync.symclone", "sync_resync")
self.sub_set_action("sync.symsnap", "sync_resync")
self.sub_set_action("sync.ibmdssnap", "sync_resync")
self.sub_set_action("sync.evasnap", "sync_resync")
self.sub_set_action("sync.necismsnap", "sync_resync")
self.sub_set_action("sync.dcssnap", "sync_resync")
def sync_break(self):
self.sub_set_action("sync.netapp", "sync_break")
self.sub_set_action("sync.nexenta", "sync_break")
self.sub_set_action("sync.hp3par", "sync_break")
self.sub_set_action("sync.dcsckpt", "sync_break")
self.sub_set_action("sync.symclone", "sync_break")
self.sub_set_action("sync.symsnap", "sync_break")
def sync_update(self):
self.sub_set_action("sync.netapp", "sync_update")
self.sub_set_action("sync.hp3par", "sync_update")
self.sub_set_action("sync.hp3parsnap", "sync_update")
self.sub_set_action("sync.nexenta", "sync_update")
self.sub_set_action("sync.dcsckpt", "sync_update")
self.sub_set_action("sync.dds", "sync_update")
self.sub_set_action("sync.btrfssnap", "sync_update")
self.sub_set_action("sync.zfssnap", "sync_update")
self.sub_set_action("sync.s3", "sync_update")
self.sub_set_action("sync.symclone", "sync_update")
self.sub_set_action("sync.symsnap", "sync_update")
self.sub_set_action("sync.ibmdssnap", "sync_update")
def sync_full(self):
    """
    The 'sync full' action entrypoint.
    Forward the action to each sync resource driver supporting it.
    """
    self.sub_set_action("sync.dds", "sync_full")
    self.sub_set_action("sync.zfs", "sync_full")
    self.sub_set_action("sync.btrfs", "sync_full")
    self.sub_set_action("sync.s3", "sync_full")
def sync_restore(self):
    """
    The 'sync restore' action entrypoint.
    Only the s3 sync driver supports this action.
    """
    self.sub_set_action("sync.s3", "sync_restore")
def sync_split(self):
    """
    The 'sync split' action entrypoint.
    Only the symmetrix srdf/s sync driver supports this action.
    """
    self.sub_set_action("sync.symsrdfs", "sync_split")
def sync_establish(self):
    """
    The 'sync establish' action entrypoint.
    Only the symmetrix srdf/s sync driver supports this action.
    """
    self.sub_set_action("sync.symsrdfs", "sync_establish")
def sync_verify(self):
    """
    The 'sync verify' action entrypoint.
    Only the dds sync driver supports this action.
    """
    self.sub_set_action("sync.dds", "sync_verify")
def print_config(self):
    """
    The 'print config' action entry point.
    Print the service configuration in the format specified by --format,
    or as colorized raw text when no format is specified.
    """
    if self.options.format is None:
        from rcColor import print_color_config
        print_color_config(self.paths.cf)
        return
    return self.print_config_data()
def make_temp_config(self):
    """
    Copy the current service configuration file to a temporary
    location for edition, and return the temp file path.
    If the temp file already exists, a previous edition was left
    unapplied: honor --recover (keep the temp file as-is) or
    --discard (reseed it from the live config), else refuse to
    proceed.
    """
    import shutil
    path = os.path.join(rcEnv.pathtmp, self.svcname+".conf.tmp")
    if not os.path.exists(path):
        shutil.copy(self.paths.cf, path)
        return path
    if self.options.recover:
        # reuse the unapplied temp config as-is
        return path
    if self.options.discard:
        # throw away the unapplied temp config
        shutil.copy(self.paths.cf, path)
        return path
    raise ex.excError("%s exists: service is already being edited. "
                      "Set --discard to edit from the current "
                      "configuration, or --recover to open the "
                      "unapplied config" % path)
def edit_config(self):
    """
    Execute an editor on the service configuration file.
    When the editor exits, validate the new configuration file.
    If validation pass, install the new configuration,
    else keep the previous configuration in place and offer the
    user the --recover or --discard choices for its next edit
    config action.
    """
    # pick the editor: $EDITOR, else a platform default
    if "EDITOR" in os.environ:
        editor = os.environ["EDITOR"]
    elif os.name == "nt":
        editor = "notepad"
    else:
        editor = "vi"
    from rcUtilities import which
    if not which(editor):
        print("%s not found" % editor, file=sys.stderr)
        return 1
    # edit a work copy, never the live config directly
    path = self.make_temp_config()
    os.environ["LANG"] = "en_US.UTF-8"
    os.system(' '.join((editor, path)))
    results = self._validate_config(path=path)
    if results["errors"] == 0:
        # validation passed: install the edited copy
        import shutil
        shutil.copy(path, self.paths.cf)
        os.unlink(path)
    else:
        print("your changes were not applied because of the errors "
              "reported above. you can use the edit config command "
              "with --recover to try to fix your changes or with "
              "--discard to restart from the live config")
    return results["errors"] + results["warnings"]
def can_sync(self, rtypes=None, target=None):
    """
    Return True as soon as one resource of a type listed in <rtypes>
    reports it can sync to <target>.
    Return False if no resource can sync, or if a resource raises an
    execution error while deciding.
    """
    if rtypes is None:
        rtypes = []
    for rtype in rtypes:
        for resource in self.get_resources(rtype):
            try:
                if resource.can_sync(target):
                    return True
            except ex.excError:
                return False
    self.log.debug("nothing to sync for the service for now")
    return False
def sched_sync_all(self):
    """
    The 'sync_all' scheduler task entrypoint.
    Ask the scheduler which sync options pass the constraints, and
    run the action only on those.
    """
    data = self.sched.skip_action("sync_all", deferred_write_timestamp=True)
    if len(data["keep"]) == 0:
        # nothing passed the scheduler constraints
        return
    self._sched_sync_all(data["keep"])
@scheduler_fork
def _sched_sync_all(self, sched_options):
    """
    Call the sync_all method of each sync resources that passed the
    scheduler constraints.
    """
    options = Storage(self.options)
    # restrict the action to the rids elected by the scheduler
    options.rid = [option.section for option in sched_options]
    self.action("sync_all", options)
    # record the run so the scheduler can compute the next occurrence
    self.sched.sched_write_timestamp(sched_options)
def sync_all(self):
    """
    The 'sync all' action entrypoint.
    Run presync hooks, sync to peer nodes then to drp nodes, run the
    update-mode syncs, and finally trigger remote postsync.
    """
    if not self.can_sync(["sync"]):
        return
    if self.options.cron:
        self.sched.sched_delay()
    self.presync()
    # sync to peer nodes first
    self.sub_set_action("sync.rsync", "sync_nodes")
    self.sub_set_action("sync.zfs", "sync_nodes")
    self.sub_set_action("sync.btrfs", "sync_nodes")
    self.sub_set_action("sync.docker", "sync_nodes")
    self.sub_set_action("sync.dds", "sync_nodes")
    # then to drp nodes
    self.sub_set_action("sync.rsync", "sync_drp")
    self.sub_set_action("sync.zfs", "sync_drp")
    self.sub_set_action("sync.btrfs", "sync_drp")
    self.sub_set_action("sync.docker", "sync_drp")
    self.sub_set_action("sync.dds", "sync_drp")
    self.sync_update()
    self.remote_postsync()
def push_service_status(self):
    """
    The 'push_service_status' scheduler task and action entrypoint.
    This method returns early if called from an encapsulated agent, as
    the master agent is responsible for pushing the encapsulated
    status.
    """
    if self.encap:
        if not self.options.cron:
            # only log when invoked interactively
            self.log.info("push service status is disabled for encapsulated services")
        return
    if self.sched.skip_action("push_service_status"):
        return
    self.task_push_service_status()
@scheduler_fork
def task_push_service_status(self):
    """
    Refresh and push the service status to the collector.
    """
    if self.options.cron:
        self.sched.sched_delay()
    import rcSvcmon
    # force a status re-evaluation instead of using cached values
    self.options.refresh = True
    rcSvcmon.svcmon_normal([self])
def push_resinfo(self):
    """
    The 'push_resinfo' scheduler task and action entrypoint.
    Honor the scheduler constraints, then delegate to the forked task.
    """
    if self.sched.skip_action("push_resinfo"):
        return
    self.task_push_resinfo()
@scheduler_fork
def task_push_resinfo(self):
    """
    Push the per-resource key/value pairs to the collector.
    """
    if self.options.cron:
        self.sched.sched_delay()
    self.node.collector.call('push_resinfo', [self])
def push_config(self):
    """
    The 'push_config' scheduler task entrypoint.
    Honor the scheduler constraints, then delegate to push().
    """
    if self.sched.skip_action("push_config"):
        return
    self.push()
def create_var_subdir(self):
    """
    Create the service-dedicated subdir under the agent var directory
    (rcEnv.pathvar), if not already present.
    """
    var_d = os.path.join(rcEnv.pathvar, self.svcname)
    if not os.path.exists(var_d):
        os.makedirs(var_d)
def autopush(self):
    """
    If the configuration file has been modified since the last push
    to the collector, call the push method.
    """
    if not self.collector_outdated():
        return
    # mute the second log handler during the push, restore it after
    if len(self.log.handlers) > 1:
        self.log.handlers[1].setLevel(logging.CRITICAL)
    try:
        self.push()
    finally:
        if len(self.log.handlers) > 1:
            self.log.handlers[1].setLevel(rcEnv.loglevel)
@scheduler_fork
def push(self):
    """
    The 'push' action entrypoint.
    Synchronize the configuration file between encap and master agent,
    then send the configuration to the collector.
    Finally update the last push on-disk timestamp.
    This action is skipped when run by an encapsulated agent.
    """
    if self.encap:
        return
    if self.options.cron:
        self.sched.sched_delay()
    self.push_encap_config()
    self.node.collector.call('push_all', [self])
    self.log.info("send %s to collector", self.paths.cf)
    try:
        self.create_var_subdir()
        import time
        # record the push time; read back by collector_outdated()
        with open(self.paths.push_flag, 'w') as ofile:
            ofile.write(str(time.time()))
        self.log.info("update %s timestamp", self.paths.push_flag)
    except (OSError, IOError):
        self.log.error("failed to update %s timestamp", self.paths.push_flag)
def push_encap_config(self):
    """
    Verify the service has an encapsulated part, and if so, for each
    container in up state running an encapsulated part, synchronize the
    service configuration file.
    """
    if self.encap or not self.has_encap_resources:
        return
    for resource in self.get_resources('container'):
        # only containers that are up can host a reachable encap agent
        if resource.status(ignore_nostatus=True) not in (rcStatus.STDBY_UP, rcStatus.UP):
            continue
        self._push_encap_config(resource)
def _push_encap_config(self, container):
    """
    Compare last modification time of the master and slave service
    configuration file, and copy the most recent version over the least
    recent.
    """
    # ask the encap agent for its config file mtime
    cmd = ['print', 'config', 'mtime']
    try:
        cmd_results = self._encap_cmd(cmd, container)
        out = cmd_results[0]
        ret = cmd_results[2]
    except ex.excError:
        out = None
        ret = 1
    paths = get_osvc_paths(osvc_root_path=container.osvc_root_path,
                           sysname=container.guestos)
    encap_cf = os.path.join(paths.pathetc, os.path.basename(self.paths.cf))
    if out == "":
        # this is what happens when the container is down
        return
    if ret == 0:
        encap_mtime = int(float(out.strip()))
        local_mtime = int(os.stat(self.paths.cf).st_mtime)
        if encap_mtime > local_mtime:
            # the encap copy is more recent: fetch it over the local one
            if hasattr(container, 'rcp_from'):
                cmd_results = container.rcp_from(encap_cf, rcEnv.pathetc+'/')
            else:
                cmd = rcEnv.rcp.split() + [container.name+':'+encap_cf, rcEnv.pathetc+'/']
                cmd_results = justcall(cmd)
            # align the local mtime on the encap mtime
            os.utime(self.paths.cf, (encap_mtime, encap_mtime))
            self.log.info("fetch %s from %s", encap_cf, container.name)
            if cmd_results[2] != 0:
                raise ex.excError()
            return
        elif encap_mtime == local_mtime:
            # both copies are in sync already
            return
    # the local copy is more recent, or the encap mtime could not be
    # read: send the local config and re-create the slave service
    if hasattr(container, 'rcp'):
        cmd_results = container.rcp(self.paths.cf, encap_cf)
    else:
        cmd = rcEnv.rcp.split() + [self.paths.cf, container.name+':'+encap_cf]
        cmd_results = justcall(cmd)
    if cmd_results[2] != 0:
        raise ex.excError("failed to send %s to %s" % (self.paths.cf, container.name))
    self.log.info("send %s to %s", self.paths.cf, container.name)
    cmd = ['create', '--config', encap_cf]
    cmd_results = self._encap_cmd(cmd, container=container)
    if cmd_results[2] != 0:
        raise ex.excError("failed to create %s slave service" % container.name)
    self.log.info("create %s slave service", container.name)
@staticmethod
def _tag_match(rtags, keeptags):
"""
Return True if any tag of is in .
"""
for tag in rtags:
if tag in keeptags:
return True
return False
def set_skip_resources(self, keeprid=None, xtags=None):
    """
    Set the 'skip' flag of all resources.
    * set to False if keeprid is empty and xtags is empty
    * set to False if rid is in keeprid and not in xtags
    * else set to True

    NOTE(review): when only <xtags> is given (no keeprid), the final
    'resource.skip = True' runs for every resource, not just tag
    matches — confirm this is the intended semantics.
    """
    if keeprid is None:
        keeprid = []
    if xtags is None:
        xtags = set()
    ridfilter = len(keeprid) > 0
    tagsfilter = len(xtags) > 0
    if not tagsfilter and not ridfilter:
        # no filter given: leave all skip flags untouched
        return
    for resource in self.get_resources():
        if self._tag_match(resource.tags, xtags):
            # excluded by tag
            resource.skip = True
        if ridfilter and resource.rid in keeprid:
            continue
        resource.skip = True
def setup_environ(self, action=None):
    """
    Setup environment variables.
    Startup scripts and triggers can use them, so their code can be
    more generic.
    All resources can contribute a set of env variables through their
    own setup_environ() method.
    """
    os.environ['OPENSVC_SVCNAME'] = self.svcname
    if action:
        os.environ['OPENSVC_ACTION'] = action
    for resource in self.get_resources():
        resource.setup_environ()
def all_rids(self):
    """
    Return the resource ids declared by the service, local and
    encapsulated, filtering out the None placeholder key.
    """
    rids = [rid for rid in self.resources_by_id if rid is not None]
    rids.extend(self.encap_resources.keys())
    return rids
def expand_rid(self, rid):
    """
    Given a rid, return a set containing either the rid itself if it
    is a known rid, or the rids of all resources whose driver-group
    prefix (the part before '#') matches the name given as <rid>.
    """
    retained_rids = set()
    for _rid in self.all_rids():
        if '#' in rid:
            # exact rid match requested
            if _rid == rid:
                retained_rids.add(_rid)
            continue
        # <rid> is a driver-group prefix: split() is safe even if a
        # rid unexpectedly contains no '#', unlike index() which
        # raised ValueError in that case
        if _rid.split('#', 1)[0] == rid:
            retained_rids.add(_rid)
    return retained_rids
def expand_rids(self, rids):
    """
    Parse the --rid value and return the retained corresponding resource
    ids, as a set. Non existing resource ids are filtered out.
    If a rid has no "#", it is expanded to the set of rids of resources
    whose driver-group prefix matches it.
    Example:
    --rid disk: return all rids of disk resources.
    --rid disk#0: return disk#0 if such a resource exists
    Return None when <rids> is empty.
    """
    if len(rids) == 0:
        return
    retained_rids = set()
    for rid in set(rids):
        retained_rids.update(self.expand_rid(rid))
    if retained_rids:
        self.log.debug("rids added from --rid %s: %s", ",".join(rids),
                       ",".join(retained_rids))
    return retained_rids
def expand_subsets(self, subsets):
    """
    Parse the --subsets value and return the retained corresponding
    resource ids, as a set. Return None when no subset filter applies.
    """
    if subsets is None or self.options.subsets is None:
        return
    retained_rids = set()
    # concatenate as lists: python3 dict views do not support '+'
    resources = list(self.resources_by_id.values()) + \
                list(self.encap_resources.values())
    for resource in resources:
        if resource.subset in subsets:
            retained_rids.add(resource.rid)
    if len(retained_rids) > 0:
        self.log.debug("rids added from --subsets %s: %s",
                       ",".join(subsets), ",".join(retained_rids))
    return retained_rids
def expand_tags(self, tags):
    """
    Parse the --tags value and return the retained corresponding resource
    ids, as a set. Return None when no tag filter applies.
    ',' is interpreted as OR
    '+' is interpreted as AND
    '+' are evaluated before ','
    Example:
    --tags A,B : return rids of resource with either tag A or B
    --tags A+B : return rids of resource with both tags A and B
    --tags A+B,B+C : return rids of resource with either tags A and B
                     or tags B and C
    """
    # test None before len(): the original order raised TypeError on None
    if tags is None or len(tags) == 0:
        return
    retained_rids = set()
    unions = []
    intersection = []
    for idx, tag in enumerate(tags):
        if tag[0] == "+":
            # continue the current AND group
            tag = tag[1:]
            intersection.append(tag)
            if idx == len(tags) - 1:
                unions.append(intersection)
        else:
            if len(intersection) > 0:
                # new intersection, store the current
                unions.append(intersection)
            # open a new intersection
            intersection = [tag]
            if idx == len(tags) - 1:
                unions.append(intersection)
    # concatenate as lists: python3 dict views do not support '+'
    resources = list(self.resources_by_id.values()) + \
                list(self.encap_resources.values())
    for intersection in unions:
        for resource in resources:
            # retain the resource if it carries all tags of the AND group
            if set(intersection) & resource.tags == set(intersection):
                retained_rids.add(resource.rid)
    if len(retained_rids) > 0:
        self.log.debug("rids added from --tags %s: %s", ",".join(tags),
                       ",".join(retained_rids))
    return retained_rids
def always_on_resources(self):
    """
    Return the list of resources flagged always_on on this node.
    """
    nodename = rcEnv.nodename
    return [resource for resource in self.resources_by_id.values()
            if nodename in resource.always_on]
def prepare_options(self, options):
    """
    Return a Storage() from command line options or dict passed as
    <options>, sanitized, merged with default values in self.options.
    """
    if options is None:
        options = Storage()
    elif isinstance(options, dict):
        options = Storage(options)
    if is_string(options.slave):
        # normalize --slave to a list of slave names
        options.slave = options.slave.split(',')
    if isinstance(options.resource, list):
        import json
        # --resource values may be passed as json strings: decode them
        for idx, resource in enumerate(options.resource):
            if not is_string(resource):
                continue
            options.resource[idx] = json.loads(resource)
    self.options.update(options)
    options = self.options
    return options
def action(self, action, options=None):
    """
    The service action main entrypoint.
    Handle the run file flag creation after the action is done,
    whatever its status.
    """
    try:
        return self._action(action, options)
    finally:
        # the scheduler action manages its own run flag
        if action != "scheduler":
            self.set_run_flag()
def options_to_rids(self, options):
    """
    Return the list of rids to apply an action to, from the command
    line options passed as <options> (--rid, --tags, --subsets,
    --xtags).
    """
    rid = options.get("rid", None)
    tags = options.get("tags", None)
    subsets = options.get("subsets", None)
    xtags = options.get("xtags", None)
    # normalize each specifier to a list/set
    if rid is None:
        rid = []
    elif is_string(rid):
        rid = rid.split(',')
    if tags is None:
        tags = []
    elif is_string(tags):
        # keep the '+' AND-marker attached to the tag it prefixes
        tags = tags.replace("+", ",+").split(',')
    if subsets is None:
        subsets = []
    elif is_string(subsets):
        subsets = subsets.split(',')
    if xtags is None:
        xtags = set()
    elif is_string(xtags):
        xtags = xtags.split(',')
    if len(self.resources_by_id.keys()) > 0:
        # intersect the expansions of each specifier
        rids = set(self.all_rids())
        # --rid
        retained_rids = self.expand_rids(rid)
        if retained_rids is not None:
            rids &= retained_rids
        # --subsets
        retained_rids = self.expand_subsets(subsets)
        if retained_rids is not None:
            rids &= retained_rids
        # --tags
        retained_rids = self.expand_tags(tags)
        if retained_rids is not None:
            rids &= retained_rids
        rids = list(rids)
        self.log.debug("rids retained after expansions intersection: %s",
                       ",".join(rids))
        if self.command_is_scoped(options) and len(rids) == 0:
            raise ex.excAbortAction("no resource match the given --rid, --subset "
                                    "and --tags specifiers")
    else:
        # no resources certainly mean the build was done with minimal=True
        # let the action go on. 'delete', for one, takes a --rid but does
        # not need resource initialization
        rids = rid
    return rids
def _action(self, action, options):
    """
    Filter resources on which the service action must act.
    Abort if the service is frozen, or if --cluster is not set on a HA
    service.
    Set up the environment variables.
    Finally do the service action either in logged or unlogged mode.
    """
    self.allow_on_this_node(action)
    options = self.prepare_options(options)
    try:
        self.action_rid_before_depends = self.options_to_rids(options)
    except ex.excAbortAction as exc:
        self.log.info(exc)
        return
    # extend the rid set with the dependencies of each selected rid
    depends = set()
    for rid in self.action_rid_before_depends:
        depends |= self.action_rid_dependencies(action, rid) - set(self.action_rid_before_depends)
    if len(depends) > 0:
        self.log.info("add rid %s to satisfy dependencies" % ", ".join(depends))
        self.action_rid = list(set(self.action_rid_before_depends) | depends)
    else:
        self.action_rid = list(self.action_rid_before_depends)
    self.action_rid_depends = list(depends)
    self.action_start_date = datetime.datetime.now()
    if self.node is None:
        self.node = node.Node()
    # refuse to run non-PRD services on a PRD node
    if self.svc_env != 'PRD' and rcEnv.node_env == 'PRD':
        self.log.error("Abort action for non PRD service on PRD node")
        return 1
    if action not in ACTIONS_ALLOW_ON_FROZEN and \
            'compliance' not in action and \
            'collector' not in action:
        if self.frozen() and not options.force:
            self.log.info("Abort action '%s' for frozen service. Use "
                          "--force to override.", action)
            return 1
        if action == "boot" and len(self.always_on_resources()) == 0 and \
                len(self.get_resources('hb')) > 0:
            self.log.info("end boot action on cluster node before "
                          "acquiring the action lock: no stdby resource "
                          "needs activation.")
            return 0
        try:
            self.cluster_mode_safety_net(action, options)
        except ex.excAbortAction as exc:
            self.log.info(str(exc))
            return 0
        except ex.excEndAction as exc:
            self.log.info(str(exc))
            return 0
        except ex.excError as exc:
            self.log.error(str(exc))
            return 1
        #
        # here we know we will run a resource state-changing action
        # purge the resource status file cache, so that we don't take
        # decision on outdated information
        #
        if not options.dry_run and action != "resource_monitor":
            self.log.debug("purge all resource status file caches")
            self.purge_status_last()
    self.setup_environ(action=action)
    self.setup_signal_handlers()
    self.set_skip_resources(keeprid=self.action_rid, xtags=options.xtags)
    if action.startswith("print_") or \
            action.startswith("collector") or \
            action.startswith("json_"):
        return self.do_print_action(action, options)
    if action in ACTIONS_NO_LOG or \
            action.startswith("compliance") or \
            action.startswith("docker") or \
            options.dry_run:
        err = self.do_action(action, options)
    else:
        err = self.do_logged_action(action, options)
    return err
def do_print_action(self, action, options):
    """
    Call the service method associated with action. This method produces
    data the caller will print.
    If --cluster is set, execute the action on remote nodes and
    aggregate the results.
    """
    # keep a copy of the original action name for the remote call
    _action = action + ""
    if action.startswith("json_"):
        # json_foo is an alias for print_foo --format json
        action = "print_"+action[5:]
        self.node.options.format = "json"
        self.options.format = "json"
        options.format = "json"
    if "_json_" in action:
        action = action.replace("_json_", "_")
        self.node.options.format = "json"
        self.options.format = "json"
        options.format = "json"
    if options.cluster and options.format != "json":
        raise ex.excError("only the json output format is allowed with --cluster")
    if action.startswith("collector_"):
        from collector import Collector
        collector = Collector(options, self.node, self.svcname)
        func = getattr(collector, action)
    else:
        func = getattr(self, action)
    if not hasattr(func, "__call__"):
        raise ex.excError("%s is not callable" % action)
    psinfo = self.do_cluster_action(_action, collect=True, action_mode=False)
    try:
        data = func()
    except Exception as exc:
        data = {"error": str(exc)}
    if psinfo:
        # --cluster is set and we have remote responses
        results = self.join_cluster_action(**psinfo)
        for nodename in results:
            # keep only stdout of each remote result
            results[nodename] = results[nodename][0]
            if options.format == "json":
                import json
                try:
                    results[nodename] = json.loads(results[nodename])
                except ValueError as exc:
                    results[nodename] = {"error": str(exc)}
        results[rcEnv.nodename] = data
        return results
    elif options.cluster:
        # no remote though --cluster is set
        results = {}
        results[rcEnv.nodename] = data
        return results
    return data
def do_cluster_action(self, action, waitlock=60, collect=False, action_mode=True):
    """
    Execute an action on remote nodes if --cluster is set and the
    service is a flex, and this node is flex primary.
    edit config, validate config, and sync* are never executed through
    this method.
    If possible execute in parallel running subprocess. Aggregate and
    return results as {"procs":, "queues":, "results":}, or None when
    the cluster codepath does not apply.
    """
    if not self.options.cluster:
        return
    if action in ("edit_config", "validate_config") or "sync" in action:
        return
    if action_mode and "flex" not in self.clustertype:
        return
    if "flex" in self.clustertype:
        # only the (drp) flex primary fans the action out
        if rcEnv.nodename == self.drp_flex_primary:
            peers = set(self.drpnodes) - set([rcEnv.nodename])
        elif rcEnv.nodename == self.flex_primary:
            peers = set(self.nodes) - set([rcEnv.nodename])
        else:
            return
    elif not action_mode:
        if rcEnv.nodename in self.nodes:
            peers = set(self.nodes) | set(self.drpnodes)
        else:
            peers = set(self.drpnodes)
        peers -= set([rcEnv.nodename])
    # replay the local command line remotely, minus --cluster
    args = [arg for arg in sys.argv[1:] if arg != "--cluster"]
    if self.options.docker_argv and len(self.options.docker_argv) > 0:
        args += self.options.docker_argv

    def wrapper(queue, **kwargs):
        """
        Execute the remote action and enqueue or print results.
        """
        collect = kwargs["collect"]
        del kwargs["collect"]
        out, err, ret = self.remote_action(**kwargs)
        if collect:
            queue.put([out, err, ret])
        else:
            if len(out):
                print(out)
            if len(err):
                print(err)
        return out, err, ret

    # initialize the serial-execution defaults first, so the Windows
    # branch does not leave these names undefined (was a NameError)
    parallel = False
    results = {}
    procs = None
    queues = None
    if rcEnv.sysname != "Windows":
        try:
            from multiprocessing import Process, Queue
            parallel = True
            results = None
            procs = {}
            queues = {}
        except ImportError:
            pass
    for nodename in peers:
        kwargs = {
            "nodename": nodename,
            "action": " ".join(args),
            "waitlock": waitlock,
            "verbose": False,
            "sync": True,
            "action_mode": action_mode,
            "collect": collect,
        }
        if parallel:
            queues[nodename] = Queue()
            proc = Process(target=wrapper, args=(queues[nodename],), kwargs=kwargs)
            proc.start()
            procs[nodename] = proc
        else:
            results[nodename] = wrapper(**kwargs)
    return {"procs": procs, "queues": queues, "results": results}
@staticmethod
def join_cluster_action(procs=None, queues=None, results=None):
    """
    Wait for the subprocesses spawned by do_cluster_action() to
    finish, then aggregate and return their results keyed by node
    name. In serial mode (procs/queues None), pass <results> through.
    """
    if procs is None or queues is None:
        return results
    results = {}
    # track joined nodes in a set: the previous list-based tracking
    # re-appended already-dead procs on every polling pass
    joined = set()
    while len(joined) < len(procs):
        for nodename, proc in procs.items():
            if nodename in joined:
                continue
            proc.join(0.1)
            if not proc.is_alive():
                joined.add(nodename)
                # drain the result queue exactly once, at join time
                queue = queues[nodename]
                if not queue.empty():
                    results[nodename] = queue.get()
    return results
def do_action(self, action, options):
    """
    Acquire the service action lock, call the service action method,
    handle its errors, and finally release the lock.
    If --cluster is set, and the service is a flex, and we are
    flex_primary run the action on all remote nodes.
    """
    if action not in ACTIONS_NO_LOCK and self.clustertype not in CLUSTER_TYPES:
        # fixed: was 'ex.exError', a typo raising AttributeError
        # instead of the intended excError
        raise ex.excError("invalid cluster type '%s'. allowed: %s" % (
            self.clustertype,
            ', '.join(CLUSTER_TYPES),
        ))
    err = 0
    if options.waitlock >= 0:
        waitlock = options.waitlock
    else:
        waitlock = self.lock_timeout
    try:
        self.svclock(action, timeout=waitlock)
    except ex.excError as exc:
        self.log.error(str(exc))
        return 1
    # NOTE(review): 'options' is passed positionally and lands in
    # do_cluster_action's 'waitlock' parameter — confirm intended
    psinfo = self.do_cluster_action(action, options)
    try:
        if action.startswith("compliance_"):
            err = getattr(self.compliance, action)()
        elif hasattr(self, action):
            self.running_action = action
            err = getattr(self, action)()
            if err is None:
                err = 0
        else:
            self.log.error("unsupported action %s", action)
            err = 1
    except ex.excEndAction as exc:
        msg = "'%s' action ended by last resource" % action
        if len(str(exc)) > 0:
            msg += ": %s" % str(exc)
        self.log.info(msg)
        err = 0
    except ex.excAbortAction as exc:
        msg = "'%s' action aborted by last resource" % action
        if len(str(exc)) > 0:
            msg += ": %s" % str(exc)
        self.log.info(msg)
        err = 0
    except ex.excError as exc:
        msg = "'%s' action stopped on execution error" % action
        self.log.debug(msg)
        err = 1
        self.rollback_handler(action)
    except ex.excSignal:
        self.log.error("interrupted by signal")
        err = 1
    except ex.MonitorAction:
        # propagate to the monitor, but release the lock first
        self.svcunlock()
        raise
    except:
        err = 1
        self.save_exc()
    finally:
        self.running_action = None
        self.svcunlock()
    if action == "start" and self.options.cluster and self.ha:
        # This situation is typical of a hb-initiated service start.
        # While the hb starts the service, its resource status is warn from
        # opensvc point of view. So after a successful startup, the hb res
        # status would stay warn until the next svcmon.
        # To avoid this drawback we can force from here the hb status.
        if err == 0:
            for resource in self.get_resources(['hb']):
                if resource.disabled:
                    continue
                resource.force_status(rcStatus.UP)
    if psinfo:
        self.join_cluster_action(**psinfo)
    return err
def rollback_handler(self, action):
    """
    Call the rollback method if
    * the action triggering this handler is a start*
    * service is not configured to not disable rollback
    * --disable-rollback is not set
    * at least one resource has been flagged rollbackable during the
      start* action
    """
    if 'start' not in action:
        return
    if self.options.disable_rollback:
        self.log.info("skip rollback %s: as instructed by --disable-rollback", action)
        return
    if self.disable_rollback:
        self.log.info("skip rollback %s: as instructed by DEFAULT.rollback=false", action)
        return
    # always_on resources are purposely not rolled back
    rids = [r.rid for r in self.get_resources() if r.can_rollback and not r.always_on]
    if len(rids) == 0:
        self.log.info("skip rollback %s: no resource activated", action)
        return
    self.log.info("trying to rollback %s on %s", action, ', '.join(rids))
    try:
        self.rollback()
    except ex.excError:
        self.log.error("rollback %s failed", action)
def do_logged_action(self, action, options):
    """
    Setup action logging to a machine-readable temp logfile, in preparation
    to the collector feeding.
    Do the action.
    Finally, feed the log to the collector.
    """
    import tempfile
    begin = datetime.datetime.now()
    # Provision a database entry to store action log later
    if action in ('postsync', 'shutdown'):
        # don't lose the action log on node shutdown
        # no background dblogger for remotely triggered postsync
        self.sync_dblogger = True
    self.node.collector.call('begin_action', self, action, begin,
                             sync=self.sync_dblogger)
    # Per action logfile to push to database at the end of the action
    tmpfile = tempfile.NamedTemporaryFile(delete=False, dir=rcEnv.pathtmp,
                                          prefix=self.svcname+'.'+action)
    actionlogfile = tmpfile.name
    tmpfile.close()
    log = logging.getLogger()
    fmt = "%(asctime)s;;%(name)s;;%(levelname)s;;%(message)s;;%(process)d;;EOL"
    actionlogformatter = logging.Formatter(fmt)
    actionlogfilehandler = logging.FileHandler(actionlogfile)
    actionlogfilehandler.setFormatter(actionlogformatter)
    actionlogfilehandler.setLevel(logging.INFO)
    log.addHandler(actionlogfilehandler)
    if "/svcmgr.py" in sys.argv:
        # log the full command line for traceability
        self.log.info(" ".join(sys.argv))
    err = self.do_action(action, options)
    # Push result and logs to database
    actionlogfilehandler.close()
    log.removeHandler(actionlogfilehandler)
    end = datetime.datetime.now()
    self.dblogger(action, begin, end, actionlogfile)
    return err
def restart(self):
    """
    The 'restart' action entrypoint.
    This action translates into 'stop' followed by 'start'.
    """
    self.stop()
    self.start()
def _migrate(self):
    """
    Call the migrate action on all container resource types that
    support it (ovm, hpvm, esx).
    """
    self.sub_set_action("container.ovm", "_migrate")
    self.sub_set_action("container.hpvm", "_migrate")
    self.sub_set_action("container.esx", "_migrate")
def destination_node_sanity_checks(self):
    """
    Raise an excError if
    * the destination node --to arg not set
    * the specified destination is the current node
    * the specified destination is not a service candidate node
    """
    if self.options.destination_node is None:
        raise ex.excError("a destination node must be provided this action")
    if self.options.destination_node == rcEnv.nodename:
        raise ex.excError("the destination is the source node")
    # NOTE(review): drpnodes are not accepted as destinations here —
    # confirm this is intended for migrate/switch
    if self.options.destination_node not in self.nodes:
        raise ex.excError("the destination node %s is not in the service "
                          "nodes list" % self.options.destination_node)
@_master_action
def migrate(self):
    """
    Service online migration.
    Drop scsi reservations, start remote fs, migrate the containers,
    stop local fs, then re-acquire reservations on the destination.
    """
    self.destination_node_sanity_checks()
    self.master_prstop()
    try:
        self.remote_action(nodename=self.options.destination_node, action='startfs --master')
        self._migrate()
    except:
        if self.has_resourceset(['disk.scsireserv']):
            self.log.error("scsi reservations were dropped. you have to "
                           "acquire them now using the 'prstart' action "
                           "either on source node or destination node, "
                           "depending on your problem analysis.")
        raise
    self.master_stopfs()
    self.remote_action(nodename=self.options.destination_node, action='prstart --master')
def switch(self):
    """
    Service move to another node.
    Stop locally then start remotely on the --to destination node.
    """
    self.destination_node_sanity_checks()
    # forward the switch to heartbeat resources before stopping locally
    self.sub_set_action("hb", "switch")
    self.stop()
    self.remote_action(nodename=self.options.destination_node, action='start')
def collector_rest_get(self, *args, **kwargs):
    """Proxy to the node collector REST GET, scoped to this service."""
    kwargs["svcname"] = self.svcname
    return self.node.collector_rest_get(*args, **kwargs)
def collector_rest_post(self, *args, **kwargs):
    """Proxy to the node collector REST POST, scoped to this service."""
    kwargs["svcname"] = self.svcname
    return self.node.collector_rest_post(*args, **kwargs)
def collector_rest_put(self, *args, **kwargs):
    """Proxy to the node collector REST PUT, scoped to this service."""
    kwargs["svcname"] = self.svcname
    return self.node.collector_rest_put(*args, **kwargs)
def collector_rest_delete(self, *args, **kwargs):
    """Proxy to the node collector REST DELETE, scoped to this service."""
    kwargs["svcname"] = self.svcname
    return self.node.collector_rest_delete(*args, **kwargs)
def collector_outdated(self):
    """
    Return True if the configuration file has changed since the last
    push to the collector, or if the last push timestamp is missing or
    unreadable. Encapsulated agents never push, so always return False
    for them.
    """
    if self.encap:
        return False
    if not os.path.exists(self.paths.push_flag):
        self.log.debug("no last push timestamp found")
        return True
    if not os.path.exists(self.paths.cf):
        # happens in 'pull' action codepath
        self.log.debug("no config file found")
        return False
    try:
        mtime = os.stat(self.paths.cf).st_mtime
        with open(self.paths.push_flag) as flag:
            last_push = float(flag.read())
    except (ValueError, IOError, OSError):
        self.log.error("can not read timestamp from %s or %s",
                       self.paths.cf, self.paths.push_flag)
        return True
    outdated = mtime > last_push
    if outdated:
        self.log.debug("configuration file changed since last push")
    return outdated
def write_config(self):
    """
    Rewrite the service configuration file, using the current parser
    object in self.config write method.
    Also reset the file mode to 644.
    """
    import tempfile
    import shutil
    try:
        # create the temp file next to the target so shutil.move()
        # stays a same-filesystem rename, and keep it after close
        # (delete=False): the previous NamedTemporaryFile() deleted
        # the file on close, then reused its name — a race
        tmpfile = tempfile.NamedTemporaryFile(
            mode="w", delete=False, dir=os.path.dirname(self.paths.cf))
        fname = tmpfile.name
        try:
            self.config.write(tmpfile)
        finally:
            tmpfile.close()
        shutil.move(fname, self.paths.cf)
    except (OSError, IOError) as exc:
        print("failed to write new %s (%s)" % (self.paths.cf, str(exc)),
              file=sys.stderr)
        raise ex.excError()
    try:
        os.chmod(self.paths.cf, 0o0644)
    except (OSError, IOError) as exc:
        self.log.debug("failed to set %s mode: %s", self.paths.cf, str(exc))
def load_config(self):
    """
    Initialize the service configuration parser object. Using an
    OrderedDict type to preserve the options and sections ordering,
    if possible.
    The parser object is a opensvc-specified class derived from
    optparse.RawConfigParser.
    """
    try:
        from collections import OrderedDict
        self.config = RawConfigParser(dict_type=OrderedDict)
    except ImportError:
        # python < 2.7 has no OrderedDict: accept unordered sections
        self.config = RawConfigParser()
    self.config.read(self.paths.cf)
def unset(self):
    """
    The 'unset' action entrypoint.
    Validate --param, default the section to DEFAULT when the
    parameter carries no explicit section, then delegate to the
    _unset() internal method. Return 0 on success, 1 on error.
    """
    if self.options.param is None:
        print("no parameter. set --param", file=sys.stderr)
        return 1
    elements = self.options.param.split('.')
    if len(elements) == 1:
        elements = ["DEFAULT"] + elements
    if len(elements) != 2:
        print("malformed parameter. format as 'section.key'", file=sys.stderr)
        return 1
    section, option = elements
    try:
        self._unset(section, option)
    except ex.excError as exc:
        print(exc, file=sys.stderr)
        return 1
    return 0
def _unset(self, section, option):
    """
    Delete an option in the service configuration file specified section.
    Raise excError if the section or the option is not found, or if the
    rewritten configuration can not be written back.
    """
    section = "[%s]" % section
    lines = self._read_cf().splitlines()
    need_write = False
    in_section = False
    # NOTE(review): 'lines' is mutated while enumerate() iterates it;
    # after a deletion the loop resumes past the shifted entries —
    # confirm a duplicate option declaration is still handled as wanted
    for i, line in enumerate(lines):
        sline = line.strip()
        if sline == section:
            in_section = True
        elif in_section:
            if sline.startswith("["):
                # reached the next section: stop searching
                break
            elif "=" in sline:
                elements = sline.split("=")
                _option = elements[0].strip()
                if option != _option:
                    continue
                del lines[i]
                need_write = True
                # also swallow the option's continuation lines
                while i < len(lines) and "=" not in lines[i] and \
                        not lines[i].strip().startswith("[") and \
                        lines[i].strip() != "":
                    del lines[i]
    if not in_section:
        raise ex.excError("section %s not found" % section)
    if not need_write:
        raise ex.excError("option '%s' not found in section %s" % (option, section))
    buff = "\n".join(lines) + "\n"
    try:
        self._write_cf(buff)
    except (IOError, OSError) as exc:
        raise ex.excError(str(exc))
def get(self):
    """
    The 'get' action entrypoint.

    Validate --param, default the section to DEFAULT if no section
    was specified, then:
    * print the raw value if --eval is not set
    * print the dereferenced and evaluated value if --eval is set
    Return 0 on success, 1 on error.
    """
    self.load_config()
    param = self.options.param
    if param is None:
        print("no parameter. set --param", file=sys.stderr)
        return 1
    words = param.split('.')
    if len(words) == 1:
        words = ["DEFAULT"] + words
    elif len(words) != 2:
        print("malformed parameter. format as 'section.key'", file=sys.stderr)
        return 1
    section, option = words
    if section != 'DEFAULT' and not self.config.has_section(section):
        print("section [%s] not found"%section, file=sys.stderr)
        return 1
    if not self.config.has_option(section, option):
        print("option '%s' not found in section [%s]"%(option, section), file=sys.stderr)
        return 1
    if self.options.eval:
        from svcBuilder import conf_get
        print(conf_get(self, self.config, section, option, "string", scope=True))
    else:
        print(self.config.get(section, option))
    return 0
def set(self):
    """
    The 'set' action entrypoint.

    Validate --param and --value, default the section to DEFAULT if
    no section was specified, then write the value through the _set
    internal method. Return 0 on success, 1 on error.
    """
    self.load_config()
    if self.options.param is None:
        print("no parameter. set --param", file=sys.stderr)
        return 1
    if self.options.value is None:
        print("no value. set --value", file=sys.stderr)
        return 1
    words = self.options.param.split('.')
    if len(words) == 1:
        words = ["DEFAULT"] + words
    elif len(words) != 2:
        print("malformed parameter. format as 'section.key'", file=sys.stderr)
        return 1
    try:
        self._set(words[0], words[1], self.options.value)
    except ex.excError as exc:
        print(exc, file=sys.stderr)
        return 1
    return 0
def setenv(self, args, interactive=False):
"""
For each option in the 'env' section of the configuration file,
* rewrite the value using the value specified in a corresponding
--env <option>=<value> commandline arg
* or prompt for the value if --interactive is set, and rewrite
* or leave the value as is, considering the default is accepted
"""
explicit_options = []
# first pass: apply the values given explicitly on the command line
for arg in args:
idx = arg.index("=")
option = arg[:idx]
value = arg[idx+1:]
self._set("env", option, value)
explicit_options.append(option)
if not interactive:
return
if not os.isatty(0):
raise ex.excError("--interactive is set but input fd is not a tty")
def get_href(ref):
# fetch the content of a [url] reference through the node's opener;
# best-effort: any failure yields an empty string
ref = ref.strip("[]")
try:
response = node.urlopen(ref)
return response.read()
except:
return ""
def print_comment(comment):
"""
Print a env keyword comment. For use in the interactive service
create codepath.
"""
import re
comment = re.sub("(\[.+://.+])", lambda m: get_href(m.group(1)), comment)
print(comment)
# second pass: interactively prompt for the remaining env keys
for key, default_val in self.env_section_keys().items():
if key.endswith(".comment"):
continue
if key in explicit_options:
continue
if self.config.has_option("env", key+".comment"):
print_comment(self.config.get("env", key+".comment"))
# NOTE(review): raw_input is python2-only -- confirm target interpreter
newval = raw_input("%s [%s] > " % (key, str(default_val)))
if newval != "":
self._set("env", key, newval)
def _set(self, section, option, value):
"""
Set <section>.<option> = <value> in the configuration file, editing
the raw file text in place so layout and comments are preserved.
Handles multiline values, duplicate option removal, and creates the
section if needed. Raises excError if the rewrite fails.
"""
section = "[%s]" % section
lines = self._read_cf().splitlines()
done = False
in_section = False
value = try_decode(value)
for idx, line in enumerate(lines):
sline = line.strip()
if sline == section:
in_section = True
elif in_section:
if sline.startswith("[") and not done:
# section found and parsed and no option => add option
section_idx = idx
# insert before the blank lines separating the sections
while section_idx > 0 and lines[section_idx-1].strip() == "":
section_idx -= 1
lines.insert(section_idx, "%s = %s" % (option, value))
done = True
break
elif "=" in sline:
elements = sline.split("=")
_option = elements[0].strip()
if option != _option:
continue
if done:
# option already set : remove dup
del lines[idx]
# also drop the duplicate's continuation lines
while idx < len(lines) and "=" not in lines[idx] and \
not lines[idx].strip().startswith("[") and \
lines[idx].strip() != "":
del lines[idx]
continue
# reconstruct the current (possibly multiline) value to
# compare it with the requested value
_value = elements[1].strip()
section_idx = idx
while section_idx < len(lines)-1 and \
"=" not in lines[section_idx+1] and \
not lines[section_idx+1].strip().startswith("["):
section_idx += 1
if lines[section_idx].strip() == "":
continue
_value += " %s" % lines[section_idx].strip()
if value.replace("\n", " ") == _value:
# value is already set as requested: nothing to write
return
lines[idx] = "%s = %s" % (option, value)
section_idx = idx
# drop the old value's continuation lines
while section_idx < len(lines)-1 and \
"=" not in lines[section_idx+1] and \
not lines[section_idx+1].strip().startswith("[") and \
lines[section_idx+1].strip() != "":
del lines[section_idx+1]
done = True
if not done:
# strip trailing blank lines before appending
while lines[-1].strip() == "":
lines.pop()
if not in_section:
# section in last position and no option => add section
lines.append("")
lines.append(section)
lines.append("%s = %s" % (option, value))
buff = "\n".join(lines) + "\n"
try:
self._write_cf(buff)
except (IOError, OSError) as exc:
raise ex.excError(str(exc))
def set_disable(self, rids=None, disable=True):
"""
Set the disable to (True|False) in the configuration file,
* at DEFAULT level if no resources were specified
* in each resource section if resources were specified
Returns 0 on success, 1 if the configuration rewrite fails.
"""
if rids is None:
rids = []
# unscoped command, or all resources selected => act on DEFAULT
if not self.command_is_scoped() and \
(len(rids) == 0 or len(rids) == len(self.resources_by_id)):
rids = ['DEFAULT']
for rid in rids:
if rid != 'DEFAULT' and not self.config.has_section(rid):
self.log.error("service %s has no resource %s", self.svcname, rid)
continue
self.log.info("set %s.disable = %s", rid, str(disable))
self.config.set(rid, "disable", str(disable).lower())
#
# if we set DEFAULT.disable = True,
# we don't want res#n.disable = False
#
if rids == ["DEFAULT"] and disable:
for section in self.config.sections():
if self.config.has_option(section, "disable") and \
not self.config.getboolean(section, "disable"):
self.log.info("remove %s.disable = false", section)
self.config.remove_option(section, "disable")
try:
self.write_config()
except (IOError, OSError) as exc:
self.log.error(str(exc))
return 1
return 0
def enable(self):
    """
    The 'enable' action entrypoint: clear the disable flag on the
    selected resources, or on the whole service.
    """
    return self.set_disable(self.action_rid, disable=False)
def disable(self):
    """
    The 'disable' action entrypoint: raise the disable flag on the
    selected resources, or on the whole service.
    """
    return self.set_disable(self.action_rid, disable=True)
def delete(self):
"""
The 'delete' action entrypoint.
If --unprovision is set, call the unprovision method.
Then if no resource specifier is set, remove all service files in
the etc directory.
If a resource specifier is set, only delete the corresponding
sections in the configuration file.
"""
if self.options.unprovision:
self.unprovision()
# unscoped, or every resource selected => remove the whole service
if not self.command_is_scoped() or \
len(self.action_rid) == len(self.resources_by_id.keys()):
import shutil
dpaths = [
os.path.join(rcEnv.pathetc, self.svcname+".dir"),
os.path.join(rcEnv.pathetc, self.svcname+".d"),
]
fpaths = [
self.paths.cf,
os.path.join(rcEnv.pathetc, self.svcname),
os.path.join(rcEnv.pathetc, self.svcname+".d"),
os.path.join(rcEnv.pathetc, self.svcname+".cluster"),
os.path.join(rcEnv.pathetc, self.svcname+".stonith"),
]
for fpath in fpaths:
if os.path.exists(fpath) and \
(os.path.islink(fpath) or os.path.isfile(fpath)):
self.log.info("remove %s", fpath)
os.unlink(fpath)
for dpath in dpaths:
if os.path.exists(dpath):
self.log.info("remove %s", dpath)
shutil.rmtree(dpath)
return 0
# scoped: drop only the selected resource sections from the raw
# configuration file text
lines = self._read_cf().splitlines()
need_write = False
for rid in self.action_rid:
section = "[%s]" % rid
in_section = False
for i, line in enumerate(lines):
sline = line.strip()
if sline == section:
in_section = True
need_write = True
# NOTE(review): deleting from 'lines' while enumerate() is
# iterating it shifts indexes -- confirm before refactoring.
del lines[i]
while i < len(lines) and not lines[i].strip().startswith("["):
del lines[i]
if not in_section:
print("service", self.svcname, "has no resource", rid, file=sys.stderr)
if not need_write:
return 0
buff = "\n".join(lines)
try:
self._write_cf(buff)
except (IOError, OSError):
print("failed to rewrite", self.paths.cf, file=sys.stderr)
return 1
return 0
def docker(self):
"""
The 'docker' action entry point.
Parse the docker argv and substitute known patterns before relaying
the argv to the docker command.
Set the socket to point the service-private docker daemon if
the service has such a daemon.
"""
import subprocess
containers = self.get_resources('container.docker')
if self.options.docker_argv is None:
print("no docker command arguments supplied", file=sys.stderr)
return 1
def subst(argv):
"""
Parse the docker argv and substitute known patterns.
Supported patterns: %instances%/{instances}, %images%/{images},
%as_service%/{as_service} and {container#<n>}.
"""
import re
# expand the instances placeholder into the container names
for idx, arg in enumerate(argv):
if arg in ("%instances%", "{instances}"):
del argv[idx]
instances = [resource.container_name for resource in containers
if not resource.skip and not resource.disabled]
for instance in instances:
argv.insert(idx, instance)
# expand the images placeholder into the deduplicated image names
for idx, arg in enumerate(argv):
if arg in ("%images%", "{images}"):
del argv[idx]
images = list(set([resource.run_image for resource in containers
if not resource.skip and not resource.disabled]))
for image in images:
argv.insert(idx, image)
# expand the as_service placeholder into registry credentials
for idx, arg in enumerate(argv):
if arg in ("%as_service%", "{as_service}"):
del argv[idx]
argv[idx:idx] = ["-u", self.svcname+"@"+rcEnv.nodename]
argv[idx:idx] = ["-p", self.node.config.get("node", "uuid")]
if self.dockerlib.docker_min_version("1.12"):
pass
elif self.dockerlib.docker_min_version("1.10"):
# docker < 1.12 wants an --email for registry login
argv[idx:idx] = ["--email", self.svcname+"@"+rcEnv.nodename]
# expand {container#n} into the actual container name
for idx, arg in enumerate(argv):
if re.match(r'\{container#\w+\}', arg):
container_name = self.svcname + "." + arg.strip("{}").replace("#", ".")
del argv[idx]
argv.insert(idx, container_name)
return argv
if len(containers) == 0:
print("this service has no docker resource", file=sys.stderr)
return 1
# make sure the (possibly service-private) docker daemon is up
self.dockerlib.docker_start(verbose=False)
cmd = self.dockerlib.docker_cmd + subst(self.options.docker_argv)
proc = subprocess.Popen(cmd)
proc.communicate()
return proc.returncode
def freeze(self):
    """
    Freeze the service: call the freeze method of each heartbeat
    resource, then raise the frozen flag via the freezer.
    """
    for res in self.get_resources("hb"):
        res.freeze()
    self.freezer.freeze()
def thaw(self):
    """
    Thaw the service: call the thaw method of each heartbeat
    resource, then clear the frozen flag via the freezer.
    """
    for res in self.get_resources("hb"):
        res.thaw()
    self.freezer.thaw()
def frozen(self):
    """
    Return True if the service frozen flag is raised.
    """
    return self.freezer.frozen()
def pull(self):
"""
Pull a service configuration from the collector, install it and
create the svcmgr link. Raises excError if the collector answer
is an error, or contains no or an empty configuration.
"""
data = self.node.collector_rest_get("/services/"+self.svcname+"?props=svc_config&meta=0")
if "error" in data:
raise ex.excError(data["error"])
if len(data["data"]) == 0:
raise ex.excError("service not found on the collector")
if len(data["data"][0]["svc_config"]) == 0:
raise ex.excError("service has an empty configuration")
# the collector escapes newlines and tabs in the stored config
buff = data["data"][0]["svc_config"].replace("\\n", "\n").replace("\\t", "\t")
import codecs
with codecs.open(self.paths.cf, "w", "utf8") as ofile:
ofile.write(buff)
self.log.info("%s pulled", self.paths.cf)
self.node.install_service_files(self.svcname)
if self.options.provision:
self.action("provision")
def validate_config(self, path=None):
    """
    The 'validate config' action entrypoint. Return the sum of the
    warning and error counts reported by _validate_config().
    """
    results = self._validate_config(path=path)
    return results["warnings"] + results["errors"]
def _validate_config(self, path=None):
"""
The validate config core method.
Returns a dict with the count of syntax warnings and errors, keyed
by "warnings" and "errors". If <path> is None, the already loaded
self.config is validated, else the file at <path> is parsed.
"""
from svcDict import KeyDict, deprecated_sections
from svcBuilder import build, handle_references
from rcUtilities import convert_size
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
data = KeyDict(provision=True)
ret = {
"errors": 0,
"warnings": 0,
}
if path is None:
config = self.config
else:
config = RawConfigParser()
try:
config.read(path)
except ConfigParser.ParsingError:
self.log.error("error parsing %s" % path)
ret["errors"] += 1
def check_scoping(key, section, option):
"""
Verify the specified option scoping is allowed.
Returns the number of errors found (0 or 1).
"""
if not key.at and "@" in option:
self.log.error("option %s.%s does not support scoping", section, option)
return 1
return 0
def check_references(section, option):
"""
Verify the specified option references.
Returns the number of errors found (0 or 1).
"""
value = config.get(section, option)
try:
value = handle_references(self, config, value, scope=True)
except ex.excError as exc:
# hook-command options may legitimately reference runtime-only
# values: don't flag those
if not option.startswith("pre_") and \
not option.startswith("post_") and \
not option.startswith("blocking_"):
self.log.error(str(exc))
return 1
except Exception as exc:
self.log.error(str(exc))
return 1
return 0
def get_val(key, section, option):
"""
Fetch the value and convert it to expected type.
"""
_option = option.split("@")[0]
# NOTE(review): conf_get_string_scope is not imported in this
# method -- presumably a module-level import; confirm.
value = conf_get_string_scope(self, config, section, _option)
if isinstance(key.default, bool):
return bool(value)
elif isinstance(key.default, int):
try:
return int(value)
except ValueError:
# might be a size string like 11mib
return convert_size(value)
return value
def check_candidates(key, section, option, value):
"""
Verify the specified option value is in allowed candidates.
Returns the number of errors found (0 or 1).
"""
if key.strict_candidates and key.candidates and value not in key.candidates:
if isinstance(key.candidates, (set, list, tuple)):
candidates = ", ".join(key.candidates)
else:
candidates = str(key.candidates)
self.log.error("option %s.%s value %s is not in valid candidates: %s",
section, option, str(value), candidates)
return 1
return 0
def check_known_option(key, section, option):
"""
Verify the specified option scoping, references and that the value
is in allowed candidates. Returns the number of errors found.
"""
err = 0
err += check_scoping(key, section, option)
if check_references(section, option) != 0:
err += 1
return err
try:
value = get_val(key, section, option)
except ex.OptNotFound:
return 0
err += check_candidates(key, section, option, value)
return err
def validate_default_options(config, data, ret):
"""
Validate DEFAULT section options.
"""
for option in config.defaults():
key = data.sections["DEFAULT"].getkey(option)
if key is None:
found = False
# the option can be set in the DEFAULT section for the
# benefit of a resource section
for section in config.sections():
family = section.split("#")[0]
if family not in list(data.sections.keys()) + \
list(deprecated_sections.keys()):
continue
if family in deprecated_sections:
results = deprecated_sections[family]
family = results[0]
if data.sections[family].getkey(option) is not None:
found = True
break
if not found:
self.log.warning("ignored option DEFAULT.%s", option)
ret["warnings"] += 1
else:
# here we know its a native DEFAULT option
ret["errors"] += check_known_option(key, "DEFAULT", option)
return ret
def validate_resources_options(config, data, ret):
"""
Validate resource sections options.
"""
for section in config.sections():
if section == "env":
# the "env" section is not handled by a resource driver, and is
# unknown to the svcDict. Just ignore it.
continue
family = section.split("#")[0]
if config.has_option(section, "type"):
rtype = config.get(section, "type")
else:
rtype = None
if family not in list(data.sections.keys()) + list(deprecated_sections.keys()):
self.log.warning("ignored section %s", section)
ret["warnings"] += 1
continue
if family in deprecated_sections:
self.log.warning("deprecated section prefix %s", family)
ret["warnings"] += 1
family, rtype = deprecated_sections[family]
for option in config.options(section):
if option in config.defaults():
continue
key = data.sections[family].getkey(option, rtype=rtype)
if key is None:
# fall back to the driver-generic keys
key = data.sections[family].getkey(option)
if key is None:
self.log.warning("ignored option %s.%s, driver %s", section,
option, rtype if rtype else "generic")
ret["warnings"] += 1
else:
ret["errors"] += check_known_option(key, section, option)
return ret
def validate_build(path, ret):
"""
Try a service build to catch errors missed in other tests.
"""
try:
build(self.svcname, svcconf=path)
except Exception as exc:
self.log.error("the new configuration causes the following "
"build error: %s", str(exc))
ret["errors"] += 1
return ret
ret = validate_default_options(config, data, ret)
ret = validate_resources_options(config, data, ret)
ret = validate_build(path, ret)
return ret
def has_run_flag(self):
    """
    Return True if the run flag is set, or if the run flag directory
    does not exist at all.
    """
    flag_dir = os.path.dirname(self.paths.run_flag)
    if not os.path.exists(flag_dir):
        return True
    return os.path.exists(self.paths.run_flag)
def set_run_flag(self):
    """
    Create the run flag file if its parent directory exists and the
    flag is not already present.

    This flag absence inhibits the service scheduler. A known issue
    with scheduled tasks during init is the 'monitor vs boot' lock
    contention.
    """
    flag_dir = os.path.dirname(self.paths.run_flag)
    if not os.path.exists(flag_dir):
        self.log.debug("%s does not exists", flag_dir)
        return
    if os.path.exists(self.paths.run_flag):
        self.log.debug("%s already exists", self.paths.run_flag)
        return
    self.log.debug("create %s", self.paths.run_flag)
    try:
        with open(self.paths.run_flag, "w"):
            pass
    except (IOError, OSError) as exc:
        self.log.error("failed to create %s: %s",
                       self.paths.run_flag, str(exc))
def save_exc(self):
    """
    Helper: log a generic error message, and dump the current stack
    trace at debug level in the service log.
    """
    self.log.error("unexpected error. stack saved in the service debug log")
    self.log.debug("", exc_info=True)
def vcall(self, *args, **kwargs):
    """
    Wrap the shared vcall helper, forcing the service logger into the
    keyword arguments.
    """
    kwargs["log"] = self.log
    return vcall(*args, **kwargs)
def _read_cf(self):
"""
Return the service config file content.
"""
import codecs
with codecs.open(self.paths.cf, "r", "utf8") as ofile:
buff = ofile.read()
return buff
def _write_cf(self, buff):
    """
    Write buff to a temporary file in the opensvc tmp directory, then
    move it over the service configuration file, so readers never see
    a half-written config.
    """
    import codecs
    import tempfile
    import shutil
    tmpf = tempfile.NamedTemporaryFile(delete=False, dir=rcEnv.pathtmp, prefix=self.svcname)
    fpath = tmpf.name
    os.chmod(fpath, 0o0644)
    tmpf.close()
    with codecs.open(fpath, "w", "utf8") as cfile:
        cfile.write(buff)
    shutil.move(fpath, self.paths.cf)
def allocate_rid(self, group):
    """
    Return the first unused resource id in the <group> driver group,
    formatted as '<group>#<n>' with n starting at 1.
    """
    used = set(res.rid for res in self.get_resources(group))
    index = 1
    while "#".join((group, str(index))) in used:
        index += 1
    return "#".join((group, str(index)))
def update(self):
"""
The 'update' action entry point.
Add resources to the service configuration, and provision them if
instructed to do so.
"""
self.load_config()
# snapshot the current config into plain dicts, and index the used
# resource indexes per driver group
sections = {}
rtypes = {}
defaults = self.config.defaults()
for section in self.config.sections():
sections[section] = {}
elements = section.split('#')
if len(elements) == 2:
rtype = elements[0]
ridx = elements[1]
if rtype not in rtypes:
rtypes[rtype] = set([])
rtypes[rtype].add(ridx)
for option, value in self.config.items(section):
# NOTE(review): defaults.keys() + ['rtype'] raises TypeError on
# python3 (dict_keys does not support +); python2-only -- confirm.
if option in defaults.keys() + ['rtype']:
continue
sections[section][option] = value
import json
import svcBuilder
from svcDict import KeyDict, MissKeyNoDefault, KeyInvalidValue
keys = KeyDict(provision=self.options.provision)
rid = []
# merge each --resource dict into the section snapshot
for data in self.options.resource:
is_resource = False
if 'rid' in data:
# explicit rid: merge into the existing section or create it
section = data['rid']
if '#' not in section:
raise ex.excError("%s must be formatted as 'rtype#n'" % section)
elements = section.split('#')
if len(elements) != 2:
raise ex.excError("%s must be formatted as 'rtype#n'" % section)
del data['rid']
if section in sections:
sections[section].update(data)
else:
sections[section] = data
is_resource = True
elif 'rtype' in data and data["rtype"] == "env":
# env pseudo-section
del data["rtype"]
if "env" in sections:
sections["env"].update(data)
else:
sections["env"] = data
elif 'rtype' in data and data["rtype"] != "DEFAULT":
# no rid given: allocate the next free one in the group
section = self.allocate_rid(data['rtype'])
self.log.info("allocated rid %s" % section)
del data['rtype']
sections[section] = data
is_resource = True
else:
# DEFAULT section update
if "rtype" in data:
del data["rtype"]
defaults.update(data)
if is_resource:
try:
sections[section].update(keys.update(section, data))
except (MissKeyNoDefault, KeyInvalidValue):
if not self.options.interactive:
raise ex.excError("missing parameters")
rid.append(section)
# write the merged snapshot back into the parser and to disk
for section, data in sections.items():
if not self.config.has_section(section):
self.config.add_section(section)
for key, val in data.items():
self.config.set(section, key, val)
self.write_config()
# instantiate the new resources through their svcBuilder adders
for section, data in sections.items():
group = section.split("#")[0]
getattr(svcBuilder, 'add_'+group)(self, self.config, section)
if self.options.provision and len(rid) > 0:
options = Storage(self.options)
options.rid = rid
self.action("provision", options)
def allow_on_this_node(self, action):
    """
    Raise excError if the service is not allowed to run on this node:
    the action is not in the always-allowed list, the service is not
    in cloud-proxy mode, and the local nodename is neither a service
    node nor a drp node.
    """
    if action in ACTIONS_ALLOW_ON_INVALID_NODE:
        return
    if self.type in rcEnv.vt_cloud:
        return
    if rcEnv.nodename in self.nodes or rcEnv.nodename in self.drpnodes:
        return
    raise ex.excError("action '%s' aborted because this node's hostname "
                      "'%s' is not a member of DEFAULT.nodes, "
                      "DEFAULT.drpnode nor DEFAULT.drpnodes" % \
                      (action, rcEnv.nodename))
def compliance_auto(self):
    """
    The 'compliance auto' scheduled task entrypoint: run the action
    unless the scheduler decides it should be skipped.
    """
    if not self.sched.skip_action("compliance_auto"):
        self.action("compliance_auto")
opensvc-1.8~20170412/lib/rcDiskInfoLinux.py 0000644 0001750 0001750 00000030517 13073467726 020464 0 ustar jkelbert jkelbert from __future__ import print_function
import sys
import os
import re
from rcUtilities import justcall, which
from rcUtilitiesLinux import udevadm_settle
import rcDiskInfo
import math
from rcGlobalEnv import rcEnv
import rcDevTreeVeritas
import glob
class diskInfo(rcDiskInfo.diskInfo):
disk_ids = {}
def __init__(self, deferred=False):
pass
def prefix_local(self, id):
return '.'.join((rcEnv.nodename, id))
def disk_id(self, dev):
if 'cciss' in dev:
id = self.cciss_id(dev)
elif dev.startswith('/dev/disk/by-id/wwn-0x'):
id = dev.replace('/dev/disk/by-id/wwn-0x', '')
elif dev.startswith('/dev/disk/by-id/scsi-2'):
id = dev.replace('/dev/disk/by-id/scsi-2', '')
elif dev.startswith('/dev/disk/by-id/scsi-3'):
id = dev.replace('/dev/disk/by-id/scsi-3', '')
elif dev.startswith('/dev/mapper/3'):
id = dev.replace('/dev/mapper/3', '')
elif dev.startswith('/dev/mapper/2'):
id = dev.replace('/dev/mapper/2', '')
elif "dmp/" in dev:
id = rcDevTreeVeritas.DevTreeVeritas().vx_inq(dev)
elif "Google_PersistentDisk_" in dev or "google-" in dev:
id = self.gce_disk_id(dev)
else:
id = self.scsi_id(dev)
if len(id) == 0:
return self.prefix_local(dev.replace('/dev/','').replace('/','!'))
return id
def get_gce_instance_data(self):
if hasattr(self, "cache_instance_data"):
return self.cache_instance_data
cmd = ["gcloud", "compute", "instances", "describe", "-q", "--format", "json", rcEnv.nodename]
out, err, ret = justcall(cmd)
import json
self.cache_instance_data = json.loads(out)
return self.cache_instance_data
def gce_disk_id(self, dev):
if "Google_PersistentDisk_" in dev:
devname = dev.split("Google_PersistentDisk_")[-1]
else:
devname = dev.split("google-")[-1]
gce_instance_data = self.get_gce_instance_data()
for disk in gce_instance_data["disks"]:
if disk["deviceName"] != devname:
continue
i = disk["source"].index("/project")
return str(disk["source"][i:].replace("/projects", "").replace("/zones", "").replace("/disks", ""))
def cciss_id(self, dev):
if dev in self.disk_ids:
return self.disk_ids[dev]
if which('cciss_id'):
cciss_id = 'cciss_id'
else:
return ""
cmd = [cciss_id, dev]
out, err, ret = justcall(cmd)
if ret == 0:
id = out.split('\n')[0]
if id.startswith('3'):
id = id[1:]
else:
id = self.prefix_local(id)
self.disk_ids[dev] = id
return id
return ""
def mpath_id(self, dev):
self.load_mpath()
if 'dev' not in self.mpath_h:
return None
return self.mpath_h(dev)
def load_mpath_native(self):
cmd = ['multipath', '-l']
out, err, ret = justcall(cmd)
if ret != 0:
return
lines = out.split('\n')
if len(lines) == 0:
return
self.mpath_h = {}
regex = re.compile('[(]*[0-9a-f]*[)]*')
for line in lines:
if len(line) > 0 and \
line[0] not in (' ', '\\', '[', '`', '|'):
l = line.split()
if l[0].startswith("size="):
continue
wwid = None
for w in l:
w = w.strip("()")
if len(w) not in [17, 33]:
continue
if regex.match(w) is None:
continue
if w[0] in ("2,", "3", "5"):
wwid = w[1:]
elif " sd" in line:
l = line.split()
for i, w in enumerate(l):
if w.startswith('sd'):
dev = "/dev/"+w
self.mpath_h[dev] = wwid
def load_mpath(self):
if hasattr(self, "mpath_h"):
return self.mpath_h
self.mpath_h = {}
if which('multipath'):
self.load_mpath_native()
return self.mpath_h
def scsi_id(self, dev):
s = self._scsi_id(dev, ["-p", "0x83"])
if len(s) == 0:
s = self._scsi_id(dev, ["-p", "pre-spc3-83"])
return s
def _scsi_id(self, dev, args=[]):
wwid = self.mpath_id(dev)
if wwid is not None:
return wwid
if dev in self.disk_ids:
return self.disk_ids[dev]
if which('scsi_id'):
scsi_id = 'scsi_id'
elif which('/lib/udev/scsi_id'):
scsi_id = '/lib/udev/scsi_id'
else:
return ""
cmd = [scsi_id, '-g', '-u'] + args + ['-d', dev]
out, err, ret = justcall(cmd)
if ret == 0:
id = out.split('\n')[0]
if id.startswith('3') or id.startswith('2') or id.startswith('5'):
id = id[1:]
else:
id = self.prefix_local(id)
self.disk_ids[dev] = id
return id
sdev = dev.replace("/dev/", "/block/")
cmd = [scsi_id, '-g', '-u'] + args + ['-s', sdev]
out, err, ret = justcall(cmd)
if ret == 0:
id = out.split('\n')[0]
if id.startswith('3') or id.startswith('2') or id.startswith('5'):
id = id[1:]
else:
id = self.prefix_local(id)
self.disk_ids[dev] = id
return id
return ""
def devpath_to_sysname(self, devpath):
devpath = os.path.realpath(devpath)
return os.path.basename(devpath)
def disk_vendor(self, dev):
if 'cciss' in dev:
return 'HP'
s = ''
dev = self.devpath_to_sysname(dev)
if dev.startswith("sd"):
dev = re.sub("[0-9]+$", "", dev)
path = '/sys/block/%s/device/vendor' % dev
if not os.path.exists(path):
l = glob.glob("/sys/block/%s/slaves/*/device/vendor" % dev)
if len(l) > 0:
path = l[0]
if not os.path.exists(path):
return ""
with open(path, 'r') as f:
s = f.read()
f.close()
if '6900' in s:
s = 'Red Hat'
return s.strip()
def disk_model(self, dev):
if 'cciss' in dev:
return 'VOLUME'
s = ''
vendor = self.disk_vendor(dev)
dev = self.devpath_to_sysname(dev)
if dev.startswith("sd"):
dev = re.sub("[0-9]+$", "", dev)
path = '/sys/block/%s/device/model' % dev
if not os.path.exists(path):
l = glob.glob("/sys/block/%s/slaves/*/device/model" % dev)
if len(l) > 0:
path = l[0]
if not os.path.exists(path):
if 'Red Hat' in vendor:
return 'VirtIO'
else:
return ""
with open(path, 'r') as f:
s = f.read()
f.close()
return s.strip()
def disk_size(self, dev):
size = 0
if '/dev/mapper/' in dev:
try:
statinfo = os.stat(dev)
except:
raise Exception("can not stat %s" % dev)
dm = 'dm-' + str(os.minor(statinfo.st_rdev))
path = '/sys/block/' + dm + '/size'
if not os.path.exists(path):
return 0
else:
path = dev.replace('/dev/', '/sys/block/')+'/size'
if not os.path.exists(path):
cmd = ['blockdev', '--getsize', dev]
out, err, ret = justcall(cmd)
if ret != 0:
return 0
return int(math.ceil(1.*int(out)/2048))
with open(path, 'r') as f:
size = f.read()
f.close()
return int(math.ceil(1.*int(size)/2048))
def print_diskinfo(self, disk):
name = os.path.basename(disk)
info = {
'dev': '',
'size': 0,
'device/vendor': '',
'device/model': '',
}
for i in info:
i_f = os.path.join(disk, i)
if not os.path.exists(i_f):
continue
with open(i_f, 'r') as f:
info[i] = f.read().strip()
if '6900' in info['device/vendor']:
info['device/vendor'] = 'Red Hat'
if info['device/model'] is '':
info['device/model'] = 'VirtIO'
info['hbtl'] = os.path.basename(os.path.realpath(os.path.join(disk, "device")))
print(self.print_diskinfo_fmt%(
info['hbtl'],
name,
int(float(info['size'])/2//1024),
info['dev'],
info['device/vendor'],
info['device/model'],
))
def hba_num(self, hba=None):
if hba is None:
return
if hba.startswith("iqn"):
for path in glob.glob("/sys/class/scsi_host/host*"):
for _path in glob.glob(path+"/device/session*/iscsi_session/session*/initiatorname"):
with open(_path, "r") as f:
content = f.read().strip()
if content == hba:
return os.path.basename(path).replace("host", "")
for path in glob.glob("/sys/class/fc_host/host*"):
for _path in glob.glob(path+"/port_name"):
with open(_path, "r") as f:
content = f.read().strip()
if content == hba or "0x"+content == hba:
return os.path.basename(path).replace("host", "")
def target_num(self, host_num, target=None):
if target is None:
return
if target.startswith("iqn"):
for path in glob.glob("/sys/class/scsi_host/host"+str(host_num)+"/device/session*"):
for _path in glob.glob(path+"/iscsi_session/session1/targetname"):
with open(_path, "r") as f:
content = f.read().strip()
if content == target:
path = glob.glob(path+"/target*:*:*")[0]
return path.split(":")[-1]
for path in glob.glob("/sys/class/fc_transport/target%s:*:*" % str(host_num)):
for _path in glob.glob(path+"/port_name"):
with open(_path, "r") as f:
content = f.read().strip()
if content == target or "0x"+content == target:
return os.path.basename(path).split(":")[-1]
def scanscsi(self, hba=None, target=None, lun=None):
if not os.path.exists('/sys') or not os.path.ismount('/sys'):
print("scanscsi is not supported without /sys mounted", file=sys.stderr)
return 1
disks_before = glob.glob('/sys/block/sd*')
disks_before += glob.glob('/sys/block/vd*')
hba_num = self.hba_num(hba)
if hba_num is not None:
hosts = glob.glob('/sys/class/scsi_host/host'+str(hba_num))
target_num = self.target_num(hba_num, target)
else:
hosts = glob.glob('/sys/class/scsi_host/host*')
target_num = None
if target_num is None:
target_num = '-'
if lun is None:
lun = '-'
for host in hosts:
scan_f = host+'/scan'
if not os.path.exists(scan_f):
continue
print("scan", os.path.basename(host), "target"+target_num, "lun"+lun)
os.system('echo - ' + target_num + ' ' + lun + ' >' + scan_f)
udevadm_settle()
disks_after = glob.glob('/sys/block/sd*')
disks_after += glob.glob('/sys/block/vd*')
new_disks = set(disks_after) - set(disks_before)
if len(new_disks) == 0:
print("no new disk found")
return 0
self.print_diskinfo_header()
#for disk in disks_before:
for disk in new_disks:
self.print_diskinfo(disk)
return 0
if __name__ == "__main__":
# ad-hoc test: print the disk info table for all local sd*/vd* disks
diskinfo = diskInfo()
disks = glob.glob('/sys/block/sd*')
disks += glob.glob('/sys/block/vd*')
diskinfo.print_diskinfo_header()
for disk in disks:
diskinfo.print_diskinfo(disk)
#dev = '/dev/vda'
#vendor = diskinfo.disk_vendor(dev)
#model = diskinfo.disk_model(dev)
#print("%s has vendor [%s] model [%s]" % (dev, vendor, model))
opensvc-1.8~20170412/lib/svcRhcs.py 0000644 0001750 0001750 00000013014 13073467726 017015 0 ustar jkelbert jkelbert import os
import svc
import socket
import rcExceptions as ex
from rcUtilities import justcall
from rcGlobalEnv import rcEnv
from xml.etree.ElementTree import ElementTree, SubElement
import rcIfconfigLinux as rcIfconfig
class SvcRhcs(svc.Svc):
builder_props = [
"nodes",
]
def __init__(self, svcname, pkg_name=None):
# Wrap a rhcs (Red Hat Cluster Suite) package as an opensvc service.
self.type = "rhcs"
svc.Svc.__init__(self, svcname)
self.cf = "/etc/cluster/cluster.conf"
self.pkg_name = pkg_name
ifconfig = rcIfconfig.ifconfig()
self.node_ips = []
self.member_to_nodename_h = {}
# collect all local ip addresses, used to recognize the local node
# among the cluster members
for i in ifconfig.intf:
self.node_ips += i.ipaddr
def getaddr(self, ipname):
    """
    Resolve <ipname> and return its first address. Raise excError
    when the resolution yields no result.
    """
    info = socket.getaddrinfo(ipname, None)
    if len(info) == 0:
        raise ex.excError("unable to resolve %s ip address" % ipname)
    return info[0][4][0]
def member_to_nodename(self, member):
# Map a cluster member name to an opensvc nodename, caching results
# in self.member_to_nodename_h. Falls back to the member name itself
# when resolution or remote hostname lookup fails.
if member in self.member_to_nodename_h:
return self.member_to_nodename_h[member]
try:
addr = self.getaddr(member)
except:
# unresolvable member: use the member name as nodename
self.member_to_nodename_h[member] = member
return self.member_to_nodename_h[member]
if addr in self.node_ips:
# the member address is one of ours: local node
self.member_to_nodename_h[member] = rcEnv.nodename
return self.member_to_nodename_h[member]
# remote member: ask it for its hostname over rsh
cmd = rcEnv.rsh.split() + [member, "hostname"]
out, err, ret = justcall(cmd)
if ret != 0:
self.member_to_nodename_h[member] = member
return self.member_to_nodename_h[member]
self.member_to_nodename_h[member] = out.strip()
return self.member_to_nodename_h[member]
def load_cluster_conf(self):
    """
    Parse the rhcs cluster configuration file and keep a reference to
    its unique <cluster> element in self.xml. Raise excInitError if
    the file does not contain exactly one <cluster> element.
    """
    self.tree = ElementTree()
    self.tree.parse(self.cf)
    # ElementTree.getiterator() was deprecated and removed in
    # python 3.9; iter() yields the same elements.
    elements = list(self.tree.iter('cluster'))
    if len(elements) != 1:
        raise ex.excInitError()
    self.xml = elements[0]
def builder(self):
# Build the service object from the cluster configuration.
# pkg_name must have been set by the caller.
if self.pkg_name is None:
self.error("pkg_name is not set")
raise ex.excInitError()
self.load_cluster_conf()
# NOTE(review): load_service() and load_nodes() are not defined in
# this chunk -- presumably implemented elsewhere in the class; confirm.
self.load_service()
self.load_nodes()
self.load_clustat()
self.load_hb()
def load_clustat(self):
    """
    Run the clustat command and store its output lines in
    self.clustat. Raise excInitError if the command fails.
    """
    out, err, ret = justcall(['clustat'])
    if ret != 0:
        raise ex.excInitError()
    self.clustat = out.split('\n')
def load_hb(self):
# Add the rhcs heartbeat resource, driven by the resHbRhcs module.
rid = 'hb#sg0'
m = __import__("resHbRhcs")
r = m.Hb(rid, name=self.pkg_name)
self += r
def load_vg(self, e):
# Placeholder: volume group resources are not loaded yet; the
# docstring below sketches the intended implementation.
"""
r = m.Disk(rid, name=name)
self += r
r.monitor = True
self.n_vg += 1
"""
pass
def load_ip(self, e):
"""
Add an ip resource built from an <ip> element of cluster.conf.
Elements carrying a 'ref' attribute or missing an 'address'
attribute are ignored.
"""
if 'ref' in e.attrib:
# load ref xml node and recurse
return
if not 'address' in e.attrib:
return
ipname = e.attrib['address']
n = self.n_ip
rid = 'ip#rhcs%d'%n
m = __import__("resIpRhcs"+rcEnv.sysname)
r = m.Ip(rid, ipdev="", ipname=ipname, mask="")
r.monitor = True
self += r
self.n_ip += 1
def load_fs(self, e):
"""