opensvc-1.8~20170412/0000755000175000017500000000000013073470116014246 5ustar jkelbertjkelbertopensvc-1.8~20170412/inpath.cmd0000644000175000017500000000711713073467726016241 0ustar jkelbertjkelbert@echo off :addPath pathVar /B :: :: Safely appends the path contained within variable pathVar to the end :: of PATH if and only if the path does not already exist within PATH. :: :: If the case insensitive /B option is specified, then the path is :: inserted into the front (Beginning) of PATH instead. :: :: If the pathVar path is fully qualified, then it is logically compared :: to each fully qualified path within PATH. The path strings are :: considered a match if they are logically equivalent. :: :: If the pathVar path is relative, then it is strictly compared to each :: relative path within PATH. Case differences and double quotes are :: ignored, but otherwise the path strings must match exactly. :: :: Before appending the pathVar path, all double quotes are stripped, and :: then the path is enclosed in double quotes if and only if the path :: contains at least one semicolon. :: :: addPath aborts with ERRORLEVEL 2 if pathVar is missing or undefined :: or if PATH is undefined. :: ::------------------------------------------------------------------------ :: :: Error checking if "%~1"=="" exit /b 2 if not defined %~1 exit /b 2 if not defined path exit /b 2 :: :: Determine if function was called while delayed expansion was enabled setlocal set "NotDelayed=!" :: :: Prepare to safely parse PATH into individual paths setlocal DisableDelayedExpansion set "var=%path:"=""%" set "var=%var:^=^^%" set "var=%var:&=^&%" set "var=%var:|=^|%" set "var=%var:<=^<%" set "var=%var:>=^>%" set "var=%var:;=^;^;%" set var=%var:""="% set "var=%var:"=""Q%" set "var=%var:;;="S"S%" set "var=%var:^;^;=;%" set "var=%var:""="%" setlocal EnableDelayedExpansion set "var=!var:"Q=!" set "var=!var:"S"S=";"!" :: :: Remove quotes from pathVar and abort if it becomes empty set "new=!%~1:"^=!" 
if not defined new exit /b 2 :: :: Determine if pathVar is fully qualified echo("!new!"|findstr /i /r /c:^"^^\"[a-zA-Z]:[\\/][^\\/]" ^ /c:^"^^\"[\\][\\]" >nul ^ && set "abs=1" || set "abs=0" :: :: For each path in PATH, check if path is fully qualified and then :: do proper comparison with pathVar. Exit if a match is found. :: Delayed expansion must be disabled when expanding FOR variables :: just in case the value contains ! for %%A in ("!new!\") do for %%B in ("!var!") do ( if "!!"=="" setlocal disableDelayedExpansion for %%C in ("%%~B\") do ( echo(%%B|findstr /i /r /c:^"^^\"[a-zA-Z]:[\\/][^\\/]" ^ /c:^"^^\"[\\][\\]" >nul ^ && (if %abs%==1 if /i "%%~sA"=="%%~sC" exit /b 0) ^ || (if %abs%==0 if /i %%A==%%C exit /b 0) ) ) :: :: Build the modified PATH, enclosing the added path in quotes :: only if it contains ; setlocal enableDelayedExpansion if "!new:;=!" neq "!new!" set new="!new!" if /i "%~2"=="/B" (set "rtn=!new!;!path!") else set "rtn=!path!;!new!" :: :: rtn now contains the modified PATH. We need to safely pass the :: value accross the ENDLOCAL barrier :: :: Make rtn safe for assignment using normal expansion by replacing :: % and " with not yet defined FOR variables set "rtn=!rtn:%%=%%A!" set "rtn=!rtn:"=%%B!" :: :: Escape ^ and ! if function was called while delayed expansion was enabled. :: The trailing ! in the second assignment is critical and must not be removed. if not defined NotDelayed set "rtn=!rtn:^=^^^^!" if not defined NotDelayed set "rtn=%rtn:!=^^^!%" ! :: :: Pass the rtn value accross the ENDLOCAL barrier using FOR variables to :: restore the % and " characters. Again the trailing ! is critical. for /f "usebackq tokens=1,2" %%A in ('%%^ ^"') do ( endlocal & endlocal & endlocal & endlocal & endlocal set "path=%rtn%" ! 
) exit /b 0opensvc-1.8~20170412/svcmon.cmd0000644000175000017500000000011013073467726016245 0ustar jkelbertjkelbert@echo off call osvcenv.cmd "%OSVCPYTHONEXEC%" "%OSVCROOT%\bin\svcmon" %*opensvc-1.8~20170412/nodemgr.cmd0000644000175000017500000000011113073467726016374 0ustar jkelbertjkelbert@echo off call osvcenv.cmd "%OSVCPYTHONEXEC%" "%OSVCROOT%\bin\nodemgr" %*opensvc-1.8~20170412/svcmgr.cmd0000644000175000017500000000011013073467726016241 0ustar jkelbertjkelbert@echo off call osvcenv.cmd "%OSVCPYTHONEXEC%" "%OSVCROOT%\bin\svcmgr" %*opensvc-1.8~20170412/bin/0000755000175000017500000000000013073470034015015 5ustar jkelbertjkelbertopensvc-1.8~20170412/bin/init/0000755000175000017500000000000013073467726015776 5ustar jkelbertjkelbertopensvc-1.8~20170412/bin/init/opensvc.agent.xml0000644000175000017500000000406113073467726021273 0ustar jkelbertjkelbert opensvc-1.8~20170412/bin/init/opensvc.init.Darwin0000755000175000017500000000171313073467726021570 0ustar jkelbertjkelbert#!/bin/bash # # Starts the services driven by OpenSVC # # description: Starts the services driven by OpenSVC whose # autostart node is this node. # processname: PATH=/usr/bin:/usr/sbin:$PATH DEFAULTS="/etc/defaults/opensvc" OSVC_BOOT_OPTS="--parallel" OSVC_ROOT_PATH="/usr/share/opensvc" # Include opensvc defaults if available [ -r "$DEFAULTS" ] && . 
"$DEFAULTS" # Compat [ -n "$osvc_opts" ] && OSVC_BOOT_OPTS=${osvc_opts} [ -n "$osvc_background" ] && OSVC_BACKGROUND=${osvc_background} allservices=${OSVC_ROOT_PATH}/bin/svcmgr case $1 in start) ipconfig waitall echo "OpenSVC : Pushing node information" ${OSVC_ROOT_PATH}/bin/nodemgr pushasset echo echo "OpenSVC : Starting Services" [ "${OSVC_BACKGROUND}" == "true" ] && { ${allservices} ${OSVC_BOOT_OPTS} boot & } || { ${allservices} ${OSVC_BOOT_OPTS} boot } ;; stop) echo "OpenSVC : Stopping Services" ${allservices} ${OSVC_BOOT_OPTS} shutdown ;; esac opensvc-1.8~20170412/bin/init/opensvc.init.redhat0000755000175000017500000000172113073467726021612 0ustar jkelbertjkelbert#!/bin/bash # # /etc/rc.d/init.d/opensvc # # Starts the services driven by OpenSVC # # chkconfig: 2345 99 01 # description: Starts the services driven by OpenSVC whose # autostart node is this node. # processname: # Source function library. . /etc/init.d/functions DEFAULTS="/etc/sysconfig/opensvc" OSVC_BOOT_OPTS="--parallel" OSVC_ROOT_PATH="/usr/share/opensvc" # Include opensvc defaults if available [ -r "$DEFAULTS" ] && . "$DEFAULTS" # Compat [ -n "$osvc_opts" ] && OSVC_BOOT_OPTS=${osvc_opts} [ -n "$osvc_background" ] && OSVC_BACKGROUND=${osvc_background} allservices=${OSVC_ROOT_PATH}/bin/svcmgr case $1 in start) ${OSVC_ROOT_PATH}/bin/nodemgr pushasset [ "${OSVC_BACKGROUND}" == "true" ] && { ${allservices} ${OSVC_BOOT_OPTS} boot & } || { ${allservices} ${OSVC_BOOT_OPTS} boot } [ -d /var/lock/subsys ] && touch /var/lock/subsys/opensvc ;; stop) ${allservices} ${OSVC_BOOT_OPTS} shutdown rm -f /var/lock/subsys/opensvc ;; esac opensvc-1.8~20170412/bin/init/opensvc.init.SunOS0000755000175000017500000000144113073467726021351 0ustar jkelbertjkelbert#!/bin/sh # # /etc/init.d/opensvc # # Starts the services driven by OpenSVC # # description: Starts the services driven by OpenSVC whose # autostart node is this node. 
# processname: DEFAULTS="/etc/default/opensvc" OSVC_BOOT_OPTS="--parallel" OSVC_ROOT_PATH="/usr/share/opensvc" # Include opensvc defaults if available [ -r "$DEFAULTS" ] && . "$DEFAULTS" # Compat [ -n "$osvc_opts" ] && OSVC_BOOT_OPTS=${osvc_opts} [ -n "$osvc_background" ] && OSVC_BACKGROUND=${osvc_background} allservices=${OSVC_ROOT_PATH}/bin/svcmgr case $1 in start) ${OSVC_ROOT_PATH}/bin/nodemgr pushasset [ "${OSVC_BACKGROUND}" == "true" ] && { ${allservices} ${OSVC_BOOT_OPTS} boot & } || { ${allservices} ${OSVC_BOOT_OPTS} boot } ;; stop) ${allservices} ${OSVC_BOOT_OPTS} shutdown ;; esac opensvc-1.8~20170412/bin/init/opensvc.init.AIX0000755000175000017500000000114113073467726020760 0ustar jkelbertjkelbert#!/bin/ksh DEFAULTS="/etc/default/opensvc" OSVC_BOOT_OPTS="--parallel" OSVC_ROOT_PATH="/usr/share/opensvc" # Include opensvc defaults if available [ -r "$DEFAULTS" ] && . "$DEFAULTS" # Compat [ -n "$osvc_opts" ] && OSVC_BOOT_OPTS=${osvc_opts} [ -n "$osvc_background" ] && OSVC_BACKGROUND=${osvc_background} allservices=${OSVC_ROOT_PATH}/bin/svcmgr case $1 in start) ${OSVC_ROOT_PATH}/bin/nodemgr pushasset [ "${OSVC_BACKGROUND}" == "true" ] && { ${allservices} ${OSVC_BOOT_OPTS} boot & } || { ${allservices} ${OSVC_BOOT_OPTS} boot } ;; stop) ${allservices} ${OSVC_BOOT_OPTS} shutdown ;; esac opensvc-1.8~20170412/bin/init/darwin.com.opensvc.svcmgr.plist0000644000175000017500000000112513073467726024067 0ustar jkelbertjkelbert Label com.opensvc.svcmgr ProgramArguments /usr/share/opensvc/bin/init/opensvc.init.Darwin start RunAtLoad StandardErrorPath /var/log/opensvc/svcmgr_boot_stderr.log StandardOutPath /var/log/opensvc/svcmgr_boot_stdout.log opensvc-1.8~20170412/bin/init/opensvc.init.FreeBSD0000755000175000017500000000130413073467726021552 0ustar jkelbertjkelbert#!/bin/sh # # PROVIDE: opensvc # REQUIRE: LOGIN sshd cleanvar # BEFORE: # KEYWORD: shutdown DEFAULTS="/etc/defaults/opensvc" OSVC_BOOT_OPTS="--parallel" OSVC_ROOT_PATH="/usr/share/opensvc" # Include opensvc 
defaults if available [ -r "$DEFAULTS" ] && . "$DEFAULTS" # Compat [ -n "$osvc_opts" ] && OSVC_BOOT_OPTS=${osvc_opts} [ -n "$osvc_background" ] && OSVC_BACKGROUND=${osvc_background} allservices=${OSVC_ROOT_PATH}/bin/svcmgr case $1 in faststart) ${OSVC_ROOT_PATH}/bin/nodemgr pushasset [ "${OSVC_BACKGROUND}" == "true" ] && { ${allservices} ${OSVC_BOOT_OPTS} boot & } || { ${allservices} ${OSVC_BOOT_OPTS} boot } ;; faststop) ${allservices} ${OSVC_BOOT_OPTS} shutdown ;; esac opensvc-1.8~20170412/bin/init/opensvc.init.hpux0000755000175000017500000000207013073467726021325 0ustar jkelbertjkelbert#!/bin/sh PATH=/sbin:/usr/sbin:/bin:/usr/bin export PATH DEFAULTS="/etc/rc.config.d/opensvc" OSVC_BOOT_OPTS="--parallel" OSVC_ROOT_PATH="/usr/share/opensvc" # Include opensvc defaults if available [ -r "$DEFAULTS" ] && . "$DEFAULTS" # Compat [ -n "$osvc_opts" ] && OSVC_BOOT_OPTS=${osvc_opts} [ -n "$osvc_background" ] && OSVC_BACKGROUND=${osvc_background} allservices=${OSVC_ROOT_PATH}/bin/svcmgr case $1 in start_msg) if [ "$RUN_OPENSVC" -ne 0 ] ; then echo "Starting opensvc services" fi ;; start) if [ "$RUN_OPENSVC" -ne 0 ] ; then echo "Starting opensvc services" else exit 0 fi ${OSVC_ROOT_PATH}/bin/nodemgr collect stats ${OSVC_ROOT_PATH}/bin/nodemgr pushasset [ "${OSVC_BACKGROUND}" == "true" ] && { ${allservices} ${OSVC_BOOT_OPTS} boot & } || { ${allservices} ${OSVC_BOOT_OPTS} boot } ;; stop_msg) if [ "$RUN_OPENSVC" -ne 0 ] ; then echo "Shutting down opensvc services" fi ;; stop) if [ "$RUN_OPENSVC" -ne 0 ] ; then echo "Shutting down opensvc services" else exit 0 fi ${allservices} ${OSVC_BOOT_OPTS} shutdown ;; esac opensvc-1.8~20170412/bin/init/opensvc.init.debian0000755000175000017500000000153313073467726021566 0ustar jkelbertjkelbert#!/bin/bash ### BEGIN INIT INFO # Provides: opensvc # Required-Start: $all # Required-Stop: # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: OpenSVC services startup script ### END INIT INFO DEFAULTS="/etc/default/opensvc" 
OSVC_BOOT_OPTS="--parallel" OSVC_ROOT_PATH="/usr/share/opensvc" # Include opensvc defaults if available [ -r "$DEFAULTS" ] && . "$DEFAULTS" # Compat [ -n "$osvc_opts" ] && OSVC_BOOT_OPTS=${osvc_opts} [ -n "$osvc_background" ] && OSVC_BACKGROUND=${osvc_background} allservices=${OSVC_ROOT_PATH}/bin/svcmgr case $1 in start) ${OSVC_ROOT_PATH}/bin/nodemgr pushasset [ "${OSVC_BACKGROUND}" == "true" ] && { ${allservices} ${OSVC_BOOT_OPTS} boot & } || { ${allservices} ${OSVC_BOOT_OPTS} boot } ;; stop) ${allservices} ${OSVC_BOOT_OPTS} shutdown ;; esac opensvc-1.8~20170412/bin/init/opensvc.init.OSF10000755000175000017500000000115413073467726021053 0ustar jkelbertjkelbert#!/usr/bin/ksh DEFAULTS="/etc/default/opensvc" OSVC_BOOT_OPTS="--parallel" OSVC_ROOT_PATH="/usr/share/opensvc" # Include opensvc defaults if available [ -r "$DEFAULTS" ] && . "$DEFAULTS" # Compat [ -n "$osvc_opts" ] && OSVC_BOOT_OPTS=${osvc_opts} [ -n "$osvc_background" ] && OSVC_BACKGROUND=${osvc_background} allservices=${OSVC_ROOT_PATH}/bin/svcmgr case $1 in start) ${OSVC_ROOT_PATH}/bin/nodemgr pushasset [ "${OSVC_BACKGROUND}" == "true" ] && { ${allservices} ${OSVC_BOOT_OPTS} boot & } || { ${allservices} ${OSVC_BOOT_OPTS} boot } ;; stop) ${allservices} ${OSVC_BOOT_OPTS} shutdown ;; esac opensvc-1.8~20170412/bin/init/opensvc.init.suse0000755000175000017500000000153713073467726021327 0ustar jkelbertjkelbert#!/bin/sh -e ### BEGIN INIT INFO # Provides: opensvc # Required-Start: $all # Required-Stop: # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: OpenSVC services startup script ### END INIT INFO DEFAULTS="/etc/sysconfig/opensvc" OSVC_BOOT_OPTS="--parallel" OSVC_ROOT_PATH="/usr/share/opensvc" # Include opensvc defaults if available [ -r "$DEFAULTS" ] && . 
"$DEFAULTS" # Compat [ -n "$osvc_opts" ] && OSVC_BOOT_OPTS=${osvc_opts} [ -n "$osvc_background" ] && OSVC_BACKGROUND=${osvc_background} allservices=${OSVC_ROOT_PATH}/bin/svcmgr case $1 in start) ${OSVC_ROOT_PATH}/bin/nodemgr pushasset [ "${OSVC_BACKGROUND}" == "true" ] && { ${allservices} ${OSVC_BOOT_OPTS} boot & } || { ${allservices} ${OSVC_BOOT_OPTS} boot } ;; stop) ${allservices} ${OSVC_BOOT_OPTS} shutdown ;; esac opensvc-1.8~20170412/bin/init/systemd.opensvc-agent.service0000644000175000017500000000071613073467726023624 0ustar jkelbertjkelbert[Unit] Description=OpenSVC Agent Documentation=http://docs.opensvc.com/ file:/usr/share/doc/opensvc/ After=network.target network-online.target [Service] Type=idle Environment="PATH=/opt/opensvc/bin:/opt/opensvc/etc:/etc/opensvc:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ExecStart=/usr/share/opensvc/bin/init/opensvc.init start ExecStop=/usr/share/opensvc/bin/init/opensvc.init stop RemainAfterExit=yes [Install] WantedBy=multi-user.target opensvc-1.8~20170412/bin/opensvc0000755000175000017500000000244413073467726016442 0ustar jkelbertjkelbert#!/bin/sh # variables users can override in the defaults file OSVC_ROOT_PATH="/usr/share/opensvc" OSVC_PYTHON="python" OSVC_PYTHON_ARGS="" if [ -r "/etc/defaults/opensvc" ] then # FreeBSD, Darwin . "/etc/defaults/opensvc" elif [ -r "/etc/default/opensvc" ] then # Debian-like, Tru64, SunOS and HP-UX . "/etc/default/opensvc" elif [ -r "/etc/sysconfig/opensvc" ] then # Red Hat-like . "/etc/sysconfig/opensvc" elif [ -r "/etc/rc.config.d/opensvc" ] then # AIX . "/etc/rc.config.d/opensvc" fi BASENAME=`basename $0` case $BASENAME in opensvc) # # Use me as the shebang for python modules to be garantied the # same python requirements than the agent are met (ie 2.6+). 
# # Example: #!/usr/bin/env opensvc # exec "$OSVC_PYTHON" "$@" ;; *.cluster) BIN_ARGS="--cluster" BASENAME=`echo $BASENAME|sed -e "s/.cluster$//"` ;; *.stonith) set -- BIN_ARGS="stonith --cluster" BASENAME=`echo $BASENAME|sed -e "s/.stonith$//"` ;; esac if [ "$BASENAME" = "nodemgr" -o "$BASENAME" = "svcmgr" -o "$BASENAME" = "svcmon" ] then unset OSVC_SERVICE_LINK BIN="$OSVC_ROOT_PATH/lib/$BASENAME.py" else # exec from a service link: add the --service parameter OSVC_SERVICE_LINK=$BASENAME export OSVC_SERVICE_LINK BIN="$OSVC_ROOT_PATH/lib/svcmgr.py" fi "$OSVC_PYTHON" $OSVC_PYTHON_ARGS "$BIN" $BIN_ARGS "$@" opensvc-1.8~20170412/bin/svcmgr0000777000175000017500000000000013073467726017654 2opensvcustar jkelbertjkelbertopensvc-1.8~20170412/bin/postinstall0000755000175000017500000011335113073467726017341 0ustar jkelbertjkelbert#!/usr/bin/env python import os import errno import shutil import glob import sys import tempfile import inspect try: sysname, nodename, x, x, machine = os.uname() except: import platform sysname, nodename, x, x, machine, x = platform.uname() postinstall_d = sys.path[0] if '/catalog/' in postinstall_d: # hpux packaging subsystem executes the postinstall from dir # /var/tmp/XXXXXXXXX/catalog/opensvc/commands/ postinstall_d = "/usr/share/opensvc/bin" lsb = True elif postinstall_d == "/usr/share/opensvc/bin": lsb = True else: # windows install or unix execution from a non-lsb tree (ex: /opt/opensvc/) lsb = False if lsb: pathsbin = "/usr/bin" pathsvc = None pathetc = "/etc/opensvc" pathvar = "/var/lib/opensvc" pathlck = '/var/lib/opensvc/lock' pathtmp = "/var/tmp/opensvc" pathlog = "/var/log/opensvc" pathbin = "/usr/share/opensvc/bin" pathlib = "/usr/share/opensvc/lib" pathini = "/usr/share/opensvc/bin/init" pathusr = None else: pathsbin = postinstall_d pathsvc = os.path.realpath(os.path.join(pathsbin, '..')) pathetc = os.path.join(pathsvc, 'etc') pathvar = os.path.join(pathsvc, 'var') pathlck = os.path.join(pathvar, 'lock') pathtmp = 
os.path.join(pathsvc, 'tmp') pathlog = os.path.join(pathsvc, 'log') pathbin = postinstall_d pathlib = os.path.join(pathsvc, 'lib') pathini = os.path.join(pathsvc, 'bin', 'init') pathusr = os.path.join(pathsvc, 'usr') def make_sure_path_exists(path): try: os.makedirs(path, 0755) except OSError, exception: if exception.errno != errno.EEXIST: raise def logit(msg,stdout=False,stderr=False): curframe = inspect.currentframe() calframe = inspect.getouterframes(curframe, 2) try: import datetime timestamp = str(datetime.datetime.now()) except: timestamp = '?' osvlog = os.path.join(pathlog, 'postinstall.log') content = '['+calframe[1][3]+']['+timestamp+'] '+msg f = open(osvlog, 'a') f.write(content+'\n') f.close() if stdout: print msg if stderr: print >>sys.stderr, "error ==> " + msg make_sure_path_exists(pathlog) logit("\nStarting OpenSVC postinstall\n",stdout=True) SolarisRootRelocate = False if sysname == 'SunOS' and "PKG_INSTALL_ROOT" in os.environ and os.environ['PKG_INSTALL_ROOT'] != '/': logit("SunOS PKG_INSTALL_ROOT <%s>"%(os.environ['PKG_INSTALL_ROOT'])) SolarisRootRelocate = True variables = { "pathsvc": pathsvc, "pathsbin": pathsbin, "pathbin": pathbin, "pathetc": pathetc, "pathvar": pathvar, "pathtmp": pathtmp, "pathlib": pathlib, "pathlog": pathlog, "pathusr": pathusr, "SolarisRootRelocate": SolarisRootRelocate } for key in variables: logit("var %s <%s>"%(key,variables[key])) def install_cron(): logit("begin") if sysname == 'Windows': logit("windows not applicable") return else: return install_cron_unix() def install_cron_windows(): logit("begin") logit("install OsvcSched service",stdout=True) schedstop() schedremove() schedinstall() schedstart() def schedremove(): logit("begin") cmd = ' remove' schedcmd(cmd) def schedstart(): logit("begin") cmd = ' start' schedcmd(cmd) def schedstop(): logit("begin") cmd = ' stop' schedcmd(cmd) def schedinstall(): logit("begin") cmd = '' cmd += ' --username LocalSystem' cmd += ' --startup auto' cmd += ' install\n' schedcmd(cmd) 
def schedcmd(_cmd): logit("begin") logit("_cmd %s"%_cmd) rc = '"'+sys.executable+'" "'+os.path.join(pathlib, 'rcWinScheduler.py')+'"' cmd = "@echo off\n" cmd += rc cmd += _cmd fd, fname = tempfile.mkstemp(dir=pathtmp, suffix='.cmd') f = os.fdopen(fd, 'w') f.write(cmd) f.close() import subprocess subprocess.call([fname]) os.unlink(fname) def save_file(infile): logit("begin") logit("infile <%s>"%infile) if not os.path.exists(infile): return True try: import datetime timestamp = str(datetime.datetime.now()) tmp = timestamp.replace(" ", ".") ts = tmp.replace(":", ".") except: ts = 'opensvc.postinstall' ofname = os.path.basename(infile) logit("ofname <%s>"%ofname) nfname = ofname + '.crontab.' + ts logit("nfname <%s>"%nfname) outfile = os.path.join(os.sep, pathtmp, nfname) logit("outfile <%s>"%outfile) logit("saving file <%s> to <%s>"%(infile,outfile),stdout=True) try: shutil.copyfile(infile, outfile) except: import traceback traceback.print_exc() logit("error while trying to save file <%s> to <%s>"%(infile,outfile),stderr=True) return False return True def install_cron_unix(): logit("begin") """install opensvc cron jobs """ nodemgr = os.path.join(pathsbin, 'nodemgr') ce = [{ 'sched': "* * * * *", 'reset_sched': True, 'user': "", 'cmd': "[ -x "+nodemgr+" ] && "+nodemgr+" schedulers >/dev/null 2>&1", 'marker': nodemgr + ' schedulers', 'ok': False }] remove_entries = [ 'bin/nodemgr compliance check', 'bin/svcmon ', 'bin/cron/opensvc', 'svcmgr resource monitor', 'svcmgr resource_monitor', 'nodemgr cron', 'perfagt.'+sysname, ] purge = [] root_crontab = False """ order of preference """ if sysname == 'SunOS' : if SolarisRootRelocate is True: suncron = os.environ["PKG_INSTALL_ROOT"] + '/var/spool/cron/crontabs/root' root_crontab_locs = [ suncron ] else: root_crontab_locs = [ '/var/spool/cron/crontabs/root' ] else: root_crontab_locs = [ '/etc/cron.d/opensvc', '/var/spool/cron/crontabs/root', '/var/spool/cron/root', '/var/cron/tabs/root', '/usr/lib/cron/tabs/root', ] for loc in 
root_crontab_locs: logit("looping crontab location <%s>"%loc) if os.path.exists(os.path.dirname(loc)): if not root_crontab: root_crontab = loc logit("identifying <%s> as root crontab"%root_crontab) if root_crontab == '/etc/cron.d/opensvc': logit("assigning root:root as crontab owner") ce[0]['user'] = "root" elif os.path.exists(loc): logit("adding <%s> to purge table"%loc) purge.append(loc) if not root_crontab: logit("no root crontab found in usual locations <%s>"%str(root_crontab_locs),stderr=True) return False ce[0]['full'] = ' '.join([ce[0]['sched'], ce[0]['user'], ce[0]['cmd']]) logit("osvcagt crontab entry <%s>"%ce[0]['full']) new = False if os.path.exists(root_crontab): try: f = open(root_crontab, 'r') new = f.readlines() f.close() logit("loaded crontab <%s> content <%s>"%(root_crontab,new)) except: f.close() import traceback traceback.print_exc() i = -1 for line in new: i += 1 for c in ce: if c['full'] is None: continue if line.find(c['marker']) != -1: if line.strip().startswith("#"): continue if c['ok']: new[i] = "" continue if c['reset_sched']: sched = c['sched'] else: sched = ' '.join(line.split()[:5]) new[i] = ' '.join([sched, c['user'], c['cmd']])+'\n' c['ok'] = True for c in ce: if c['full'] is not None and not c['ok']: new.append(c['full']+'\n') else: new = [] for c in ce: if c['full'] is not None and not c['ok']: new.append(c['full']+'\n') logit("no crontab <%s>. building new content <%s>"%(root_crontab,new)) if not new: logit("problem preparing the new crontab",stderr=True) return False i = -1 for line in new: i += 1 for re in remove_entries: logit("looping re <%s>"%re) if line.find(re) != -1: logit("delete line <%s> from <%s>"%(re,root_crontab)) del new[i] logit("saving crontab <%s>"%root_crontab) try: save_file(root_crontab) except: logit('Error while trying to backup crontab <%s>. 
skipping crontab update'%(root_crontab),stderr=True) return False logit("updating crontab <%s> with content <%s>"%(root_crontab,new)) try: f = open(root_crontab, 'w') f.write(''.join(new)) f.close() except: logit("error while trying to update crontab %s"%root_crontab,stderr=True) f.close() import traceback traceback.print_exc() """ Activate changes (actually only needed on HP-UX) """ if sysname in ("HP-UX", "SunOS") and root_crontab.find('/var/spool/') != -1: logit("crontab activation requested") cmd = ['crontab', root_crontab] ret = os.system(' '.join(cmd)) for loc in purge: try: f = open(loc, 'r') new = [ line for line in f.readlines() if line.find('opensvc.daily') == -1 and line.find('svcmon --updatedb') == -1 ] f.close() f = open(loc, 'w') f.write(''.join(new)) f.close() except: f.close() import traceback traceback.print_exc() """ Clean up old standard file locations """ for f in ['/etc/cron.daily/opensvc', '/etc/cron.daily/opensvc.daily']: if os.path.exists(f): logit("removing %s"%f) os.unlink(f) def activate_chkconfig(svc): logit("begin") cmd = ['chkconfig', '--add', svc] ret = os.system(' '.join(cmd)) if ret > 0: return False return True def activate_systemd(launcher): logit("begin") systemdsvc = 'opensvc-agent.service' # populate systemd tree with opensvc unit file src = os.path.join(pathini, 'systemd.opensvc-agent.service') dst = os.path.join('/etc/systemd/system/', systemdsvc) logit("installing systemd unit file",stdout=True) try: shutil.copyfile(src, dst) os.chmod(dst, 0644) except: logit("issue met while trying to install systemd unit file",stderr=True) # add symlink to resolve systemd service call systemd_call = os.path.join(pathini, "opensvc.init") if not os.path.islink(systemd_call): if os.path.exists(systemd_call): logit("removing %s"%systemd_call) os.unlink(systemd_call) msg = "create link %s -> %s"%(systemd_call, launcher) logit(msg) try: os.symlink(launcher, systemd_call) except: logit("issue met while trying to create %s symlink" % 
system_call,stderr=True) # set systemd call as ExecStart and ExecStop os.system("sed -i 's@/usr/share/opensvc/bin/init/opensvc.init@"+systemd_call+"@' "+dst) # reload systemd configuration logit("reloading systemd configuration",stdout=True) cmd = ['systemctl', '-q', 'daemon-reload'] ret = os.system(' '.join(cmd)) if ret > 0: logit("issue met during systemctl reload",stderr=True) # enable opensvc agent startup through systemd logit("enabling systemd configuration") cmd = ['systemctl', '-q', 'enable', systemdsvc] ret = os.system(' '.join(cmd)) if ret > 0: logit("issue met during systemctl enable",stderr=True) def systemd_mgmt(): logit("begin") cmd = ['systemctl', '--version', '>>/dev/null', '2>&1'] ret = os.system(' '.join(cmd)) if ret > 0: return False return True def activate_ovm(launcher): logit("begin") activate_chkconfig('zopensvc') def activate_redhat(launcher): logit("begin") activate_chkconfig('opensvc') def activate_debian(launcher): logit("begin") cmd = ['update-rc.d', '-f', 'opensvc', 'remove'] ret = os.system(' '.join(cmd)) if ret > 0: logit("issue met while trying to remove opensvc rc launchers",stderr=True) return False cmd = ['update-rc.d', 'opensvc', 'defaults'] ret = os.system(' '.join(cmd)) if ret > 0: logit("issue met while trying to install opensvc rc launchers",stderr=True) return False return True def activate_hpux(launcher): logit("begin") rc = "/sbin/init.d/opensvc" links = ["/sbin/rc1.d/K010opensvc", "/sbin/rc2.d/K010opensvc", "/sbin/rc3.d/S990opensvc"] if os.path.exists("/sbin/rc2.d/S990opensvc"): logit("removing /sbin/rc2.d/S990opensvc") os.unlink("/sbin/rc2.d/S990opensvc") for l in links: if not os.path.islink(l): if os.path.exists(l): logit("removing %s"%l) os.unlink(l) logit("create link %s -> %s"%(l,rc)) os.symlink(rc, l) try: f = open("/etc/rc.config.d/opensvc", "w") f.write("RUN_OPENSVC=1\n") f.close() except: logit("issue met while trying to install rc.config.d opensvc file",stderr=True) f.close() import traceback 
traceback.print_exc() return True def activate_AIX(launcher): logit("begin") rc = "/etc/rc.d/init.d/opensvc" links = ["/etc/rc.d/rc2.d/S990opensvc"] for l in links: if not os.path.islink(l): if os.path.exists(l): logit("removing %s"%l) os.unlink(l) logit("create link %s -> %s"%(l,rc)) os.symlink(rc, l) return True def activate_OSF1(launcher): logit("begin") rc = "/sbin/init.d/opensvc" links = ["/sbin/rc0.d/K010opensvc", "/sbin/rc2.d/K010opensvc", "/sbin/rc3.d/S990opensvc"] for l in links: if not os.path.islink(l): if os.path.exists(l): logit("removing %s"%l) os.unlink(l) logit("symlinking %s and %s"%(rc,l)) os.symlink(rc, l) return True def activate_SunOS(launcher): logit("begin") if SolarisRootRelocate is True: rc = "/etc/init.d/opensvc" links = [os.environ["PKG_INSTALL_ROOT"] + "/etc/rc0.d/K00opensvc", os.environ["PKG_INSTALL_ROOT"] + "/etc/rc3.d/S99opensvc"] else: rc = "/etc/init.d/opensvc" links = ["/etc/rc0.d/K00opensvc", "/etc/rc3.d/S99opensvc"] logit("rc <%s>"%rc) for l in links: logit("link <%s>"%l) if not os.path.islink(l): if os.path.exists(l): logit("removing %s"%l) os.unlink(l) logit("symlinking %s and %s"%(rc,l)) os.symlink(rc, l) return True def activate_FreeBSD(launcher): logit("begin") return True def activate_Darwin(launcher): logit("begin") return True def update_file(filename, srctext, replacetext): logit("begin") """ replace into filename srctext by replacetext """ import fileinput for line in fileinput.input(filename, inplace=1): if line.rstrip('\n') == srctext.rstrip('\n') : line = replacetext msg=line.rstrip('\n') logit(msg,stdout=True) fileinput.close() def install_params(path2file): logit("begin") """ install template file with tunable variables """ if os.path.exists(path2file): logit("file %s already present"%path2file) return try: f = open(path2file, "w") except: import traceback traceback.print_exc() else: logit("writing new <%s>"%path2file) f.write("# OpenSVC startup and wrapper configuration file\n") f.write("#\n") f.write("# You may 
need to adapt parameters to fit your environment\n") f.write("# This file is not modified during software upgrades\n") f.write("# If empty, default settings are used in the init script\n\n\n") f.write("\n") f.write("#\n") f.write("# Arguments passed to the 'svcmgr boot' command at system boot\n") f.write("#\n") f.write("#OSVC_BOOT_OPTS=\n") f.write("\n") f.write("#\n") f.write("# If set to true, the OpenSVC launcher will start in the\n") f.write("# background, avoiding timeouts in init managers. The default\n") f.write("# is to launch services in the foreground.\n") f.write("#\n") f.write("#OSVC_BACKGROUND=true\n") f.write("\n") f.write("#\n") f.write("# Wrapper configuration\n") f.write("#\n") f.write("#OSVC_ROOT_PATH=/opt/opensvc\n") f.write("#OSVC_PYTHON=python\n") f.write("#LD_LIBRARY_PATH=\n") f.write("#LD_PRELOAD=\n") f.write("\n") f.close() def install_rc(): logit("begin") """install startup script """ params = None copyrc = True if os.path.exists('/etc/debian_version'): rc = '/etc/init.d/opensvc' params = '/etc/default/opensvc' src = os.path.join(pathini, 'opensvc.init.debian') if systemd_mgmt(): logit("debian with systemd") copyrc = False activate = activate_systemd else: logit("debian with update-rc.d (rely on insserv)") activate = activate_debian elif os.path.exists('/etc/SuSE-release'): rc = '/etc/init.d/opensvc' params = '/etc/sysconfig/opensvc' src = os.path.join(pathini, 'opensvc.init.suse') if systemd_mgmt(): logit("SuSE with systemd") copyrc = False activate = activate_systemd else: logit("SuSE with chkconfig (rely on insserv)") activate = activate_redhat elif os.path.exists('/etc/redhat-release'): params = '/etc/sysconfig/opensvc' src = os.path.join(pathini, 'opensvc.init.redhat') try: f = open('/etc/redhat-release', 'r') buff = f.read() f.close() except: buff = "" if buff.find('Oracle VM server') != -1: rc = '/etc/init.d/zopensvc' activate = activate_ovm else: rc = '/etc/init.d/opensvc' if systemd_mgmt(): logit("Red Hat with systemd") copyrc = 
False activate = activate_systemd else: logit("Red Hat with chkconfig (rely on insserv)") activate = activate_redhat elif sysname == "HP-UX": rc = '/sbin/init.d/opensvc' src = os.path.join(pathini, 'opensvc.init.hpux') activate = activate_hpux elif sysname == "SunOS": if SolarisRootRelocate is True: rc = os.environ["PKG_INSTALL_ROOT"] + '/etc/init.d/opensvc' params = os.environ["PKG_INSTALL_ROOT"] + '/etc/default/opensvc' src = os.environ["PKG_INSTALL_ROOT"] + os.path.join(pathini, 'opensvc.init.SunOS') else: rc = '/etc/init.d/opensvc' params = '/etc/default/opensvc' src = os.path.join(pathini, 'opensvc.init.SunOS') activate = activate_SunOS elif sysname == "OSF1": rc = '/sbin/init.d/opensvc' src = os.path.join(pathini, 'opensvc.init.OSF1') activate = activate_OSF1 elif sysname == "FreeBSD": rc = '/etc/rc.d/opensvc' params = '/etc/defaults/opensvc' src = os.path.join(pathini, 'opensvc.init.FreeBSD') activate = activate_FreeBSD elif sysname == "AIX": rc = '/etc/rc.d/init.d/opensvc' src = os.path.join(pathini, 'opensvc.init.AIX') activate = activate_AIX elif sysname == "Darwin": rc = '/Library/LaunchDaemons/com.opensvc.svcmgr.plist' params = '/etc/defaults/opensvc' src = os.path.join(pathini, 'darwin.com.opensvc.svcmgr.plist') activate = activate_Darwin elif sysname == 'Windows': return False else: logit("could not select an init script: unsupported operating system",stderr=True) return False if os.path.islink(rc): logit("removing link %s"%rc) os.unlink(rc) if copyrc: logit("copying src launcher script to rc") shutil.copyfile(src, rc) os.chmod(rc, 0755) if params is not None and not os.path.exists(params): logit("installing default parameters file") install_params(params) activate(src) def gen_keys(): logit("begin") if sysname == 'Windows': return home = os.path.expanduser("~root") logit("home <%s>"%home) if SolarisRootRelocate is True: home = os.environ['PKG_INSTALL_ROOT'] + os.path.expanduser("~root") logit("SunOS and relocatable install home is now <%s>"%home) 
sshhome = os.path.join(home, ".ssh") logit("sshhome <%s>"%sshhome) if not os.path.exists(sshhome): logit("create dir %s"%sshhome,stdout=True) os.makedirs(sshhome, 0700) priv = os.path.join(sshhome, "id_rsa") pub = os.path.join(sshhome, "id_rsa.pub") if os.path.exists(pub) or os.path.exists(priv): logit("either %s or %s already exist"%(pub,priv)) return cmd = ['ssh-keygen', '-t', 'rsa', '-b', '2048', '-P', '""', '-f', priv] try: ret = os.system(' '.join(cmd)) except: logit("Error while trying to generate ssh keys") def missing_dir(pathd): logit("begin") if not os.path.exists(pathd): logit("create dir %s"%pathd,stdout=True) os.makedirs(pathd, 0755) def missing_dirs(): logit("begin") missing_dir(pathlog) missing_dir(pathtmp) missing_dir(pathvar) missing_dir(pathetc) missing_dir(pathlck) def convert_svclinks(): logit("begin") missing_dir(pathetc) svcmgr = os.path.join(pathsbin, 'svcmgr') if not os.path.exists(svcmgr): logit("%s does not exist"%svcmgr) return 1 rcService = os.path.realpath(os.path.join(pathbin, 'rcService')) if not os.path.exists(rcService): logit("%s does not exist"%rcService) return 1 for fname in os.listdir(pathetc): fpath = os.path.join(pathetc, fname) if not os.path.islink(fpath): logit("%s is not a symlink"%fpath) continue rpath = os.path.realpath(fpath) if rpath != rcService: logit("%s != %s"%(rpath,rcService)) continue logit("removing %s"%fpath) os.unlink(fpath) logit("create link %s -> %s"%(fpath,svcmgr)) os.symlink(svcmgr, fpath) def move_env_to_conf(): for fpath in glob.glob(os.path.join(pathetc, "*.env")): svcname = os.path.basename(fpath)[:-4] new_basename = svcname+".conf" new_fpath = os.path.join(pathetc, new_basename) shutil.move(fpath, new_fpath) def move_var_files_in_subdirs(): for fpath in glob.glob(os.path.join(pathvar, "last_*")): dst = os.path.join(pathvar, "node") if not os.path.exists(dst): os.makedirs(dst) fname = os.path.basename(fpath) new_fpath = os.path.join(dst, fname) logit("move %s to %s" % (fpath, new_fpath)) 
shutil.move(fpath, new_fpath) for fpath in glob.glob(os.path.join(pathvar, "*_last_*")): fname = os.path.basename(fpath) svcname = fname.split("_last_")[0] dst = os.path.join(pathvar, svcname) if not os.path.exists(dst): os.makedirs(dst) fname = fname.replace(svcname+"_", "") new_fpath = os.path.join(dst, fname) logit("move %s to %s" % (fpath, new_fpath)) shutil.move(fpath, new_fpath) for fpath in glob.glob(os.path.join(pathvar, "*.push")): svcname = os.path.basename(fpath).split(".push")[0] dst = os.path.join(pathvar, svcname) if not os.path.exists(dst): os.makedirs(dst) fname = "last_pushed_env" new_fpath = os.path.join(dst, fname) logit("move %s to %s" % (fpath, new_fpath)) shutil.move(fpath, new_fpath) def move_usr_to_opt(): logit("begin") linksvc = os.path.join(os.sep, 'service') old_pathsvc = os.path.join(os.sep, 'usr', 'local', 'opensvc') old_pathvar = os.path.join(old_pathsvc, 'var') old_pathetc = os.path.join(old_pathsvc, 'etc') if os.path.exists(old_pathvar): logit("found old var %s"%old_pathvar) for f in glob.glob(old_pathvar+'/*'): dst = os.path.join(pathvar, os.path.basename(f)) if os.path.exists(dst) and dst.find('host_mode') == -1: logit("file %s already exist"%dst) continue if os.path.isdir(f): logit("copying dir %s to %s"%(f,dst)) shutil.copytree(f, dst, symlinks=True) elif os.path.islink(f): linkto = os.readlink(f) logit("create link %s -> %s"%(dst,linto)) os.symlink(linkto, dst) else: logit("copying file %s to %s"%(f,dst)) shutil.copy2(f, dst) if os.path.exists(old_pathetc): logit("found old etc %s"%old_pathetc) for f in glob.glob(old_pathetc+'/*'): dst = os.path.join(pathetc, os.path.basename(f)) if os.path.exists(dst): logit("file %s already exist"%dst) continue if os.path.islink(f): linkto = os.readlink(f) logit("create link %s -> %s"%(dst,linto)) os.symlink(linkto, dst) elif os.path.isdir(f): logit("copying dir %s to %s"%(f,dst)) shutil.copytree(f, dst, symlinks=True) else: logit("copying file %s to %s"%(f,dst)) shutil.copy2(f, dst) if 
os.path.exists(old_pathsvc): logit("removing old_pathsvc %s"%old_pathsvc) shutil.rmtree(old_pathsvc) if os.path.islink(linksvc) and os.path.realpath(linksvc) == old_pathsvc: logit("removing linksvc %s"%linksvc) os.unlink(linksvc) def install_etc_path(): logit("begin") p = os.path.join(os.sep, 'etc', 'PATH') if not os.path.exists(p): logit("etc/PATH not found") return try: logit("loading %s"%(p)) f = open(p, "r") buff = f.read() f.close() except: logit("issue met while trying to read %s"%(p),stderr=True) return l = buff.strip().split(":") n = len(l) for op in (pathbin, pathetc): if op in l: logit("dir %s already present in %s"%(op,p)) continue logit("adding dir %s"%(op)) l.append(op) if len(l) == n: logit("nothing changed in %s"%(p)) return try: logit("updating %s"%(p)) f = open(p, "w") f.write(":".join(l)+'\n') f.close() except: logit("issue met while trying to write %s"%(p),stderr=True) return def install_profile(): logit("begin") prof_d = os.path.join(os.sep, 'etc', 'profile.d') prof = os.path.join(prof_d, 'opensvc.sh') buff = "if ! echo ${PATH} | grep -q "+pathetc+"; then"+"\n" buff = buff+" PATH=${PATH}:"+pathetc+"\n" buff = buff+"fi\n\n" buff = buff+"if ! 
echo ${PATH} | grep -qw "+pathsbin+"; then"+"\n" buff = buff+" PATH=${PATH}:"+pathsbin+"\n" buff = buff+"fi\n" if not os.path.exists(prof_d): logit("no profile directory found") return try: logit("installing profile in file %s"%(prof)) f = open(prof, 'w') f.write(buff) f.close() except: logit("issue met while trying to install profile in file %s"%(prof),stderr=True) f.close() import traceback traceback.print_exc() def install_bash_completion(): logit("begin") if pathsvc is None: return src = os.path.join(pathsvc, 'usr', 'share', 'bash_completion.d', 'opensvc') ds = [os.path.join(os.sep, 'etc', 'bash_completion.d'), os.path.join(os.sep, 'etc', 'bash', 'bash_completion.d')] for d in ds: dst = os.path.join(d, 'opensvc') if not os.path.exists(d): d = None continue else: break if d is None: logit("no bash completion directory found") return try: logit("installing bash completion file src %s to tgt %s"%(src,dst)) shutil.copyfile(src, dst) os.chmod(dst, 0644) except: logit("issue met while trying to install bash completion file src %s to tgt %s"%(src,dst)) def install_link(source, target): logit("begin") if source == '' or target == '': logit("bad parameters") return False if os.path.realpath(source) == os.path.realpath(target): logit("link already ok") return True if os.path.islink(target) or os.path.exists(target): logit("unlink %s",target) os.unlink(target) try: logit("create link %s -> %s"%(target,source)) os.symlink(source,target) except: logit("issue met while trying to symlink src %s with tgt %s"%(source,target)) def install_pythonlink(): logit("begin") if sysname == 'Windows': return install_pythonlink_windows() else: return install_pythonlink_unix() def install_pythonlink_windows(): logit("begin") logit("before appending pathlib to syspath") logit(os.environ["PATH"]) sys.path = [pathlib] + sys.path logit("after appending pathlib to syspath") logit(os.environ["PATH"]) from rcUtilitiesWindows import get_registry_value logit("before reading installfolder in 
registry") try: installfolder = get_registry_value('HKEY_CURRENT_USER', 'Software\\OpenSVC', 'path') except: logit("issue met while trying to read path into registry HKCU/Software/OpenSVC/path",stderr=True) sys.exit(1) installfolder = installfolder.rstrip('\\') logit("installfolder = <"+installfolder+">") osvcenv = os.path.join(installfolder, 'osvcenv.cmd') content = '@echo off\nset OSVCROOT='+installfolder+'\nset OSVCPYTHONROOT=%OSVCROOT%\python\nset PYTHONPATH=%OSVCROOT%\lib\nset OSVCPYTHONEXEC=%OSVCPYTHONROOT%\python.exe\ncall inpath.cmd OSVCPYTHONROOT' logit(content) f = open(osvcenv, 'w') f.write(content) f.close() def move_host_mode(): logit("begin") hm = os.path.join(pathvar, 'host_mode') cf = os.path.join(pathetc, 'node.conf') nodemgr = os.path.join(pathsbin, 'nodemgr') if not os.path.exists(hm): logit("file %s does not exist"%hm) return try: fp = open(hm, 'r') mode = fp.read().split()[0] fp.close() except: logit("failed to read old host_mode. renamed to %s"%(hm+'.old')) shutil.move(hm, hm+'.old') return cmd = [nodemgr, 'set', '--param', 'node.host_mode', '--value', mode] ret = os.system(' '.join(cmd)) if ret != 0: logit("failed to set host_mode in node.conf",stdout=True) return shutil.move(hm, hm+'.old') def nodeconf_params(): logit("begin") nodeconf = os.path.join(pathetc, 'node.conf') dotnodeconf = os.path.join(pathetc, '.node.conf') # reset etc/.node.conf (autogenerated) if os.path.exists(dotnodeconf): logit("unlink file %s"%dotnodeconf) os.unlink(dotnodeconf) if not os.path.exists(nodeconf): logit("file %s does not exist"%nodeconf) return import ConfigParser import copy try: config = ConfigParser.RawConfigParser() except AttributeError: logit("issue occured while trying to instantiate configparser") return config.read(nodeconf) changed = False # no DEFAULT in etc/node.conf for o in copy.copy(config.defaults()): logit("removing DEFAULT in node.conf") config.remove_option('DEFAULT', o) changed = True # sync section goes to etc/.node.conf if 
config.has_section('sync'): logit("removing sync in node.conf") config.remove_section('sync') changed = True for s in config.sections(): for o in config.options(s): if o in ['sync_interval', 'push_interval', 'comp_check_interval']: logit("looping %s"%o) v = config.getint(s, o) config.remove_option(s, o) config.set(s, 'interval', v) changed = True if o in ['sync_days', 'push_days', 'comp_check_days']: logit("looping %s"%o) v = config.get(s, o) config.remove_option(s, o) config.set(s, 'days', v) changed = True if o in ['sync_period', 'push_period', 'comp_check_period']: logit("looping %s"%o) v = config.get(s, o) config.remove_option(s, o) config.set(s, 'period', v) changed = True if changed: logit("writing new node.conf") try: fp = open(nodeconf, 'w') config.write(fp) fp.close() except: logit("failed to write new %s"%nodeconf,stderr=True) def save_exc(): logit("begin") import traceback try: import tempfile try: import datetime now = str(datetime.datetime.now()).replace(' ', '-') except: now ="" try: f = tempfile.NamedTemporaryFile(dir=pathtmp, prefix='exc-'+now+'-') except: return f.close() f = open(f.name, 'w') traceback.print_exc(file=f) logit("unexpected error. 
stack saved in %s"%f.name,stderr=True) f.close() except: logit("unexpected error",stderr=True) traceback.print_exc() def purge_collector_api_cache(): logit("begin") fname = os.path.join(pathvar, "collector") if os.path.exists(fname) and os.path.isfile(fname): logit("unlink file %s"%fname) os.unlink(fname) def chmod_directories(): logit("begin") if not hasattr(os, "walk"): logit("os.walk not available") return if sysname == 'Windows': logit("skip : unsupported on Windows") return for d in (pathbin, pathlib, pathusr): if d is None: continue for dirname, dirnames, filenames in os.walk(d): for subdirname in dirnames: dirpath = os.path.join(dirname, subdirname) try: os.chmod(dirpath, 0755) msg = "setting %s permissions to 0755" % dirpath except: msg = "issue met while trying to set %s permissions to 0755" % dirpath logit(msg) def log_file_info(path): try: info = os.lstat(path) except: msg = "issue met while trying to get [%s] os.lstat information" % path logit(msg) return string = "uid[%d] gid[%d] perms[%s] file[%s]" % (info.st_uid, info.st_gid, oct(info.st_mode & 0777), path) logit(string) def dump_install_content(): logit("begin") if sysname == 'Windows': logit("skip : unsupported on Windows") return if not hasattr(os, "walk"): logit("os.walk not available") return for d in (pathbin, pathlib, pathusr): if d is None: continue for dirname, dirnames, filenames in os.walk(d): for subdirname in dirnames: dirpath = os.path.join(dirname, subdirname) log_file_info(dirpath) for filename in filenames: filepath = os.path.join(dirname, filename) log_file_info(filepath) def convert_to_lsb(): logit("begin") if sysname == 'Windows': logit("skip : unsupported on Windows") return if len(glob.glob(pathetc+"/*")) > 0: logit("skip : skip convert to lsb because /etc/opensvc/ is not empty") return if not os.path.exists("/opt/opensvc"): logit("skip : skip convert to lsb because /opt/opensvc/ does not exist") return for p in glob.glob("/opt/opensvc/etc/*conf") + 
glob.glob("/opt/opensvc/etc/sssu") + glob.glob("/opt/opensvc/etc/*pem") + glob.glob("/opt/opensvc/etc/*pub"): logit("migrate " + p) shutil.copy(p, pathetc) for p in glob.glob("/opt/opensvc/etc/*.env"): logit("migrate " + p) svcname = os.path.basename(p)[:-4] shutil.copy(os.path.realpath(p), pathetc) os.symlink("/usr/bin/svcmgr", os.path.join(pathetc, svcname)) for p in glob.glob("/opt/opensvc/etc/*.d") + glob.glob("/opt/opensvc/etc/*.dir"): logit("migrate " + p) if os.path.islink(p): bp = os.path.basename(p) linkto = os.readlink(p) if linkto.startswith("/opt/opensvc/etc"): linkto.replace("/opt/opensvc/etc/", "") dst = os.path.join(pathetc, bp) os.symlink(linkto, dst) elif os.path.isdir(p): bp = os.path.basename(p) dst = os.path.join(pathetc, bp) shutil.copytree(p, dst, symlinks=True) else: shutil.copy(p, pathetc) for p in glob.glob("/opt/opensvc/var/*"): if os.path.basename(p) == "btrfs": continue logit("migrate " + p) bp = os.path.basename(p) dst = os.path.join(pathvar, bp) if os.path.exists(dst): continue if os.path.isdir(p): try: shutil.copytree(p, dst, symlinks=True) except: # best effort for var pass else: shutil.copy(p, pathvar) try: move_var_files_in_subdirs() move_usr_to_opt() missing_dirs() convert_svclinks() install_cron() install_rc() gen_keys() install_profile() install_etc_path() install_bash_completion() move_host_mode() nodeconf_params() purge_collector_api_cache() chmod_directories() convert_to_lsb() move_env_to_conf() dump_install_content() logit("\nOpenSVC postinstall terminated\n",stdout=True) except: save_exc() sys.exit(1) opensvc-1.8~20170412/bin/nodemgr0000777000175000017500000000000013073467726020006 2opensvcustar jkelbertjkelbertopensvc-1.8~20170412/bin/postinstall.cmd0000644000175000017500000000066113073467726020077 0ustar jkelbertjkelbert@echo off set OSVCROOT=%~1 if %OSVCROOT:~-1%==\ set OSVCROOT=%OSVCROOT:~0,-1% set OSVCPYTHONROOT=%OSVCROOT%\python set OSVCPYTHONEXEC=%OSVCPYTHONROOT%\python.exe set PYTHONPATH=%OSVCROOT%\lib call 
"%OSVCROOT%\inpath.cmd" OSVCROOT call "%OSVCROOT%\inpath.cmd" OSVCPYTHONROOT "%OSVCPYTHONEXEC%" "%OSVCROOT%\bin\postinstall" if errorlevel 1 ( echo Failure Reason Given is %errorlevel% pause exit /b %errorlevel% ) exit /b 0 opensvc-1.8~20170412/bin/svcmon0000777000175000017500000000000013073467726017660 2opensvcustar jkelbertjkelbertopensvc-1.8~20170412/var/0000755000175000017500000000000013073467726015053 5ustar jkelbertjkelbertopensvc-1.8~20170412/var/compliance/0000755000175000017500000000000013073467726017165 5ustar jkelbertjkelbertopensvc-1.8~20170412/var/compliance/com.opensvc/0000755000175000017500000000000013073467726021417 5ustar jkelbertjkelbertopensvc-1.8~20170412/var/compliance/com.opensvc/comp.py0000644000175000017500000003171013073467726022731 0ustar jkelbertjkelbert#!/usr/bin/env python from __future__ import print_function import sys import os import re import json import base64 if sys.version_info[0] >= 3: from urllib.request import Request, urlopen from urllib.error import HTTPError from urllib.parse import urlencode else: from urllib2 import Request, urlopen from urllib2 import HTTPError from urllib import urlencode RET_OK = 0 RET_ERR = 1 RET_NA = 2 RET = RET_OK class NotApplicable(Exception): pass class Unfixable(Exception): pass class ComplianceError(Exception): pass class InitError(Exception): pass class EndRecursion(Exception): pass def pinfo(*args, **kwargs): if is_string(args) and len(args): return if isinstance(args, list) and (len(args) == 0 or len(args[0]) == 0): return kwargs["file"] = sys.stdout print(*args, **kwargs) def perror(*args, **kwargs): if is_string(args) and len(args): return if isinstance(args, list) and (len(args) == 0 or len(args[0]) == 0): return kwargs["file"] = sys.stderr print(*args, **kwargs) def is_string(s): """ python[23] compatible """ if sys.version_info[0] == 2: l = (str, unicode) else: l = (str) if isinstance(s, l): return True return False def bdecode(buff): if sys.version_info[0] < 3: return buff else: try: 
return str(buff, "utf-8") except: return str(buff, "ascii") return buff def bencode(buff): if sys.version_info[0] < 3: return buff else: try: return bytes(buff, "utf-8") except: return bytes(buff, "ascii") return buff class CompObject(object): def __init__(self, prefix=None, data={}): if prefix: self.prefix = prefix.upper() elif "default_prefix" in data: self.prefix = data["default_prefix"].upper() else: self.prefix = "MAGIX12345" self.extra_syntax_parms = data.get("extra_syntax_parms") self.example_value = data.get("example_value", "") self.example_kwargs = data.get("example_kwargs", {}) self.example_env = data.get("example_env", {}) self.description = data.get("description", "(no description)") self.form_definition = data.get("form_definition", "(no form definition)") self.init_done = False def __getattribute__(self, s): if not object.__getattribute__(self, "init_done") and s in ("check", "fix", "fixable"): object.__setattr__(self, "init_done", True) object.__getattribute__(self, "init")() return object.__getattribute__(self, s) def init(self): pass def test(self): self.__init__(**self.example_kwargs) self.prefix = "OSVC_COMP_CO_TEST" for k, v in self.example_env.items(): self.set_env(k, v) self.set_env(self.prefix, self.example_value) return self.check() def info(self): def indent(text): lines = text.split("\n") return "\n".join([" "+line for line in lines]) s = "" s += "Description\n" s += "===========\n" s += "\n" s += indent(self.description)+"\n" s += "\n" s += "Example rule\n" s += "============\n" s += "\n::\n\n" s += indent(json.dumps(json.loads(self.example_value), indent=4, separators=(',', ': ')))+"\n" s += "\n" s += "Form definition\n" s += "===============\n" s += "\n::\n\n" s += indent(self.form_definition)+"\n" s += "\n" pinfo(s) def set_prefix(self, prefix): self.prefix = prefix.upper() def set_env(self, k, v): if sys.version_info[0] < 3: v = v.decode("utf-8") os.environ[k] = v def get_env(self, k): s = os.environ[k] if sys.version_info[0] < 3: s 
= s.encode("utf-8") return s def get_rules_raw(self): rules = [] for k in [key for key in os.environ if key.startswith(self.prefix)]: s = self.subst(self.get_env(k)) rules += [s] if len(rules) == 0: raise NotApplicable("no rules (%s)" % self.prefix) return rules def encode_data(self, data): if sys.version_info[0] > 2: return data if type(data) == dict: for k in data: if isinstance(data[k], (str, unicode)): data[k] = data[k].encode("utf-8") elif isinstance(data[k], (list, dict)): data[k] = self.encode_data(data[k]) elif type(data) == list: for i, v in enumerate(data): if isinstance(v, (str, unicode)): data[i] = v.encode("utf-8") elif isinstance(data[i], (list, dict)): data[i] = self.encode_data(data[i]) return data def get_rules(self): return [self.encode_data(v[1]) for v in self.get_rule_items()] def get_rule_items(self): rules = [] for k in [key for key in os.environ if key.startswith(self.prefix)]: try: s = self.subst(self.get_env(k)) except Exception as e: perror(k, e) continue try: data = json.loads(s) except ValueError: perror('failed to concatenate', self.get_env(k), 'to rules list') if type(data) == list: for d in data: rules += [(k, d)] else: rules += [(k, data)] if len(rules) == 0: raise NotApplicable("no rules (%s)" % self.prefix) return rules def subst(self, v): """ A rule value can contain references to other rules as %%ENV:OTHER%%. This function substitutes these markers with the referenced rules values, which may themselves contain references. Hence the recursion. 
""" max_recursion = 10 if type(v) == list: l = [] for _v in v: l.append(self.subst(_v)) return l if type(v) != str and type(v) != unicode: return v p = re.compile('%%ENV:\w+%%', re.IGNORECASE) def _subst(v): matches = p.findall(v) if len(matches) == 0: raise EndRecursion for m in matches: s = m.strip("%").upper().replace('ENV:', '') if s in os.environ: _v = self.get_env(s) elif 'OSVC_COMP_'+s in os.environ: _v = self.get_env('OSVC_COMP_'+s) else: _v = "" raise NotApplicable("undefined substitution variable: %s" % s) v = v.replace(m, _v) return v for i in range(max_recursion): try: v = _subst(v) except EndRecursion: break return v def collector_api(self): if hasattr(self, "collector_api_cache"): return self.collector_api_cache import platform sysname, nodename, x, x, machine, x = platform.uname() try: import ConfigParser except ImportError: import configparser as ConfigParser config = ConfigParser.RawConfigParser({}) if os.path.realpath(__file__).startswith("/opt/opensvc"): config.read("/opt/opensvc/etc/node.conf") else: config.read("/etc/opensvc/node.conf") data = {} data["username"] = nodename data["password"] = config.get("node", "uuid") data["url"] = config.get("node", "dbopensvc").replace("/feed/default/call/xmlrpc", "/init/rest/api") self.collector_api_cache = data return self.collector_api_cache def collector_url(self): api = self.collector_api() s = "%s:%s@" % (api["username"], api["password"]) url = api["url"].replace("https://", "https://"+s) url = url.replace("http://", "http://"+s) return url def collector_request(self, path): api = self.collector_api() url = api["url"] request = Request(url+path) base64string = base64.encodestring('%s:%s' % (api["username"], api["password"])).replace('\n', '') request.add_header("Authorization", "Basic %s" % base64string) return request def collector_rest_get(self, path): api = self.collector_api() request = self.collector_request(path) if api["url"].startswith("https"): try: import ssl kwargs = {"context": 
ssl._create_unverified_context()} except: kwargs = {} else: raise ComplianceError("refuse to submit auth tokens through a non-encrypted transport") try: f = urlopen(request, **kwargs) except HTTPError as e: try: err = json.loads(e.read())["error"] e = ComplianceError(err) except: pass raise e import json data = json.loads(f.read()) f.close() return data def collector_rest_get_to_file(self, path, fpath): api = self.collector_api() request = self.collector_request(path) if api["url"].startswith("https"): try: import ssl kwargs = {"context": ssl._create_unverified_context()} except: kwargs = {} else: raise ComplianceError("refuse to submit auth tokens through a non-encrypted transport") try: f = urlopen(request, **kwargs) except HTTPError as e: try: err = json.loads(e.read())["error"] e = ComplianceError(err) except: pass raise e with open(fpath, 'wb') as df: for chunk in iter(lambda: f.read(4096), b""): df.write(chunk) f.close() def collector_safe_uri_to_uuid(self, uuid): if uuid.startswith("safe://"): uuid = uuid.replace("safe://", "") if not uuid.startswith("safe"): raise ComplianceError("malformed safe file uri: %s" % uuid) return uuid def collector_safe_file_download(self, uuid, fpath): uuid = self.collector_safe_uri_to_uuid(uuid) self.collector_rest_get_to_file("/safe/" + uuid + "/download", fpath) def collector_safe_file_get_meta(self, uuid): uuid = self.collector_safe_uri_to_uuid(uuid) data = self.collector_rest_get("/safe/" + uuid) if len(data["data"]) == 0: raise ComplianceError(uuid + ": metadata not found") return data["data"][0] def urlretrieve(self, url, fpath): request = Request(url) kwargs = {} if sys.hexversion >= 0x02070900: import ssl kwargs["context"] = ssl._create_unverified_context() f = urlopen(request, **kwargs) with open(fpath, 'wb') as df: for chunk in iter(lambda: f.read(4096), b""): df.write(chunk) def md5(self, fpath): import hashlib hash = hashlib.md5() with open(fpath, 'rb') as f: for chunk in iter(lambda: f.read(4096), b""): 
hash.update(chunk) return hash.hexdigest() def main(co): syntax = "syntax:\n" syntax += """ %s check|fix|fixable\n"""%sys.argv[0] syntax += """ %s test|info"""%sys.argv[0] try: o = co() except NotApplicable as e: pinfo(e) sys.exit(RET_NA) if o.extra_syntax_parms: syntax += " "+o.extra_syntax_parms if len(sys.argv) == 2: if sys.argv[1] == 'test': try: RET = o.test() sys.exit(RET) except ComplianceError as e: perror(e) sys.exit(RET_ERR) except NotApplicable: sys.exit(RET_NA) elif sys.argv[1] == 'info': o.info() sys.exit(0) if len(sys.argv) < 3: perror(syntax) sys.exit(RET_ERR) argv = [sys.argv[1]] if len(sys.argv) > 3: argv += sys.argv[3:] o.__init__(*argv) try: if sys.argv[2] == 'check': RET = o.check() elif sys.argv[2] == 'fix': RET = o.fix() elif sys.argv[2] == 'fixable': RET = o.fixable() else: perror("unsupported argument '%s'"%sys.argv[2]) perror(syntax) RET = RET_ERR except ComplianceError as e: perror(e) sys.exit(RET_ERR) except NotApplicable as e: pinfo(e) sys.exit(RET_NA) except: import traceback traceback.print_exc() sys.exit(RET_ERR) sys.exit(RET) if __name__ == "__main__": perror("this file is for import into compliance objects") opensvc-1.8~20170412/var/compliance/com.opensvc/timedatectl.py0000755000175000017500000001314313073467726024275 0ustar jkelbertjkelbert#!/usr/bin/env python data = { "default_prefix": "OSVC_COMP_TIMEDATECTL_", "example_value": """ { "timezone": "Europe/Paris", "ntpenabled": "no" } """, "description": """* Checks timedatectl settings * Module need to be called with the exposed target settings as variable (timedatectl.py OSVC_COMP_TIMEDATECTL_1 check) """, "form_definition": """ Desc: | A timedatectl rule, fed to the 'timedatectl' compliance object to setup rhel/centos7+ timezone/ntp. 
Css: comp48 Outputs: - Dest: compliance variable Class: timedatectl Type: json Format: dict Inputs: - Id: timezone Label: Timezone DisplayModeLabel: timezone LabelCss: action16 Mandatory: No Help: 'The timezone name, as listed by "timedatectl list-timezones" command. Example: Europe/Paris' Type: string - Id: ntpenabled Label: NTP Enabled DisplayModeLabel: ntpenabled LabelCss: time16 Mandatory: No Default: "yes" Candidates: - "yes" - "no" Help: "Specify yes or no, to request enabling or disabling the chronyd time service, driven through timedatectl command." Type: string """ } import os import sys from subprocess import * sys.path.append(os.path.dirname(__file__)) from comp import * from utilities import * class CompTimeDateCtl(CompObject): def __init__(self, prefix=None): CompObject.__init__(self, prefix=prefix, data=data) def init(self): self.sysname, self.nodename, x, x, self.machine = os.uname() self.inputs = self.get_rules()[0] if self.sysname not in ['Linux']: perror('module not supported on', self.sysname) raise NotApplicable() if which('timedatectl') is None: perror('timedatectl command not found', self.sysname) raise NotApplicable() self.tz = self.get_valid_tz() self.live = self.get_current_tdctl() def get_current_tdctl(self): """ [root@rhel71 averon]# timedatectl Local time: mar. 2016-03-29 17:13:43 CEST Universal time: mar. 2016-03-29 15:13:43 UTC RTC time: mar. 2016-03-29 15:13:42 Time zone: Europe/Paris (CEST, +0200) NTP enabled: yes NTP synchronized: yes RTC in local TZ: no DST active: yes Last DST change: DST began at dim. 2016-03-27 01:59:59 CET dim. 2016-03-27 03:00:00 CEST Next DST change: DST ends (the clock jumps one hour backwards) at dim. 2016-10-30 02:59:59 CEST dim. 
2016-10-30 02:00:00 CET """ current = {} try: cmd = ['timedatectl', 'status'] p = Popen(cmd, stdout=PIPE) out, err = p.communicate() if p.returncode != 0: raise out = bdecode(out) for line in out.splitlines(): if 'Time zone:' in line: s = line.split(':')[-1].strip() t = s.split(' ')[0] current['timezone'] = t if 'NTP enabled:' in line: current['ntpenabled'] = line.split(':')[-1].strip() except: perror('can not fetch timedatectl infos') return None return current def get_valid_tz(self): tz = [] try: cmd = ['timedatectl', '--no-pager', 'list-timezones'] p = Popen(cmd, stdout=PIPE) out, err = p.communicate() if p.returncode != 0: raise out = bdecode(out) for line in out.splitlines(): curtz = line.strip() if curtz is not '': tz.append(curtz) except: perror('can not build valid timezone list') return None return tz def fixable(self): return RET_NA def check(self): if self.live is None: return RET_NA r = RET_OK for input in self.inputs: r |= self._check(input) return r def _check(self, input): if self.inputs[input] == self.live[input]: pinfo("timedatectl %s is %s, on target" % (input, self.live[input] )) return RET_OK perror("timedatectl %s is %s, target %s" % (input, self.live[input], self.inputs[input])) return RET_ERR def set_tz(self, timezone): try: cmd = ['timedatectl', 'set-timezone', timezone] p = Popen(cmd, stdout=PIPE) out, err = p.communicate() if p.returncode != 0: raise except: perror('could not set timezone') return None return RET_OK def set_ntp(self, value): try: cmd = ['timedatectl', 'set-ntp', value] p = Popen(cmd, stdout=PIPE) out, err = p.communicate() if p.returncode != 0: raise except: perror('could not set ntp') return None return RET_OK def _fix(self, input): r = RET_OK if input in 'timezone': r |= self.set_tz(self.inputs[input]) return r if input in 'ntpenabled': r |= self.set_ntp(self.inputs[input]) return r return RET_NA def fix(self): r = RET_OK if self.check() == RET_ERR: for input in self.inputs: r |= self._fix(input) return r def test(self): 
print("Not Implemented") if __name__ == "__main__": main(CompTimeDateCtl) opensvc-1.8~20170412/var/compliance/com.opensvc/chkconfig.py0000755000175000017500000000712313073467726023732 0ustar jkelbertjkelbert#!/usr/bin/env python from subprocess import * import sys import os sys.path.append(os.path.dirname(__file__)) from comp import * os.environ['LANG'] = 'C' class InitError(Exception): pass class UnknownService(Exception): pass class SetError(Exception): pass class Chkconfig(object): def __init__(self): self.load() def __str__(self): s = "" for svc in self.services: s += "%-20s %s\n"%(svc, ' '.join(map(lambda x: '%-4s'%x, self.services[svc]))) return s def load(self): self.services = {} p = Popen(['/sbin/chkconfig', '--list'], stdout=PIPE, stderr=PIPE) out, err = p.communicate() if p.returncode != 0: raise InitError() out = bdecode(out) for line in out.splitlines(): words = line.split() if len(words) != 8: continue self.services[words[0]] = [] for w in words[1:]: level, state = w.split(':') self.services[words[0]].append(state) def load_one(self, service): p = Popen(['/sbin/chkconfig', '--list', service], stdout=PIPE, stderr=PIPE) out, err = p.communicate() if p.returncode != 0: out = bdecode(out) if 'not referenced' in out: self.services[service] = ['off', 'off', 'off', 'off', 'off', 'off'] return raise InitError() def activate(self, service): p = Popen(['chkconfig', service, 'on'], stdout=PIPE, stderr=PIPE) out, err = p.communicate() if p.returncode != 0: raise SetError() def set_state(self, service, level, state): curstate = self.get_state(service, level) if curstate == state: return p = Popen(['chkconfig', '--level', level, service, state], stdout=PIPE, stderr=PIPE) out, err = p.communicate() if p.returncode != 0: raise SetError() def get_state(self, service, level): if service not in self.services: try: self.load_one(service) except InitError: pass if service not in self.services: raise UnknownService() return self.services[service][level] def 
check_state(self, service, levels, state, seq=None, verbose=False): r = 0 for level in levels: try: level = int(level) except: continue try: curstate = self.get_state(service, level) except UnknownService: if verbose: perror("can not get service", service, "runlevels") return 1 if curstate != state: if verbose: perror("service", service, "at runlevel", level, "is in state", curstate, "! target state is", state) r |= 1 else: if verbose: pinfo("service", service, "at runlevel", level, "is in state", curstate) return r def fix_state(self, service, levels, state, seq=None): cmd = ['chkconfig', '--level', levels, service, state] pinfo("exec:", ' '.join(cmd)) p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() if p.returncode != 0: perror("failed to set", service, "runlevels") pinfo(out) perror(err) return 1 return 0 if __name__ == "__main__": o = Chkconfig() pinfo(o) pinfo('xfs@rc3 =', o.get_state('xfs', 3)) opensvc-1.8~20170412/var/compliance/com.opensvc/linux.mpath.py0000755000175000017500000003734413073467726024256 0ustar jkelbertjkelbert#!/usr/bin/env python data = { "default_prefix": "OSVC_COMP_MPATH_", "example_value": """ [ { "key": "defaults.polling_interval", "op": ">=", "value": 20 }, { "key": "device.{HP}.{HSV210.*}.prio", "op": "=", "value": "alua" }, { "key": "blacklist.wwid", "value": 600600000001, "op": "=" } ] """, "description": """* Setup and verify the Linux native multipath configuration """, "form_definition": """ Desc: | A rule to set a list of Linux multipath.conf parameters. Current values can be checked as strictly equal, or superior/inferior to their target value. Outputs: - Dest: compliance variable Type: json Format: list of dict Class: linux_mpath Inputs: - Id: key Label: Key DisplayModeTrim: 64 DisplayModeLabel: key LabelCss: action16 Mandatory: Yes Type: string Help: > The multipath.conf parameter to check. 
ex: defaults.polling_interval or device.device.{HP}.{HSV210.*} or multipaths.multipath.6006000000000000 or blacklist.wwid or blacklist.device.{HP}.{HSV210.*} - Id: op Label: Comparison operator DisplayModeLabel: op LabelCss: action16 Mandatory: Yes Type: string Default: "=" Candidates: - "=" - ">" - ">=" - "<" - "<=" Help: The comparison operator to use to check the parameter current value. - Id: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string or integer Help: The multipath.conf parameter target value. """, } import os import sys import json import re from subprocess import * sys.path.append(os.path.dirname(__file__)) from comp import * comment_chars = "#;" sections_tree = { 'defaults': {}, 'blacklist': { 'device': {}, }, 'blacklist_exceptions': { 'device': {}, }, 'devices': { 'device': {}, }, 'multipaths': { 'multipath': {}, }, } class Blacklist(object): def __init__(self, name=""): self.name = name self.wwid = [] self.devnode = [] self.devices = [] def __str__(self): s = "" if len(self.devices) + len(self.wwid) + len(self.devnode) == 0: return s s += self.name + " {\n" for wwid in self.wwid: s += "\twwid " + str(wwid) + "\n" for devnode in self.devnode: s += "\tdevnode " + str(devnode) + "\n" for device in self.devices: s += str(device) s += "}\n" return s class Section(object): def __init__(self, name="", indent=1): self.name = name self.attr = {} self.indent = "" for i in range(indent): self.indent += '\t' def __str__(self): s = "" s += self.indent + self.name + " {\n" for a, v in self.attr.items(): v = str(v) if ' ' in v: v = '"' + v + '"' s += self.indent + "\t" + a + " " + v + "\n" s += self.indent + "}\n" return s class Conf(object): def __init__(self): self.blacklist = Blacklist("blacklist") self.blacklist_exceptions = Blacklist("blacklist_exceptions") self.defaults = Section("defaults", indent=0) self.devices = [] self.multipaths = [] self.changed = False def __str__(self): s = "" s += str(self.defaults) s += 
str(self.blacklist) s += str(self.blacklist_exceptions) if len(self.devices) > 0: s += "devices {\n" for device in self.devices: s += str(device) s += "}\n" if len(self.multipaths) > 0: s += "multipaths {\n" for multipath in self.multipaths: s += str(multipath) s += "}\n" return s def set(self, key, value): index = self.parse_key(key) key = re.sub(r'\{([^\}]+)\}\.', '', key) l = key.split('.') if key.endswith('}'): a = None else: a = l[-1] if l[1] == "device": o = self.find_device(l[0], index) if o is None: o = Section("device") o.attr['vendor'] = index[0] o.attr['product'] = index[1] _l = self.get_device_list(l[0]) _l.append(o) if a is not None: o.attr[a] = value self.changed = True elif l[1] == "multipath": o = self.find_multipath(index) if o is None: o = Section("multipath") o.attr['wwid'] = index self.multipaths.append(o) o.attr[a] = value self.changed = True elif l[-1] == "wwid": o = getattr(self, l[0]) o.wwid.append(str(value)) self.changed = True elif l[-1] == "devnode": o = getattr(self, l[0]) o.devnode.append(str(value)) self.changed = True elif l[0] == "defaults": self.defaults.attr[a] = value self.changed = True def get(self, key): index = self.parse_key(key) key = re.sub(r'\{([^\}]+)\}\.', '', key) l = key.split('.') if key.endswith('}'): a = None else: a = l[-1] if len(l) < 2: perror("malformed key", key) return if l[1] == "device": o = self.find_device(l[0], index) if o: if a is None: return "" elif a in o.attr: return o.attr[a] elif l[1] == "multipath": o = self.find_multipath(index) if o and a in o.attr: return o.attr[a] elif l[-1] == "wwid": return getattr(self, l[0]).wwid elif l[-1] == "devnode": return getattr(self, l[0]).devnode elif l[0] == "defaults": if a in self.defaults.attr: return self.defaults.attr[a] def find_multipath(self, index): wwid = index for multipath in self.multipaths: if multipath.attr['wwid'] == wwid: return multipath def get_device_list(self, section): l = getattr(self, section) if type(l) != list and hasattr(l, "devices"): 
l = getattr(l, "devices") if type(l) != list: return return l def find_device(self, section, index): vendor, product = index l = self.get_device_list(section) if not l: return for device in l: if 'vendor' not in device.attr or \ 'product' not in device.attr: continue if device.attr['vendor'] == vendor and \ device.attr['product'] == product: return device def parse_key(self, key): key = key.strip() m = re.search(r'device\.\{([^\}]+)\}\.\{([^\}]+)\}', key) if m: return m.group(1), m.group(2) m = re.search(r'multipath\.\{([^\}]+)\}', key) if m: return m.group(1) class LinuxMpath(CompObject): def __init__(self, prefix=None): CompObject.__init__(self, prefix=prefix, data=data) def init(self): self.need_restart = False self.cf = os.path.join(os.sep, 'etc', 'multipath.conf') self.nocf = False self.conf = Conf() self.keys = self.get_rules() self.load_file(self.cf) def fixable(self): return RET_OK def load_file(self, p): if not os.path.exists(p): perror(p, "does not exist") self.nocf = True return with open(p, 'r') as f: buff = f.read() buff = self.strip_comments(buff) self._load_file(buff, sections_tree) def strip_comments(self, buff): lines = buff.split('\n') l = [] for line in lines: line = line.strip() if len(line) == 0: continue discard = False for c in comment_chars: if line[0] == c: discard = True break try: i = line.index(c) line = line[:i] except ValueError: pass if not discard and len(line) > 0: l.append(line) return "\n".join(l) def _load_file(self, buff, sections, chain=[]): for section, subsections in sections.items(): _chain = chain + [section] _buff = buff while True: data = self.load_section(_buff, section) if data is None: break _buff = data[1] self.load_keywords(data[0], subsections, _chain) self._load_file(data[0], subsections, _chain) def load_keywords(self, buff, subsections, chain): keywords = {} keyword = None for line in buff.split('\n'): if len(line) == 0: continue keyword = line.split()[0] if keyword in subsections: continue value = 
line[len(keyword):].strip().strip('"') if len(value) == 0: continue if keyword in ('wwid', 'devnode') and chain[-1].startswith('blacklist'): if keyword not in keywords: keywords[keyword] = [value] else: keywords[keyword] += [value] else: keywords[keyword] = value if chain[-1] == 'device' and chain[0] == 'devices': s = Section("device") s.attr = keywords self.conf.devices.append(s) elif chain[-1] == 'multipath': s = Section("multipath") s.attr = keywords self.conf.multipaths.append(s) elif chain[-1] == 'device' and chain[0] == 'blacklist': s = Section("device") s.attr = keywords self.conf.blacklist.devices.append(s) elif chain[-1] == 'device' and chain[0] == 'blacklist exceptions': s = Section("device") s.attr = keywords self.conf.blacklist_exceptions.devices.append(s) elif chain[-1] == 'blacklist': if 'wwid' in keywords: self.conf.blacklist.wwid = keywords['wwid'] if 'devnode' in keywords: self.conf.blacklist.devnode = keywords['devnode'] elif chain[-1] == 'blacklist_exceptions': if 'wwid' in keywords: self.conf.blacklist_exceptions.wwid = keywords['wwid'] if 'devnode' in keywords: self.conf.blacklist_exceptions.devnode = keywords['devnode'] elif chain[-1] == 'defaults': self.conf.defaults.attr = keywords def load_section(self, buff, section): l = [] try: start = buff.index(section) except ValueError: return buff = buff[start:] try: buff = buff[buff.index('{')+1:] except ValueError: return depth = 1 for i, c in enumerate(buff): if c == '{': depth += 1 elif c == '}': depth -= 1 if depth == 0: return buff[:i], buff[i+1:] return def _check_key(self, keyname, target, op, value, verbose=True): r = RET_OK if value is None: if verbose: perror("%s is not set"%keyname) return RET_ERR if type(value) == list: if str(target) in value: if verbose: pinfo("%s=%s on target"%(keyname, str(value))) return RET_OK else: if verbose: perror("%s=%s is not set"%(keyname, str(target))) return RET_ERR if op == '=': target = str(target).strip() if str(value) != target: if verbose: 
perror("%s=%s, target: %s"%(keyname, str(value), target)) r |= RET_ERR elif verbose: pinfo("%s=%s on target"%(keyname, str(value))) else: if type(value) != int: if verbose: perror("%s=%s value must be integer"%(keyname, str(value))) r |= RET_ERR elif op == '<=' and value > target: if verbose: perror("%s=%s target: <= %s"%(keyname, str(value), str(target))) r |= RET_ERR elif op == '>=' and value < target: if verbose: perror("%s=%s target: >= %s"%(keyname, str(value), str(target))) r |= RET_ERR elif verbose: pinfo("%s=%s on target"%(keyname, str(value))) return r def check_key(self, key, verbose=True): if 'key' not in key: if verbose: perror("'key' not set in rule %s"%str(key)) return RET_NA if 'value' not in key: if verbose: perror("'value' not set in rule %s"%str(key)) return RET_NA if 'op' not in key: op = "=" else: op = key['op'] target = key['value'] if op not in ('>=', '<=', '='): if verbose: perror("'op' must be either '=', '>=' or '<=': %s"%str(key)) return RET_NA keyname = key['key'] value = self.conf.get(keyname) if value is None: if verbose: perror("%s key is not set"%keyname) return RET_ERR r = self._check_key(keyname, target, op, value, verbose=verbose) return r def fix_key(self, key): pinfo("%s=%s set"%(key['key'], key['value'])) self.conf.set(key['key'], key['value']) def check(self): r = 0 for key in self.keys: r |= self.check_key(key, verbose=True) return r def fix(self): for key in self.keys: if self.check_key(key, verbose=False) == RET_ERR: self.fix_key(key) if not self.conf.changed: return if not self.nocf: import datetime backup = self.cf+'.'+str(datetime.datetime.now()) try: import shutil shutil.copy(self.cf, backup) except: perror("failed to backup %s"%self.cf) return RET_ERR pinfo(self.cf, "backed up as %s"%backup) try: with open(self.cf, 'w') as f: f.write(str(self.conf)) pinfo(self.cf, "rewritten") self.need_restart = True except: perror("failed to write %s"%self.cf) if not self.nocf: shutil.copy(backup, self.cf) pinfo("backup restored") 
return RET_ERR self.restart_daemon() return RET_OK def restart_daemon(self): if not self.need_restart: return candidates = [ "/etc/init.d/multipathd", "/etc/init.d/multipath-tools", ] fpath = None for i in candidates: if os.path.exists(i): fpath = i break if fpath is None: perror("multipath tools startup script not found") return RET_ERR pinfo("restarting multipath daemon") cmd = [fpath, "restart"] p = Popen(cmd, stdin=None, stdout=PIPE, stderr=PIPE) out, err = p.communicate() err = bdecode(err) if len(err) > 0: perror(err) if __name__ == "__main__": main(LinuxMpath) opensvc-1.8~20170412/var/compliance/com.opensvc/user.py0000755000175000017500000004024013073467726022752 0ustar jkelbertjkelbert#!/usr/bin/env python data = { "default_prefix": "OSVC_COMP_USER_", "example_value": """ { "tibco": { "shell": "/bin/ksh", "gecos":"agecos" }, "tibco1": { "shell": "/bin/tcsh", "gecos": "another gecos" } } """, "description": """* Verify a local system user configuration * A minus (-) prefix to the user name indicates the user should not exist Environment variable modifying the object behaviour: * OSVC_COMP_USERS_INITIAL_PASSWD=true|false """, "form_definition": """ Desc: | A rule defining a list of Unix users and their properties. Used by the users and group_membership compliance objects. Css: comp48 Outputs: - Dest: compliance variable Type: json Format: dict of dict Key: user EmbedKey: No Class: user Inputs: - Id: user Label: User name DisplayModeLabel: user LabelCss: guy16 Mandatory: Yes Type: string Help: The Unix user name. - Id: uid Label: User id DisplayModeLabel: uid LabelCss: guy16 Mandatory: Yes Type: string or integer Help: The Unix uid of this user. - Id: gid Label: Group id DisplayModeLabel: gid LabelCss: guys16 Mandatory: Yes Type: string or integer Help: The Unix principal gid of this user. - Id: shell Label: Login shell DisplayModeLabel: shell LabelCss: action16 Type: string Help: The Unix login shell for this user. 
- Id: home Label: Home directory DisplayModeLabel: home LabelCss: action16 Type: string Help: The Unix home directory full path for this user. - Id: password Label: Password hash DisplayModeLabel: pwd LabelCss: action16 Type: string Help: The password hash for this user. It is recommanded to set it to '!!' or to set initial password to change upon first login. Leave empty to not check nor set the password. - Id: gecos Label: Gecos DisplayModeLabel: gecos LabelCss: action16 Type: string Help: A one-line comment field describing the user. - Id: check_home Label: Enforce homedir ownership DisplayModeLabel: home ownership LabelCss: action16 Type: string Default: yes Candidates: - "yes" - "no" Help: Toggles the user home directory ownership checking. """, } import os import sys import json import pwd import re from utilities import which try: import spwd cap_shadow = True except: cap_shadow = False from subprocess import Popen, list2cmdline, PIPE sys.path.append(os.path.dirname(__file__)) from comp import * blacklist = [ "root", "bin", "daemon", "adm", "lp", "sync", "shutdown", "halt", "mail", "news", "uucp", "operator", "nobody", "nscd", "vcsa", "pcap", "mailnull", "smmsp", "sshd", "rpc", "avahi", "rpcuser", "nfsnobody", "haldaemon", "avahi-autoipd", "ntp" ] class CompUser(CompObject): def __init__(self, prefix=None): CompObject.__init__(self, prefix=prefix, data=data) def init(self): self.pwt = { 'shell': 'pw_shell', 'home': 'pw_dir', 'uid': 'pw_uid', 'gid': 'pw_gid', 'gecos': 'pw_gecos', 'password': 'pw_passwd', } self.spwt = { 'spassword': 'sp_pwd', } self.usermod_p = { 'shell': '-s', 'home': '-d', 'uid': '-u', 'gid': '-g', 'gecos': '-c', 'password': '-p', 'spassword': '-p', } self.sysname, self.nodename, x, x, self.machine = os.uname() if "OSVC_COMP_USERS_INITIAL_PASSWD" in os.environ and \ os.environ["OSVC_COMP_USERS_INITIAL_PASSWD"] == "true": self.initial_passwd = True else: self.initial_passwd = False if self.sysname not in ['SunOS', 'Linux', 'HP-UX', 'AIX', 
'OSF1', 'FreeBSD']: perror('module not supported on', self.sysname) raise NotApplicable() if self.sysname == "FreeBSD": self.useradd = ["pw", "useradd"] self.usermod = ["pw", "usermod"] self.userdel = ["pw", "userdel"] else: self.useradd = ["useradd"] self.usermod = ["usermod"] self.userdel = ["userdel"] self.users = {} for d in self.get_rules(): for user in d: if user not in self.users: self.users[user] = d[user] else: for key in self.usermod_p.keys(): if key in d[user] and key not in self.users[user]: self.users[user][key] = d[user][key] for user, d in self.users.items(): for k in ('uid', 'gid'): if k in self.users[user]: self.users[user][k] = int(d[k]) if "password" in d and len(d["password"]) == 0: del(self.users[user]["password"]) if cap_shadow: if "password" in d and len(d["password"]) > 0 and \ ("spassword" not in d or len(d["spassword"]) == 0): self.users[user]["spassword"] = self.users[user]["password"] del self.users[user]["password"] if "spassword" not in d: self.users[user]["spassword"] = "x" else: if "spassword" in d and len(d["spassword"]) > 0 and \ ("password" not in d or len(d["password"]) == 0): self.users[user]["password"] = self.users[user]["spassword"] del self.users[user]["spassword"] if "password" not in d: self.users[user]["password"] = "x" def fixable(self): if not which(self.usermod[0]): perror(self.usermod[0], "program not found") return RET_ERR return RET_OK def grpconv(self): if not cap_shadow or not os.path.exists('/etc/gshadow'): return if not which('grpconv'): return with open('/etc/group', 'r') as f: buff = f.read() l = [] for line in buff.split('\n'): u = line.split(':')[0] if u in l: perror("duplicate group %s in /etc/group. 
skip grpconv (grpconv bug workaround)"%u) return l.append(u) p = Popen(['grpconv']) p.communicate() def pwconv(self): if not cap_shadow or not os.path.exists('/etc/shadow'): return if not which('pwconv'): return p = Popen(['pwconv']) p.communicate() def fix_item(self, user, item, target): if item in ["password", "spassword"]: if self.initial_passwd: pinfo("skip", user, "password modification in initial_passwd mode") return RET_OK if target == "x": return RET_OK if self.sysname in ("AIX"): return RET_OK cmd = [] + self.usermod if self.sysname == "FreeBSD": cmd.append(user) cmd += [self.usermod_p[item], str(target)] if item == 'home': cmd.append('-m') if self.sysname != "FreeBSD": cmd.append(user) pinfo(list2cmdline(cmd)) p = Popen(cmd) out, err = p.communicate() r = p.returncode self.pwconv() self.grpconv() if r == 0: return RET_OK else: return RET_ERR def check_item(self, user, item, target, current, verbose=False): if type(current) == int and current < 0: current += 4294967296 if sys.version_info[0] < 3 and type(current) == str and type(target) == unicode: current = unicode(current, errors="ignore") if target == current: if verbose: pinfo('user', user, item+':', current) return RET_OK elif "passw" in item and target == "!!" 
and current == "": if verbose: pinfo('user', user, item+':', current) return RET_OK else: if verbose: perror('user', user, item+':', current, 'target:', target) return RET_ERR def check_user_del(self, user, verbose=True): r = 0 try: userinfo=pwd.getpwnam(user) except KeyError: if verbose: pinfo('user', user, 'does not exist, on target') return RET_OK if verbose: perror('user', user, "exists, shouldn't") return RET_ERR def check_user(self, user, props, verbose=True): if user.startswith('-'): return self.check_user_del(user.lstrip('-'), verbose=verbose) r = 0 try: userinfo=pwd.getpwnam(user) except KeyError: if self.try_create_user(props): if verbose: perror('user', user, 'does not exist') return RET_ERR else: if verbose: perror('user', user, 'does not exist and not enough info to create it') return RET_ERR for prop in self.pwt: if prop in props: if prop == "password": if self.initial_passwd: if verbose: pinfo("skip", user, "passwd checking in initial_passwd mode") continue if props[prop] == "x": continue r |= self.check_item(user, prop, props[prop], getattr(userinfo, self.pwt[prop]), verbose=verbose) if 'check_home' not in props or props['check_home'] == "yes": r |= self.check_home_ownership(user, verbose=verbose) if not cap_shadow: return r try: usersinfo=spwd.getspnam(user) except KeyError: if "spassword" in props: if verbose: perror(user, "not declared in /etc/shadow") r |= RET_ERR usersinfo = None if usersinfo is not None: for prop in self.spwt: if prop in props: if prop == "spassword": if self.initial_passwd: if verbose: pinfo("skip", user, "spasswd checking in initial_passwd mode") continue if props[prop] == "x": continue r |= self.check_item(user, prop, props[prop], getattr(usersinfo, self.spwt[prop]), verbose=verbose) return r def try_create_user(self, props): # # don't try to create user if passwd db is not 'files' # beware: 'files' db is the implicit default # if 'db' in props and props['db'] != 'files': return False return True def get_uid(self, user): 
import pwd try: info=pwd.getpwnam(user) uid = info[2] except: perror("user %s does not exist"%user) raise ComplianceError() return uid def check_home_ownership(self, user, verbose=True): path = os.path.expanduser("~"+user) if not os.path.exists(path): if verbose: perror(path, "homedir does not exist") return RET_ERR tuid = self.get_uid(user) uid = os.stat(path).st_uid if uid != tuid: if verbose: perror(path, 'uid should be %s but is %s'%(str(tuid), str(uid))) return RET_ERR if verbose: pinfo(path, 'owner is', user) return RET_OK def fix_home_ownership(self, user): if self.check_home_ownership(user, verbose=False) == RET_OK: return RET_OK uid = self.get_uid(user) path = os.path.expanduser("~"+user) if not os.path.exists(path): if os.path.exists("/etc/skel"): cmd = ['cp', '-R', '/etc/skel/', path] pinfo(list2cmdline(cmd)) p = Popen(cmd) out, err = p.communicate() r = p.returncode if r != 0: return RET_ERR cmd = ['chown', '-R', str(uid), path] pinfo(list2cmdline(cmd)) p = Popen(cmd) out, err = p.communicate() r = p.returncode if r != 0: return RET_ERR else: os.makedirs(path) os.chown(path, uid, -1) return RET_OK def unlock_user(self, user): if self.sysname != "SunOS": return cmd = ["uname", "-r"] p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() if p.returncode != 0: return if out.strip() == '5.8': unlock_opt = '-d' else: unlock_opt = '-u' cmd = ["passwd", unlock_opt, user] pinfo(list2cmdline(cmd)) p = Popen(cmd) out, err = p.communicate() r = p.returncode if r == 0: return RET_OK else: return RET_ERR def create_user(self, user, props): cmd = [] + self.useradd if self.sysname == "FreeBSD": cmd += [user] for item in props: if item == "check_home": continue prop = str(props[item]) if len(prop) == 0: continue if item.endswith("password") and self.sysname in ("AIX", "SunOS", "OSF1"): continue cmd = cmd + self.usermod_p[item].split() + [prop] if item == "home": cmd.append("-m") if self.sysname != "FreeBSD": cmd += [user] pinfo(list2cmdline(cmd)) p = 
Popen(cmd) out, err = p.communicate() r = p.returncode if r == 0: if self.unlock_user(user) == RET_ERR: return RET_ERR return RET_OK else: return RET_ERR def fix_user_del(self, user): if user in blacklist: perror("delete", user, "... cowardly refusing") return RET_ERR cmd = self.userdel + [user] pinfo(list2cmdline(cmd)) p = Popen(cmd) out, err = p.communicate() r = p.returncode if r == 0: return RET_OK else: return RET_ERR def fix_user(self, user, props): if user.startswith('-'): return self.fix_user_del(user.lstrip('-')) r = 0 try: userinfo = pwd.getpwnam(user) except KeyError: if self.try_create_user(props): return self.create_user(user, props) else: pinfo('user', user, 'does not exist and not enough info to create it') return RET_OK for prop in self.pwt: if prop in props and \ self.check_item(user, prop, props[prop], getattr(userinfo, self.pwt[prop])) != RET_OK: r |= self.fix_item(user, prop, props[prop]) if 'check_home' not in props or props['check_home'] == "yes": r |= self.fix_home_ownership(user) if not cap_shadow: return r try: usersinfo = spwd.getspnam(user) except KeyError: if "spassword" in props: self.fix_item(user, "spassword", props["spassword"]) usersinfo = spwd.getspnam(user) else: usersinfo = None if usersinfo is not None: for prop in self.spwt: if prop in props and \ self.check_item(user, prop, props[prop], getattr(usersinfo, self.spwt[prop])) != RET_OK: r |= self.fix_item(user, prop, props[prop]) return r def check(self): r = 0 for user, props in self.users.items(): r |= self.check_user(user, props) return r def fix(self): r = 0 for user, props in self.users.items(): if self.check_user(user, props, verbose=False) == RET_ERR: r |= self.fix_user(user, props) return r if __name__ == "__main__": main(CompUser) opensvc-1.8~20170412/var/compliance/com.opensvc/authkey.py0000755000175000017500000004165213073467726023456 0ustar jkelbertjkelbert#!/usr/bin/env python data = { "default_prefix": "OSVC_COMP_AUTHKEY_", "example_value": """ { "action": "add", 
"authfile": "authorized_keys", "user": "testuser", "key": "ssh-dss AAAAB3NzaC1kc3MAAACBAPiO1jlT+5yrdPLfQ7sYF52NkfCEzT0AUUNIl+14Sbkubqe+TcU7U3taUtiDJ5YOGOzIVFIDGGtwD0AqNHQbvsiS1ywtC5BJ9362FlrpVH4o1nVZPvMxRzz5hgh3HjxqIWqwZDx29qO8Rg1/g1Gm3QYCxqPFn2a5f2AUiYqc1wtxAAAAFQC49iboZGNqssicwUrX6TUrT9H0HQAAAIBo5dNRmTF+Vd/+PI0JUOIzPJiHNKK9rnySlaxSDml9hH2LuDSjYz7BWuNP8UnPOa2pcFA4meDp5u8d5dGOWxkuYO0bLnXwDZuHtDW/ySytjwEaBLPxoqRBAyfyQNlusGsuiqDYRA7j7bS0RxINBxvDw79KdyQhuOn8/lKVG+sjrQAAAIEAoShly/JlGLQxQzPyWADV5RFlaRSPaPvFzcYT3hS+glkVd6yrCbzc30Yc8Ndu4cflQiXSZzRoUMgsy5PzuiH1M8JjwHTGNl8r9OfJpnN/OaAhMpIyA06y1ZZD9iEME3UmthFQoZnfRuE3yxi7bqyXJU4rOq04iyCTpU1UKInPdXQ= testuser" } """, "description": """* Installs or removes ssh public keys from authorized_key files * Looks up the authorized_key and authorized_key2 file location in the running sshd daemon configuration. * Add user to sshd_config AllowUser and AllowGroup if used * Reload sshd if sshd_config has been changed """, "form_definition": """ Desc: | Describe a list of ssh public keys to authorize login as the specified Unix user. Css: comp48 Outputs: - Dest: compliance variable Type: json Format: dict Class: authkey Inputs: - Id: action Label: Action DisplayModeLabel: action LabelCss: action16 Mandatory: Yes Type: string Candidates: - add - del Help: Defines wether the public key must be installed or uninstalled. - Id: user Label: User DisplayModeLabel: user LabelCss: guy16 Mandatory: Yes Type: string Help: Defines the Unix user name who will accept those ssh public keys. - Id: key Label: Public key DisplayModeLabel: key LabelCss: guy16 Mandatory: Yes Type: text DisplayModeTrim: 60 Help: The ssh public key as seen in authorized_keys files. - Id: authfile Label: Authorized keys file name DisplayModeLabel: authfile LabelCss: hd16 Mandatory: Yes Candidates: - authorized_keys - authorized_keys2 Default: authorized_keys2 Type: string Help: The authorized_keys file to write the keys into. 
""" } import os import sys import pwd, grp import datetime import shutil from subprocess import * sys.path.append(os.path.dirname(__file__)) from comp import * class CompAuthKeys(CompObject): def __init__(self, prefix=None): CompObject.__init__(self, prefix=prefix, data=data) def init(self): self.authkeys = self.get_rules() for ak in self.authkeys: ak['key'] = ak['key'].replace('\n', '') self.installed_keys_d = {} self.default_authfile = "authorized_keys2" self.allowusers_check_done = [] self.allowusers_fix_todo = [] self.allowgroups_check_done = [] self.allowgroups_fix_todo = [] def sanitize(self, ak): if 'user' not in ak: perror("no user set in rule") return False if 'key' not in ak: perror("no key set in rule") return False if 'action' not in ak: ak['action'] = 'add' if 'authfile' not in ak: ak['authfile'] = self.default_authfile if ak['authfile'] not in ("authorized_keys", "authorized_keys2"): perror("unsupported authfile:", ak['authfile'], "(default to", self.default_authfile+")") ak['authfile'] = self.default_authfile for key in ('user', 'key', 'action', 'authfile'): ak[key] = ak[key].strip() return ak def fixable(self): return RET_NA def truncate_key(self, key): if len(key) < 50: s = key else: s = "'%s ... 
%s'" % (key[0:17], key[-30:]) return s def reload_sshd(self): cmd = ['ps', '-ef'] p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() if p.returncode != 0: perror("can not find sshd process") return RET_ERR out = bdecode(out) for line in out.splitlines(): if not line.endswith('sbin/sshd'): continue l = line.split() pid = int(l[1]) name = l[-1] pinfo("send sighup to pid %d (%s)" % (pid, name)) os.kill(pid, 1) return RET_OK perror("can not find sshd process to signal") return RET_ERR def get_sshd_config(self): cfs = [] if hasattr(self, "cache_sshd_config_f"): return self.cache_sshd_config_f cmd = ['ps', '-eo', 'comm'] p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() if p.returncode == 0: out = bdecode(out) l = out.splitlines() if '/usr/local/sbin/sshd' in l: cfs.append(os.path.join(os.sep, 'usr', 'local', 'etc', 'sshd_config')) if '/usr/sfw/sbin/sshd' in l: cfs.append(os.path.join(os.sep, 'etc', 'sshd_config')) cfs += [os.path.join(os.sep, 'etc', 'ssh', 'sshd_config'), os.path.join(os.sep, 'opt', 'etc', 'sshd_config'), os.path.join(os.sep, 'etc', 'opt', 'ssh', 'sshd_config'), os.path.join(os.sep, 'usr', 'local', 'etc', 'sshd_config')] cf = None for _cf in cfs: if os.path.exists(_cf): cf = _cf break self.cache_sshd_config_f = cf if cf is None: perror("sshd_config not found") return None return cf def _get_authkey_file(self, key): if key == "authorized_keys": # default return ".ssh/authorized_keys" elif key == "authorized_keys2": key = "AuthorizedKeysFile" else: perror("unknown key", key) return None cf = self.get_sshd_config() if cf is None: perror("sshd_config not found") return None with open(cf, 'r') as f: buff = f.read() for line in buff.split('\n'): l = line.split() if len(l) != 2: continue if l[0].strip() == key: return l[1] # not found, return default return ".ssh/authorized_keys2" def get_allowusers(self): if hasattr(self, "cache_allowusers"): return self.cache_allowusers cf = self.get_sshd_config() if cf is None: 
perror("sshd_config not found") return None with open(cf, 'r') as f: buff = f.read() for line in buff.split('\n'): l = line.split() if len(l) < 2: continue if l[0].strip() == "AllowUsers": self.cache_allowusers = l[1:] return l[1:] self.cache_allowusers = None return None def get_allowgroups(self): if hasattr(self, "cache_allowgroups"): return self.cache_allowgroups cf = self.get_sshd_config() if cf is None: perror("sshd_config not found") return None with open(cf, 'r') as f: buff = f.read() for line in buff.split('\n'): l = line.split() if len(l) < 2: continue if l[0].strip() == "AllowGroups": self.cache_allowgroups = l[1:] return l[1:] self.cache_allowgroups = None return None def get_authkey_file(self, key, user): p = self._get_authkey_file(key) if p is None: return None p = p.replace('%u', user) p = p.replace('%h', os.path.expanduser('~'+user)) p = p.replace('~', os.path.expanduser('~'+user)) if not p.startswith('/'): p = os.path.join(os.path.expanduser('~'+user), p) return p def get_authkey_files(self, user): l = [] p = self.get_authkey_file('authorized_keys', user) if p is not None: l.append(p) p = self.get_authkey_file('authorized_keys2', user) if p is not None: l.append(p) return l def get_installed_keys(self, user): if user in self.installed_keys_d: return self.installed_keys_d[user] else: self.installed_keys_d[user] = [] ps = self.get_authkey_files(user) for p in ps: if not os.path.exists(p): continue with open(p, 'r') as f: self.installed_keys_d[user] += f.read().splitlines() return self.installed_keys_d[user] def get_user_group(self, user): gid = pwd.getpwnam(user).pw_gid try: gname = grp.getgrgid(gid).gr_name except KeyError: gname = None return gname def fix_allowusers(self, ak, verbose=True): self.check_allowuser(ak, verbose=False) if not ak['user'] in self.allowusers_fix_todo: return RET_OK self.allowusers_fix_todo.remove(ak['user']) au = self.get_allowusers() if au is None: return RET_OK l = ["AllowUsers"] + au + [ak['user']] s = " ".join(l) 
pinfo("adding", ak['user'], "to currently allowed users") cf = self.get_sshd_config() if cf is None: perror("sshd_config not found") return None with open(cf, 'r') as f: buff = f.read() lines = buff.split('\n') for i, line in enumerate(lines): l = line.split() if len(l) < 2: continue if l[0].strip() == "AllowUsers": lines[i] = s buff = "\n".join(lines) backup = cf+'.'+str(datetime.datetime.now()) shutil.copy(cf, backup) with open(cf, 'w') as f: f.write(buff) self.reload_sshd() return RET_OK def fix_allowgroups(self, ak, verbose=True): self.check_allowgroup(ak, verbose=False) if not ak['user'] in self.allowgroups_fix_todo: return RET_OK self.allowgroups_fix_todo.remove(ak['user']) ag = self.get_allowgroups() if ag is None: return RET_OK ak['group'] = self.get_user_group(ak['user']) if ak['group'] is None: perror("can not set AllowGroups in sshd_config: primary group of user %s not found" % ak['user']) return RET_ERR l = ["AllowGroups"] + ag + [ak['group']] s = " ".join(l) pinfo("adding", ak['group'], "to currently allowed groups") cf = self.get_sshd_config() if cf is None: perror("sshd_config not found") return RET_ERR with open(cf, 'r') as f: buff = f.read() lines = buff.split('\n') for i, line in enumerate(lines): l = line.split() if len(l) < 2: continue if l[0].strip() == "AllowGroups": lines[i] = s buff = "\n".join(lines) backup = cf+'.'+str(datetime.datetime.now()) shutil.copy(cf, backup) with open(cf, 'w') as f: f.write(buff) self.reload_sshd() return RET_OK def check_allowuser(self, ak, verbose=True): if ak['user'] in self.allowusers_check_done: return RET_OK self.allowusers_check_done.append(ak['user']) au = self.get_allowusers() if au is None: return RET_OK elif ak['user'] in au: if verbose: pinfo(ak['user'], "is correctly set in sshd AllowUsers") r = RET_OK else: if verbose: perror(ak['user'], "is not set in sshd AllowUsers") self.allowusers_fix_todo.append(ak['user']) r = RET_ERR return r def check_allowgroup(self, ak, verbose=True): if ak['user'] in 
self.allowgroups_check_done: return RET_OK self.allowgroups_check_done.append(ak['user']) ag = self.get_allowgroups() if ag is None: return RET_OK ak['group'] = self.get_user_group(ak['user']) if ak['group'] is None: if verbose: perror("can not determine primary group of user %s to add to AllowGroups" % ak['user']) return RET_ERR elif ak['group'] in ag: if verbose: pinfo(ak['group'], "is correctly set in sshd AllowGroups") r = RET_OK else: if verbose: perror(ak['group'], "is not set in sshd AllowGroups") self.allowgroups_fix_todo.append(ak['user']) r = RET_ERR return r def check_authkey(self, ak, verbose=True): ak = self.sanitize(ak) installed_keys = self.get_installed_keys(ak['user']) if ak['action'] == 'add': if ak['key'] not in installed_keys: if verbose: perror('key', self.truncate_key(ak['key']), 'must be installed for user', ak['user']) r = RET_ERR else: if verbose: pinfo('key', self.truncate_key(ak['key']), 'is correctly installed for user', ak['user']) r = RET_OK elif ak['action'] == 'del': if ak['key'] in installed_keys: if verbose: perror('key', self.truncate_key(ak['key']), 'must be uninstalled for user', ak['user']) r = RET_ERR else: if verbose: pinfo('key', self.truncate_key(ak['key']), 'is correctly not installed for user', ak['user']) r = RET_OK else: perror("unsupported action:", ak['action']) return RET_ERR return r def fix_authkey(self, ak): ak = self.sanitize(ak) if ak['action'] == 'add': r = self.add_authkey(ak) return r elif ak['action'] == 'del': return self.del_authkey(ak) else: perror("unsupported action:", ak['action']) return RET_ERR def add_authkey(self, ak): if self.check_authkey(ak, verbose=False) == RET_OK: return RET_OK try: userinfo=pwd.getpwnam(ak['user']) except KeyError: perror('user', ak['user'], 'does not exist') return RET_ERR p = self.get_authkey_file(ak['authfile'], ak['user']) if p is None: perror("could not determine", ak['authfile'], "location") return RET_ERR base = os.path.dirname(p) if not os.path.exists(base): 
os.makedirs(base, 0o0700) pinfo(base, "created") if p.startswith(os.path.expanduser('~'+ak['user'])): os.chown(base, userinfo.pw_uid, userinfo.pw_gid) pinfo(base, "ownership set to %d:%d"%(userinfo.pw_uid, userinfo.pw_gid)) if not os.path.exists(p): with open(p, 'w') as f: f.write("") pinfo(p, "created") os.chmod(p, 0o0600) pinfo(p, "mode set to 0600") os.chown(p, userinfo.pw_uid, userinfo.pw_gid) pinfo(p, "ownetship set to %d:%d"%(userinfo.pw_uid, userinfo.pw_gid)) with open(p, 'a') as f: f.write(ak['key']) if not ak['key'].endswith('\n'): f.write('\n') pinfo('key', self.truncate_key(ak['key']), 'installed for user', ak['user']) return RET_OK def del_authkey(self, ak): if self.check_authkey(ak, verbose=False) == RET_OK: pinfo('key', self.truncate_key(ak['key']), 'is already not installed for user', ak['user']) return RET_OK ps = self.get_authkey_files(ak['user']) for p in ps: base = os.path.basename(p) if not os.path.exists(p): continue with open(p, 'r') as f: l = f.read().split('\n') n = len(l) while True: try: l.remove(ak['key'].replace('\n', '')) except ValueError: break if len(l) == n: # nothing changed continue with open(p, 'w') as f: f.write('\n'.join(l)) pinfo('key', self.truncate_key(ak['key']), 'uninstalled for user', ak['user']) return RET_OK def check(self): r = 0 for ak in self.authkeys: r |= self.check_authkey(ak) if ak['action'] == 'add': r |= self.check_allowgroup(ak) r |= self.check_allowuser(ak) return r def fix(self): r = 0 for ak in self.authkeys: r |= self.fix_authkey(ak) if ak['action'] == 'add': r |= self.fix_allowgroups(ak) r |= self.fix_allowusers(ak) return r if __name__ == "__main__": main(CompAuthKeys) opensvc-1.8~20170412/var/compliance/com.opensvc/group_membership.py0000755000175000017500000001732413073467726025352 0ustar jkelbertjkelbert#!/usr/bin/env python data = { "default_prefix": "OSVC_COMP_GROUP_", "example_value": """ { "tibco": { "members": ["tibco", "tibco1"] }, "tibco1": { "members": ["tibco1"] } } """, "description": """* 
Verify a local system group configuration * A minus (-) prefix to the group name indicates the user should not exist """, "form_definition": """ Desc: | A rule defining a list of Unix groups and their user membership. The referenced users and groups must exist. Css: comp48 Outputs: - Dest: compliance variable Type: json Format: dict of dict Key: group EmbedKey: No Class: group_membership Inputs: - Id: group Label: Group name DisplayModeLabel: group LabelCss: guys16 Mandatory: Yes Type: string Help: The Unix group name. - Id: members Label: Group members DisplayModeLabel: members LabelCss: guy16 Type: list of string Help: A comma-separed list of Unix user names members of this group. """, } import os import sys import json import grp from subprocess import * from utilities import which sys.path.append(os.path.dirname(__file__)) from comp import * class CompGroupMembership(CompObject): def __init__(self, prefix=None): CompObject.__init__(self, prefix=prefix, data=data) def init(self): self.member_of_h = {} self.grt = { 'members': 'gr_mem', } self.sysname, self.nodename, x, x, self.machine = os.uname() if self.sysname not in ['SunOS', 'Linux', 'HP-UX', 'AIX', 'OSF1']: perror('group_membership: compliance object not supported on', self.sysname) raise NotApplicable self.groups = {} for d in self.get_rules(): if type(d) != dict: continue for k, v in d.items(): if "members" not in v: continue for i, m in enumerate(v["members"]): d[k]["members"][i] = m.strip() self.groups.update(d) if os.path.exists('/usr/xpg4/bin/id'): self.id_bin = '/usr/xpg4/bin/id' else: self.id_bin = 'id' def get_primary_group(self, user): cmd = [self.id_bin, "-gn", user] p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() if p.returncode != 0: return return out.strip() def member_of(self, user, refresh=False): if not refresh and user in self.member_of_h: # cache hit return self.member_of_h[user] eg = self.get_primary_group(user) if eg is None: self.member_of_h[user] = [] return [] cmd 
= [self.id_bin, "-Gn", user] p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() if p.returncode != 0: self.member_of_h[user] = [] return self.member_of_h[user] ag = set(out.strip().split()) ag -= set([eg]) self.member_of_h[user] = ag return self.member_of_h[user] def fixable(self): return RET_NA def del_member(self, group, user): ag = self.member_of(user) if len(ag) == 0: return 0 g = ag - set([group]) g = ','.join(g) return self.fix_member(g, user) def add_member(self, group, user): if 0 != self._check_member_accnt(user): perror('group', group+':', 'cannot add inexistant user "%s"'%user) return RET_ERR if self.get_primary_group(user) == group: pinfo("group %s is already the primary group of user %s: skip declaration as a secondary group (you may want to change your rule)" % (group, user)) return RET_OK ag = self.member_of(user) g = ag | set([group]) g = ','.join(g) return self.fix_member(g, user) def fix_member(self, g, user): cmd = ['usermod', '-G', g, user] pinfo("group_membership:", ' '.join(cmd)) p = Popen(cmd) out, err = p.communicate() r = p.returncode ag = self.member_of(user, refresh=True) if r == 0: return RET_OK else: return RET_ERR def fix_members(self, group, target): r = 0 for user in target: if group in self.member_of(user): continue r += self.add_member(group, user) return r def fix_item(self, group, item, target): if item == 'members': return self.fix_members(group, target) else: perror("group_membership:", 'no fix implemented for', item) return RET_ERR def _check_member_accnt(self, user): if which('getent'): xcmd = ['getent', 'passwd', user] elif which('pwget'): xcmd = ['pwget', '-n', user] else: return 0 xp = Popen(xcmd, stdout=PIPE, stderr=PIPE, close_fds=True) xout, xerr = xp.communicate() return xp.returncode def _check_members_accnts(self, group, user_list, which, verbose): r = RET_OK for user in user_list: rc = self._check_member_accnt(user) if rc != 0: r |= RET_ERR if verbose: perror('group', group, '%s member "%s" does not 
exist'%(which, user)) return r def filter_target(self, group, target): new_target = [] for user in target: pg = self.get_primary_group(user) if pg == group: continue new_target.append(user) discarded = set(target)-set(new_target) if len(discarded) > 0: pinfo("group %s members discarded: %s, as they already use this group as primary (you may want to change your rule)" % (group, ', '.join(discarded))) return new_target def check_item(self, group, item, target, current, verbose=False): r = RET_OK if item == 'members': r |= self._check_members_accnts(group, current, 'existing', verbose) r |= self._check_members_accnts(group, target, 'target', verbose) if not isinstance(current, list): current = [current] target = self.filter_target(group, target) if set(target) <= set(current): if verbose: pinfo('group', group, item+':', ', '.join(current)) return r else: if verbose: perror('group', group, item+':', ', '.join(current), '| target:', ', '.join(target)) return r|RET_ERR def check_group(self, group, props): r = 0 try: groupinfo = grp.getgrnam(group) except KeyError: pinfo('group', group, 'does not exist') return RET_OK for prop in self.grt: if prop in props: r |= self.check_item(group, prop, props[prop], getattr(groupinfo, self.grt[prop]), verbose=True) return r def fix_group(self, group, props): r = 0 try: groupinfo = grp.getgrnam(group) except KeyError: pinfo('group', group, 'does not exist') return RET_OK for prop in self.grt: if prop in props and \ self.check_item(group, prop, props[prop], getattr(groupinfo, self.grt[prop])) != RET_OK: r |= self.fix_item(group, prop, props[prop]) return r def check(self): r = 0 for group, props in self.groups.items(): r |= self.check_group(group, props) return r def fix(self): r = 0 for group, props in self.groups.items(): r |= self.fix_group(group, props) return r if __name__ == "__main__": main(CompGroupMembership) opensvc-1.8~20170412/var/compliance/com.opensvc/sysctl.py0000755000175000017500000002024213073467726023315 0ustar 
jkelbertjkelbert#!/usr/bin/env python data = { "default_prefix": "OSVC_COMP_SYSCTL_", "example_value": """ { "key": "vm.lowmem_reserve_ratio", "index": 1, "op": ">", "value": 256 } """, "description": """* Verify a linux kernel parameter value is on target * Live parameter value (sysctl executable) * Persistent parameter value (/etc/sysctl.conf) """, "form_definition": """ Desc: | A rule to set a list of Linux kernel parameters to be set in /etc/sysctl.conf. Current values can be checked as strictly equal, or superior/inferior to their target value. Each field in a vectored value can be tuned independantly using the index key. Css: comp48 Outputs: - Dest: compliance variable Type: json Format: list of dict Class: sysctl Inputs: - Id: key Label: Key DisplayModeLabel: key LabelCss: action16 Mandatory: Yes Type: string Help: The /etc/sysctl.conf parameter to check. - Id: index Label: Index DisplayModeLabel: idx LabelCss: action16 Mandatory: Yes Default: 0 Type: integer Help: The /etc/sysctl.conf parameter to check. - Id: op Label: Comparison operator DisplayModeLabel: op LabelCss: action16 Mandatory: Yes Type: string Default: "=" Candidates: - "=" - ">" - ">=" - "<" - "<=" Help: The comparison operator to use to check the parameter current value. - Id: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string or integer Help: The /etc/sysctl.conf parameter target value. 
""", } import os import sys import json import pwd from subprocess import * sys.path.append(os.path.dirname(__file__)) from comp import * class Sysctl(CompObject): def __init__(self, prefix=None): CompObject.__init__(self, prefix=prefix, data=data) def init(self): if os.uname()[0] != "Linux": raise NotApplicable() self.need_reload = False self.cf = os.path.join(os.sep, "etc", "sysctl.conf") if not os.path.exists(self.cf): perror(self.cf, 'does not exist') raise NotApplicable() self.keys = [] self.cache = None self.keys = self.get_rules() if len(self.keys) == 0: raise NotApplicable() self.convert_keys() def fixable(self): return RET_OK def parse_val(self, val): val = list(map(lambda x: x.strip(), val.strip().split())) for i, e in enumerate(val): try: val[i] = int(e) except: pass return val def get_keys(self): with open(self.cf, 'r') as f: buff = f.read() if self.cache is None: self.cache = {} for line in buff.splitlines(): line = line.strip() if line.startswith('#'): continue l = line.split('=') if len(l) != 2: continue key = l[0].strip() val = self.parse_val(l[1]) self.cache[key] = val def get_live_key(self, key): p = Popen(['sysctl', key], stdout=PIPE, stderr=PIPE) out, err = p.communicate() if p.returncode != 0: return None l = bdecode(out).split('=') if len(l) != 2: return None val = self.parse_val(l[1]) return val def get_key(self, key): if self.cache is None: self.get_keys() if key not in self.cache: return None return self.cache[key] def fix_key(self, key): done = False target = key['value'] index = key['index'] with open(self.cf, 'r') as f: buff = f.read() lines = buff.split('\n') for i, line in enumerate(lines): line = line.strip() if line.startswith('#'): continue l = line.split('=') if len(l) != 2: continue keyname = l[0].strip() if key['key'] != keyname: continue if done: pinfo("sysctl: remove redundant key %s"%keyname) del lines[i] continue val = self.parse_val(l[1]) if target == val[index]: done = True continue pinfo("sysctl: set %s[%d] = %s"%(keyname, 
index, str(target))) val[index] = target lines[i] = "%s = %s"%(keyname, " ".join(map(str, val))) done = True if not done: # if key is not in sysctl.conf, get the value from kernel val = self.get_live_key(key['key']) if val is None: perror("key '%s' not found in live kernel parameters" % key['key']) return RET_ERR if target != val[index]: val[index] = target pinfo("sysctl: set %s = %s"%(key['key'], " ".join(map(str, val)))) lines += ["%s = %s"%(key['key'], " ".join(map(str, val)))] try: with open(self.cf, 'w') as f: f.write('\n'.join(lines)) except: perror("failed to write sysctl.conf") return RET_ERR return RET_OK def convert_keys(self): keys = [] for key in self.keys: keyname = key['key'] value = key['value'] if type(value) == list: if len(value) > 0 and type(value[0]) != list: value = [value] for i, v in enumerate(value): keys.append({ "key": keyname, "index": i, "op": v[0], "value": v[1], }) elif 'key' in key and 'index' in key and 'op' in key and 'value' in key: keys.append(key) self.keys = keys def check_key(self, key, verbose=False): r = RET_OK keyname = key['key'] target = key['value'] op = key['op'] i = key['index'] current_value = self.get_key(keyname) current_live_value = self.get_live_key(keyname) if current_value is None: if verbose: perror("key '%s' not found in sysctl.conf"%keyname) return RET_ERR if op == "=" and str(current_value[i]) != str(target): if verbose: perror("sysctl err: %s[%d] = %s, target: %s"%(keyname, i, str(current_value[i]), str(target))) r |= RET_ERR elif op == ">=" and type(target) == int and current_value[i] < target: if verbose: perror("sysctl err: %s[%d] = %s, target: >= %s"%(keyname, i, str(current_value[i]), str(target))) r |= RET_ERR elif op == "<=" and type(target) == int and current_value[i] > target: if verbose: perror("sysctl err: %s[%d] = %s, target: <= %s"%(keyname, i, str(current_value[i]), str(target))) r |= RET_ERR else: if verbose: pinfo("sysctl ok: %s[%d] = %s, on target"%(keyname, i, str(current_value[i]))) if r 
== RET_OK and current_live_value is not None and current_value != current_live_value: if verbose: perror("sysctl err: %s on target in sysctl.conf but kernel value is different"%(keyname)) self.need_reload = True r |= RET_ERR return r def check(self): r = 0 for key in self.keys: r |= self.check_key(key, verbose=True) return r def reload_sysctl(self): cmd = ['sysctl', '-e', '-p'] pinfo("sysctl:", " ".join(cmd)) p = Popen(cmd, stdout=PIPE, stderr=PIPE) p.communicate() if p.returncode != 0: perror("reload failed") return RET_ERR return RET_OK def fix(self): r = 0 for key in self.keys: if self.check_key(key, verbose=False) == RET_ERR: self.need_reload = True r |= self.fix_key(key) if self.need_reload: r |= self.reload_sysctl() return r if __name__ == "__main__": main(Sysctl) opensvc-1.8~20170412/var/compliance/com.opensvc/smfcfgs.py0000755000175000017500000003132113073467726023424 0ustar jkelbertjkelbert#!/usr/bin/env python """ The ENV variable format is json-serialized [list of dict]: [ { "fmri": "svc:/network/ntp" "prop": "config/slew_always" "type": "boolean" "value": "true" "inorder": 0 "create": 1 "reload": 0 "sleep": 0 } { "fmri": "svc:/network/dns/client" "prop": "config/nameserver" "type": "net_address" "value": "172.30.65.165 172.30.65.164" "inorder": 0 "create": 1 "reload": 0 "sleep": 6 } { "fmri": "svc:/network/dns/client" "prop": "config/search" "type": "astring" "value": "cpdev.local cpprod.root.local cpgrp.root.local" "inorder": 1 "create": 1 "reload": 0 "sleep": 9 } ] """ import os import sys import json import re from subprocess import * sys.path.append(os.path.dirname(__file__)) from comp import * class AutoInst(dict): """autovivification feature.""" def __getitem__(self, item): try: return dict.__getitem__(self, item) except KeyError: value = self[item] = type(self)() return value class SmfCfgS(object): def __init__(self, prefix='OSVC_COMP_SMF_CFGS_'): self.prefix = prefix.upper() self.sysname, self.nodename, self.osn, self.solv, self.machine = 
os.uname() self.data = [] self.smfs = AutoInst() self.osver = float(self.osn) if self.osver < 5.11: pinfo('Only used on Solaris 11 and behond') return for k in [ key for key in os.environ if key.startswith(self.prefix)]: try: self.data += self.add_fmri(os.environ[k]) except ValueError: perror('failed to parse variable', os.environ[k]) for f in self.data: s,p,t,v = self.get_fmri(f['fmri'], f['prop']) if s is None: continue cre = False if p is None: if f['create'] == 0: perror('FMRI:%s, PROP:%s is absent and create is False' %(s,f['prop'])) continue else: p = f['prop'] cre = True if f['inorder'] == 0: ino = False else: ino = True if f['reload'] == 0: rel = False else: rel = True self.smfs[f['fmri']][p] = { 'val': f['value'], 'rval': v, 'typ': f['type'] , 'rtyp': t, 'ino': ino, 'cre': cre, 'rel': rel, 'slp': f['sleep'] } def subst(self, v): if type(v) == list: l = [] for _v in v: l.append(self.subst(_v)) return l if type(v) != str and type(v) != unicode: return v p = re.compile('%%ENV:\w+%%') for m in p.findall(v): s = m.strip("%").replace('ENV:', '') if s in os.environ: _v = os.environ[s] elif 'OSVC_COMP_'+s in os.environ: _v = os.environ['OSVC_COMP_'+s] else: perror(s, 'is not an env variable') raise NotApplicable() v = v.replace(m, _v) return v def add_fmri(self, v): if type(v) == str or type(v) == unicode: d = json.loads(v) else: d = v l = [] # recurse if multiple FMRI are specified in a list of dict if type(d) == list: for _d in d: l += self.add_fmri(_d) return l if type(d) != dict: perror("not a dict:", d) return l if 'fmri' not in d: perror('FMRI should be in the dict:', d) RET = RET_ERR return l if 'prop' not in d: perror('prop should be in the dict:', d) RET = RET_ERR return l if 'value' not in d: perror('value should be in the dict:', d) RET = RET_ERR return l if 'create' in d: if d['create'] == 1: if not 'type' in d: perror('create True[1] needs a type:', d) RET = RET_ERR return l for k in ('fmri', 'prop', 'value', 'inorder', 'type', 'create', 'sleep'): if 
k in d: d[k] = self.subst(d[k]) return [d] def fixable(self): return RET_NA def get_fmri(self, s, p): cmd = ['/usr/sbin/svccfg','-s', s, 'listprop', p] po = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = po.communicate() out = bdecode(out) err = bdecode(err) if po.returncode != 0: if "doesn't match" in err: pinfo('%s is absent => IGNORED' %self.service) return None,None,None,None else: perror(' '.join(cmd)) raise ComplianceError() if len(out) < 2: return s,None,None,None x = out.strip('\n').split() if x[0] != p: perror(' '.join([s, 'wanted:%s'%p, 'got:%s'%x[0]])) raise ComplianceError() return s,p,x[1],x[2:] def check_smf_prop_cre(self, s, p, verbose=True): r = RET_OK if self.smfs[s][p]['cre']: if verbose: perror('NOK: %s Prop %s shall be created' %(s,p)) r |= RET_ERR if self.smfs[s][p]['typ'] == '' or self.smfs[s][p]['typ'] == None: if verbose: perror('NOK: %s type must be specified to create %s' %(s,p)) return r,self.smfs[s][p]['cre'] def check_smf_prop_typ(self, s, p, verbose=True): r = RET_OK if self.smfs[s][p]['typ'] == '' or self.smfs[s][p]['typ'] == None: if verbose: pinfo('%s Prop %s type is not checked' %(s,p)) elif self.smfs[s][p]['typ'] != self.smfs[s][p]['rtyp']: if verbose: perror('NOK: %s Prop %s type Do Not match, got:%s, expected:%s' %(s,p,self.smfs[s][p]['rtyp'],self.smfs[s][p]['typ'])) r |= RET_ERR else: if verbose: pinfo('%s Prop %s type %s is OK' %(s,p,self.smfs[s][p]['typ'])) if self.smfs[s][p]['typ'] == '' or self.smfs[s][p]['typ'] == None: if verbose: perror('NOK: %s type must be specified to create %s' %(s,p)) return r def check_smf_prop_val(self, s, p, verbose=True): r = RET_OK rvs = ' '.join(self.smfs[s][p]['rval']) if self.smfs[s][p]['ino']: if self.smfs[s][p]['val'] == rvs: if verbose: pinfo('%s Prop %s values match in right order [%s]' %(s,p,rvs)) else: if verbose: perror('NOK: %s Prop %s values Do Not match, got:[%s], expected:[%s]' %(s,p,rvs,self.smfs[s][p]['val'])) r |= RET_ERR else: vv = self.smfs[s][p]['val'].split() m = True for 
v in vv: if not v in self.smfs[s][p]['rval']: if verbose and len(self.smfs[s][p]['rval']) > 1 : perror('%s Prop %s notfound %s' %(s,p,v)) m = False else: if verbose and len(self.smfs[s][p]['rval']) > 1 : pinfo('%s Prop %s found %s' %(s,p,v)) if m: if verbose: pinfo('%s Prop %s values match [%s]' %(s,p,rvs)) else: if verbose: perror('NOK: %s Prop %s values Do Not match, got:[%s], expected:[%s]' %(s,p,rvs,self.smfs[s][p]['val'])) r |= RET_ERR return r def check_smfs(self, verbose=True): r = RET_OK for s in self.smfs: for p in self.smfs[s]: """ pinfo('FMRI: ', s, 'PROP: ', p, 'TYP: ', self.smfs[s][p]['typ'], 'RTYP: ', self.smfs[s][p]['rtyp'], type(self.smfs[s][p]['val']), type(self.smfs[s][p]['rval'])) pinfo(' ', 'VALS: ', self.smfs[s][p]['val']) pinfo(' ', 'RVALS: ', self.smfs[s][p]['rval']) """ rx,c = self.check_smf_prop_cre(s, p, verbose=verbose) r |= rx if not c: r |= self.check_smf_prop_typ(s, p, verbose=verbose) r |= self.check_smf_prop_val(s, p, verbose=verbose) return r def fix_smfs(self, verbose=False): r = RET_OK cmds = [] for s in self.smfs: for p in self.smfs[s]: added = False rx,c = self.check_smf_prop_cre(s, p, verbose=verbose) vx = self.smfs[s][p]['val'].split() if c: if rx == 0 : pinfo('%s try to add %s %s: = %s' %(s,p,self.smfs[s][p]['typ'],self.smfs[s][p]['val'])) if len(vx) > 1: sxok = True for v in vx: if not (v.startswith('"') and v.endswith('"')): """ sxok = False break """ if sxok: cmds.append(['/usr/sbin/svccfg', '-s', s, 'setprop', p, '=', self.smfs[s][p]['typ']+':', '(%s)'%self.smfs[s][p]['val']]) added = True else: perror('NOK: %s prop %s values must be within double quotes [%s]' %(s,p,self.smfs[s][p]['val'])) r |= RET_ERR else: cmds.append(['/usr/sbin/svccfg', '-s', s, 'setprop', p, '=', self.smfs[s][p]['typ']+':', self.smfs[s][p]['val']]) added = True else: perror('NOK: %s cannot add prop %s without a valid type' %(s,p)) r |= RET_ERR else: ry = self.check_smf_prop_val(s, p, verbose=verbose) if ry != 0: pinfo('%s try to fix %s = %s' 
%(s,p,self.smfs[s][p]['val'])) if len(vx) > 1: sxok = True for v in vx: if not (v.startswith('"') and v.endswith('"')): """ sxok = False break """ if sxok: cmds.append(['/usr/sbin/svccfg', '-s', s, 'setprop', p, '=', '(%s)'%self.smfs[s][p]['val']]) added = True else: perror('NOK: %s prop %s values must be within double quotes [%s]' %(s,p,self.smfs[s][p]['val'])) r |= RET_ERR else: cmds.append(['/usr/sbin/svccfg', '-s', s, 'setprop', p, '=', self.smfs[s][p]['val']]) added = True if added: if self.smfs[s][p]['rel']: cmds.append(['/usr/sbin/svcadm', 'refresh' ,s]) if self.smfs[s][p]['slp'] != 0: cmds.append(['/usr/bin/sleep' , '%d'%self.smfs[s][p]['slp']]) for cmd in cmds: pinfo('EXEC:', ' '.join(cmd)) p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() err = bdecode(err) if p.returncode != 0: perror('Code=%s %s' %(p.returncode, err)) r |= RET_ERR return r def check(self): if self.osver < 5.11: return RET_NA r = self.check_smfs() return r def fix(self): if self.osver < 5.11: return RET_NA r = self.fix_smfs() return r if __name__ == "__main__": syntax = """syntax: %s check|fixable|fix]"""%sys.argv[0] try: action = sys.argv[1] o = SmfCfgS() if action == 'check': RET = o.check() elif action == 'fix': RET = o.fix() elif action == 'fixable': RET = o.fixable() else: perror("unsupported argument '%s'"%sys.argv[2]) perror(syntax) RET = RET_ERR except NotApplicable: sys.exit(RET_NA) except: import traceback traceback.print_exc() sys.exit(RET_ERR) sys.exit(RET) opensvc-1.8~20170412/var/compliance/com.opensvc/group.py0000755000175000017500000001707213073467726023137 0ustar jkelbertjkelbert#!/usr/bin/env python data = { "default_prefix": "OSVC_COMP_GROUP_", "example_value": """ { "tibco": { "gid": 1000, }, "tibco1": { "gid": 1001, } } """, "description": """* Verify a local system group configuration * A minus (-) prefix to the group name indicates the user should not exist """, "form_definition": """ Desc: | A rule defining a list of Unix groups and their 
properties. Used by the groups compliance objects. Css: comp48 Outputs: - Dest: compliance variable Type: json Format: dict of dict Key: group EmbedKey: No Class: group Inputs: - Id: group Label: Group name DisplayModeLabel: group LabelCss: guys16 Mandatory: Yes Type: string Help: The Unix group name. - Id: gid Label: Group id DisplayModeLabel: gid LabelCss: guys16 Type: string or integer Help: The Unix gid of this group. """, } import os import sys import json import grp import re from subprocess import Popen sys.path.append(os.path.dirname(__file__)) from comp import * blacklist = [ "root", "bin", "daemon", "sys", "adm", "tty", "disk", "lp", "mem", "kmem", "wheel", "mail", "uucp", "man", "games", "gopher", "video", "dip", "ftp", "lock", "audio", "nobody", "users", "utmp", "utempter", "floppy", "vcsa", "cdrom", "tape", "dialout", "saslauth", "postdrop", "postfix", "sshd", "opensvc", "mailnull", "smmsp", "slocate", "rpc", "rpcuser", "nfsnobody", "tcpdump", "ntp" ] class CompGroup(CompObject): def __init__(self, prefix=None): CompObject.__init__(self, prefix=prefix, data=data) def init(self): self.grt = { 'gid': 'gr_gid', } self.groupmod_p = { 'gid': '-g', } self.sysname, self.nodename, x, x, self.machine = os.uname() if self.sysname == "FreeBSD": self.groupadd = ["pw", "groupadd"] self.groupmod = ["pw", "groupmod"] self.groupdel = ["pw", "groupdel"] elif self.sysname == 'AIX': self.groupmod = ['chgroup'] self.groupadd = ['mkgroup'] self.groupdel = ['rmgroup'] self.groupmod_p = { 'gid': 'id', } else: self.groupadd = ["groupadd"] self.groupmod = ["groupmod"] self.groupdel = ["groupdel"] if self.sysname not in ['SunOS', 'Linux', 'HP-UX', 'AIX', 'OSF1', 'FreeBSD']: perror('group: module not supported on', self.sysname) raise NotApplicable self.groups = {} for d in self.get_rules(): self.groups.update(d) for group, d in self.groups.items(): for k in ('uid', 'gid'): if k in d: self.groups[group][k] = int(d[k]) def fixable(self): return RET_NA def fmt_opt_gen(self, item, 
target): return [item, target] def fmt_opt_aix(self, item, target): return ['='.join((item, target))] def fmt_opt(self, item, target): if self.sysname == 'AIX': return self.fmt_opt_aix(item, target) else: return self.fmt_opt_gen(item, target) def fix_item(self, group, item, target): if item in self.groupmod_p: cmd = [] + self.groupmod if self.sysname == "FreeBSD": cmd += [group] cmd += self.fmt_opt(self.groupmod_p[item], str(target)) if self.sysname != "FreeBSD": cmd += [group] pinfo("group:", ' '.join(cmd)) p = Popen(cmd) out, err = p.communicate() r = p.returncode if r == 0: return RET_OK else: return RET_ERR else: perror('group: no fix implemented for', item) return RET_ERR def check_item(self, group, item, target, current, verbose=False): if type(current) == int and current < 0: current += 4294967296 if target == current: if verbose: pinfo('group', group, item+':', current) return RET_OK else: if verbose: perror('group', group, item+':', current, 'target:', target) return RET_ERR def try_create_group(self, props): # # don't try to create group if passwd db is not 'files' # beware: 'files' db is the implicit default # if 'db' in props and props['db'] != 'files': return False if set(self.grt.keys()) <= set(props.keys()): return True return False def check_group_del(self, group): try: groupinfo = grp.getgrnam(group) except KeyError: pinfo('group', group, 'does not exist, on target') return RET_OK perror('group', group, "exists, shouldn't") return RET_ERR def check_group(self, group, props): if group.startswith('-'): return self.check_group_del(group.lstrip('-')) r = 0 try: groupinfo = grp.getgrnam(group) except KeyError: if self.try_create_group(props): perror('group', group, 'does not exist') return RET_ERR else: pinfo('group', group, 'does not exist and not enough info to create it') return RET_OK for prop in self.grt: if prop in props: r |= self.check_item(group, prop, props[prop], getattr(groupinfo, self.grt[prop]), verbose=True) return r def 
create_group(self, group, props): cmd = [] + self.groupadd if self.sysname == "FreeBSD": cmd += [group] for item in self.grt: cmd += self.fmt_opt(self.groupmod_p[item], str(props[item])) if self.sysname != "FreeBSD": cmd += [group] pinfo("group:", ' '.join(cmd)) p = Popen(cmd) out, err = p.communicate() r = p.returncode if r == 0: return RET_OK else: return RET_ERR def fix_group_del(self, group): if group in blacklist: perror("group", group, "... cowardly refusing to delete") return RET_ERR try: groupinfo = grp.getgrnam(group) except KeyError: return RET_OK cmd = self.groupdel + [group] pinfo("group:", ' '.join(cmd)) p = Popen(cmd) out, err = p.communicate() r = p.returncode if r == 0: return RET_OK else: return RET_ERR def fix_group(self, group, props): if group.startswith('-'): return self.fix_group_del(group.lstrip('-')) r = 0 try: groupinfo = grp.getgrnam(group) except KeyError: if self.try_create_group(props): return self.create_group(group, props) else: perror('group', group, 'does not exist') return RET_OK for prop in self.grt: if prop in props and \ self.check_item(group, prop, props[prop], getattr(groupinfo, self.grt[prop])) != RET_OK: r |= self.fix_item(group, prop, props[prop]) return r def check(self): r = 0 for group, props in self.groups.items(): r |= self.check_group(group, props) return r def fix(self): r = 0 for group, props in self.groups.items(): r |= self.fix_group(group, props) return r if __name__ == "__main__": main(CompGroup) opensvc-1.8~20170412/var/compliance/com.opensvc/zprop.py0000755000175000017500000001047513073467726023155 0ustar jkelbertjkelbert#!/usr/bin/env python import os import sys sys.path.append(os.path.dirname(__file__)) from utilities import which from comp import * from subprocess import * class CompZprop(CompObject): def __init__(self, prefix='OSVC_COMP_ZPROP_'): CompObject.__init__(self, prefix=prefix, data=data) def init(self): self.sysname, self.nodename, x, x, self.machine = os.uname() self.data = [] for rule in 
self.get_rules(): try: self.data += self.add_rule(rule) except InitError: continue except ValueError: perror('failed to parse variable', rule) def add_rule(self, d): allgood = True for k in ["name", "prop", "op", "value"]: if k not in d: perror('the', k, 'key should be in the dict:', d) allgood = False if allgood: return [d] return [] def get_prop(self, d): cmd = [self.zbin, "get", d.get("prop"), d.get("name")] p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() if p.returncode != 0: return out = bdecode(out) l = [line for line in out.splitlines() if line != ""] if len(l) != 2: return v1 = l[0].split() v2 = l[1].split() if len(v1) != len(v2): return data = {} for k, v in zip(v1, v2): data[k] = v return data def check_le(self, current, target): current = int(current) if current <= target: return RET_OK return RET_ERR def check_ge(self, current, target): current = int(current) if current >= target: return RET_OK return RET_ERR def check_lt(self, current, target): current = int(current) if current < target: return RET_OK return RET_ERR def check_gt(self, current, target): current = int(current) if current > target: return RET_OK return RET_ERR def check_eq(self, current, target): if current == str(target): return RET_OK return RET_ERR def fixable(self): return RET_NA def fix_zprop(self, d): if self.check_zprop(d) == RET_OK: return RET_OK prop = d.get("prop") target = d.get("value") name = d.get("name") cmd = [self.zbin, "set", prop+"="+target, name] pinfo(" ".join(cmd)) p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() if p.returncode != 0: err = bdecode(err) if len(err) > 0: perror(err) return RET_ERR return RET_OK def check_zprop(self, d, verbose=False): v = self.get_prop(d) prop = d.get("prop") if v is None: if verbose: perror("property", prop, "does not exist") return RET_ERR current = v["VALUE"] op = d.get("op") target = d.get("value") if op == "=": r = self.check_eq(current, target) elif op == "<=": r = self.check_le(current, 
target) elif op == "<": r = self.check_lt(current, target) elif op == ">=": r = self.check_ge(current, target) elif op == ">": r = self.check_gt(current, target) else: perror("unsupported operator", op) return RET_ERR if verbose: if r == RET_OK: pinfo("property %s current value %s is %s %s. on target." % (prop, current, op, target)) else: pinfo("property %s current value %s is not %s %s." % (prop, current, op, target)) return r def check_zbin(self): return which(self.zbin) def check(self): if not self.check_zbin(): pinfo(self.zbin, "not found") return RET_NA r = 0 for d in self.data: r |= self.check_zprop(d, verbose=True) return r def fix(self): if not self.check_zbin(): pinfo(self.zbin, "not found") return RET_NA r = 0 for d in self.data: r |= self.fix_zprop(d) return r if __name__ == "__main__": main(CompZprop) opensvc-1.8~20170412/var/compliance/com.opensvc/rc.py0000755000175000017500000001064113073467726022402 0ustar jkelbertjkelbert#!/usr/bin/env python """ [{"service": "foo", "level": "2345", "state": "on"}, {"service": "foo", "level": "016", "state": "off"}, {"service": "bar", "state": "on"}, ...] 
""" import os import sys import json import pwd import re from subprocess import * sys.path.append(os.path.dirname(__file__)) from comp import * class CompRc(object): def __init__(self, prefix='OSVC_COMP_RC_'): self.prefix = prefix.upper() self.sysname, self.nodename, x, x, self.machine = os.uname() self.services = [] for k in [key for key in os.environ if key.startswith(self.prefix)]: try: l = json.loads(os.environ[k]) for i, d in enumerate(l): for key, val in d.items(): d[key] = self.subst(val) l[i] = d self.services += l except ValueError: perror('failed to concatenate', os.environ[k], 'to service list') self.validate_svcs() if len(self.services) == 0: raise NotApplicable() if self.sysname not in ['Linux', 'HP-UX']: perror(__file__, 'module not supported on', self.sysname) raise NotApplicable() vendor = os.environ.get('OSVC_COMP_NODES_OS_VENDOR', 'unknown') release = os.environ.get('OSVC_COMP_NODES_OS_RELEASE', 'unknown') if vendor in ['CentOS', 'Redhat', 'Red Hat', 'SuSE'] or \ (vendor == 'Oracle' and self.sysname == 'Linux'): import chkconfig self.o = chkconfig.Chkconfig() elif vendor in ['Ubuntu', 'Debian', 'HP']: import sysvinit self.o = sysvinit.SysVInit() else: perror(vendor, "not supported") raise NotApplicable() def subst(self, v): if type(v) == list: l = [] for _v in v: l.append(self.subst(_v)) return l if type(v) != str and type(v) != unicode: return v p = re.compile('%%ENV:\w+%%') for m in p.findall(v): s = m.strip("%").replace('ENV:', '') if s in os.environ: _v = os.environ[s] elif 'OSVC_COMP_'+s in os.environ: _v = os.environ['OSVC_COMP_'+s] else: perror(s, 'is not an env variable') raise NotApplicable() v = v.replace(m, _v) return v def validate_svcs(self): l = [] for i, svc in enumerate(self.services): if self.validate_svc(svc) == RET_OK: l.append(svc) self.svcs = l def validate_svc(self, svc): if 'service' not in svc: perror(svc, ' rule is malformed ... 
service key not present') return RET_ERR if 'state' not in svc: perror(svc, ' rule is malformed ... state key not present') return RET_ERR return RET_OK def check_svc(self, svc, verbose=True): if 'seq' in svc: seq = svc['seq'] else: seq = None return self.o.check_state(svc['service'], svc['level'], svc['state'], seq=seq, verbose=verbose) def fix_svc(self, svc, verbose=True): if 'seq' in svc: seq = svc['seq'] else: seq = None if self.check_svc(svc, verbose=False) == RET_OK: return RET_OK return self.o.fix_state(svc['service'], svc['level'], svc['state'], seq=seq) def check(self): r = 0 for svc in self.services: r |= self.check_svc(svc) return r def fix(self): r = 0 for svc in self.services: r |= self.fix_svc(svc) return r if __name__ == "__main__": syntax = """syntax: %s PREFIX check|fixable|fix"""%sys.argv[0] if len(sys.argv) != 3: perror("wrong number of arguments") perror(syntax) sys.exit(RET_ERR) try: o = CompRc(sys.argv[1]) if sys.argv[2] == 'check': RET = o.check() elif sys.argv[2] == 'fix': RET = o.fix() elif sys.argv[2] == 'fixable': RET = o.fixable() else: perror("unsupported argument '%s'"%sys.argv[2]) perror(syntax) RET = RET_ERR except NotApplicable: sys.exit(RET_NA) except: import traceback traceback.print_exc() sys.exit(RET_ERR) sys.exit(RET) opensvc-1.8~20170412/var/compliance/com.opensvc/package.py0000755000175000017500000006322613073467726023400 0ustar jkelbertjkelbert#!/usr/bin/env python data = { "default_prefix": "OSVC_COMP_PACKAGES_", "example_value": """ [ "bzip2", "-zip", "zip" ] """, "description": """* Verify a list of packages is installed or removed * A '-' prefix before the package name means the package should be removed * No prefix before the package name means the package should be installed * The package version is not checked """, "form_definition": """ Desc: | A rule defining a set of packages, fed to the 'packages' compliance object for it to check each package installed or not-installed status. 
Css: comp48 Outputs: - Dest: compliance variable Class: package Type: json Format: list Inputs: - Id: pkgname Label: Package name DisplayModeLabel: "" LabelCss: pkg16 Mandatory: Yes Help: Use '-' as a prefix to set 'not installed' as the target state. Use '*' as a wildcard for package name expansion for operating systems able to list packages available for installation. Type: string """, } import os import re import sys import json import pwd import tempfile from subprocess import * from utilities import which sys.path.append(os.path.dirname(__file__)) from comp import * class CompPackages(CompObject): def __init__(self, prefix='OSVC_COMP_PACKAGES_', uri=None): CompObject.__init__(self, prefix=prefix, data=data) self.uri = uri def init(self): self.combo_fix = False self.sysname, self.nodename, x, x, self.machine = os.uname() self.known_archs = ['i386', 'i586', 'i686', 'x86_64', 'noarch', '*'] if self.sysname not in ['Linux', 'AIX', 'HP-UX', 'SunOS', 'FreeBSD']: perror(__file__, 'module not supported on', self.sysname) raise NotApplicable() if 'OSVC_COMP_PACKAGES_PKG_TYPE' in os.environ and \ os.environ['OSVC_COMP_PACKAGES_PKG_TYPE'] == "bundle": self.pkg_type = 'bundle' else: self.pkg_type = 'product' self.packages = self.get_rules() if len(self.packages) == 0: raise NotApplicable() self.data = {} l = [] for pkg in self.packages: if type(pkg) == dict: l.append(pkg['pkgname']) self.data[pkg['pkgname']] = pkg if len(l) > 0: self.packages = l vendor = os.environ.get('OSVC_COMP_NODES_OS_VENDOR', 'unknown') release = os.environ.get('OSVC_COMP_NODES_OS_RELEASE', 'unknown') if vendor in ['Debian', 'Ubuntu']: self.get_installed_packages = self.deb_get_installed_packages self.pkg_add = self.apt_fix_pkg self.pkg_del = self.apt_del_pkg elif vendor in ['CentOS', 'Redhat', 'Red Hat'] or \ (vendor == 'Oracle' and self.sysname == 'Linux'): if which("yum") is None: perror("package manager not found (yum)") raise ComplianceError() self.combo_fix = True self.get_installed_packages = 
self.rpm_get_installed_packages self.pkg_add = self.yum_fix_pkg self.pkg_del = self.yum_del_pkg elif vendor == "SuSE": if which("zypper") is None: perror("package manager not found (zypper)") raise ComplianceError() self.get_installed_packages = self.rpm_get_installed_packages self.pkg_add = self.zyp_fix_pkg self.pkg_del = self.zyp_del_pkg elif vendor == "FreeBSD": if which("pkg") is None: perror("package manager not found (pkg)") raise ComplianceError() self.get_installed_packages = self.freebsd_pkg_get_installed_packages self.pkg_add = self.freebsd_pkg_fix_pkg self.pkg_del = self.freebsd_pkg_del_pkg elif vendor in ['IBM']: self.get_installed_packages = self.aix_get_installed_packages self.pkg_add = self.aix_fix_pkg self.pkg_del = self.aix_del_pkg if self.uri is None: perror("resource must be set") raise NotApplicable() elif vendor in ['HP']: self.get_installed_packages = self.hp_get_installed_packages self.pkg_add = self.hp_fix_pkg self.pkg_del = self.hp_del_pkg elif vendor in ['Oracle']: self.get_installed_packages = self.sol_get_installed_packages self.pkg_add = self.sol_fix_pkg self.pkg_del = self.sol_del_pkg else: perror(vendor, "not supported") raise NotApplicable() self.load_reloc() self.packages = map(lambda x: x.strip(), self.packages) self.expand_pkgnames() self.installed_packages = self.get_installed_packages() def load_reloc(self): self.reloc = {} for i, pkgname in enumerate(self.packages): l = pkgname.split(':') if len(l) != 2: continue self.packages[i] = l[0] self.reloc[l[0]] = l[1] def expand_pkgnames(self): """ Expand wildcards and implicit arch """ l = [] for pkgname in self.packages: if (pkgname.startswith('-') or pkgname.startswith('+')) and len(pkgname) > 1: prefix = pkgname[0] pkgname = pkgname[1:] else: prefix = '' l += map(lambda x: prefix+x, self.expand_pkgname(pkgname, prefix)) self.packages = l def expand_pkgname(self, pkgname, prefix): vendor = os.environ.get('OSVC_COMP_NODES_OS_VENDOR', 'unknown') release = 
os.environ.get('OSVC_COMP_NODES_OS_RELEASE', 'unknown') if vendor in ['CentOS', 'Redhat', 'Red Hat'] or (vendor == 'Oracle' and release.startswith('VM ')): return self.yum_expand_pkgname(pkgname, prefix) elif vendor == 'SuSE': return self.zyp_expand_pkgname(pkgname, prefix) elif vendor in ['IBM']: return self.aix_expand_pkgname(pkgname, prefix) return [pkgname] def aix_expand_pkgname(self, pkgname, prefix=''): """ LGTOnw.clnt:LGTOnw.clnt.rte:8.1.1.6::I:C:::::N:NetWorker Client::::0:: LGTOnw.man:LGTOnw.man.rte:8.1.1.6::I:C:::::N:NetWorker Man Pages::::0:: or for rpm lpp_source: zlib ALL @@R:zlib _all_filesets @@R:zlib-1.2.7-2 1.2.7-2 """ if not hasattr(self, "nimcache"): cmd = ['nimclient', '-o', 'showres', '-a', 'resource=%s'%self.uri, '-a', 'installp_flags=L'] p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() err = bdecode(err) self.lpp_type = "installp" if "0042-175" in err: # not a native installp lpp_source cmd = ['nimclient', '-o', 'showres', '-a', 'resource=%s'%self.uri] p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() self.lpp_type = "rpm" out = bdecode(out) self.nimcache = out.splitlines() l = [] if self.lpp_type == "rpm": l = self.aix_expand_pkgname_rpm(pkgname, prefix=prefix) elif self.lpp_type == "native": l = self.aix_expand_pkgname_native(pkgname, prefix=prefix) if len(l) == 0: l = [pkgname] return l def aix_expand_pkgname_rpm(self, pkgname, prefix=''): import fnmatch l = [] for line in self.nimcache: line = line.strip() if len(line) == 0: continue words = line.split() if line.startswith("@@") and len(words) > 1: _pkgvers = words[1] if fnmatch.fnmatch(_pkgname, pkgname) and _pkgname not in l: l.append(_pkgname) else: _pkgname = words[0] return l def aix_expand_pkgname_native(self, pkgname, prefix=''): import fnmatch l = [] for line in self.nimcache: words = line.split(':') if len(words) < 5: continue _pkgvers = words[2] _pkgname = words[1].replace('-'+_pkgvers, '') if fnmatch.fnmatch(_pkgname, pkgname) and _pkgname 
not in l: l.append(_pkgname) return l def zyp_expand_pkgname(self, pkgname, prefix=''): arch_specified = False for arch in self.known_archs: if pkgname.endswith(arch): arch_specified = True cmd = ['zypper', '--non-interactive', 'packages'] p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() if p.returncode != 0: if prefix != '-': perror('can not expand (cmd error)', pkgname, err) return [] else: return [pkgname] out = bdecode(out) lines = out.splitlines() if len(lines) < 2: if prefix != '-': perror('can not expand', pkgname) return [] else: return [pkgname] for i, line in enumerate(lines): if "--+--" in line: break lines = lines[i+1:] l = [] for line in lines: words = map(lambda x: x.strip(), line.split(" | ")) if len(words) != 5: continue _status, _repo, _name, _version, _arch = words if arch_specified: if _name != pkgname or (arch != '*' and arch != _arch): continue else: if _name != pkgname: continue _pkgname = '.'.join((_name, _arch)) if _pkgname in l: continue l.append(_pkgname) if arch_specified or len(l) == 1: return l if os.environ['OSVC_COMP_NODES_OS_ARCH'] in ('i386', 'i586', 'i686', 'ia32'): archs = ('i386', 'i586', 'i686', 'ia32', 'noarch') else: archs = (os.environ['OSVC_COMP_NODES_OS_ARCH'], 'noarch') ll = [] for pkgname in l: if pkgname.split('.')[-1] in archs: # keep only packages matching the arch ll.append(pkgname) return ll def yum_expand_pkgname(self, pkgname, prefix=''): arch_specified = False for arch in self.known_archs: if pkgname.endswith(arch): arch_specified = True cmd = ['yum', 'list', pkgname] p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() if p.returncode != 0: if prefix != '-': perror('can not expand (cmd error)', pkgname, err) return [] else: return [pkgname] out = bdecode(out) lines = out.splitlines() if len(lines) < 2: if prefix != '-': perror('can not expand', pkgname) return [] else: return [pkgname] lines = lines[1:] l = [] for line in lines: words = line.split() if len(words) != 3: continue 
if words[0] in ("Installed", "Available", "Loaded", "Updating"): continue if words[0] in l: continue l.append((words[0], words[1])) ll = [] ix86_added = False from distutils.version import LooseVersion as V for _pkgname, _version in sorted(l, key=lambda x: V(x[1]), reverse=True): pkgarch = _pkgname.split('.')[-1] if pkgarch not in ('i386', 'i586', 'i686', 'ia32'): #pinfo("add", _pkgname, "because", pkgarch, "not in ('i386', 'i586', 'i686', 'ia32')") ll.append(_pkgname) elif not ix86_added: #pinfo("add", _pkgname, "because", pkgarch, "not ix86_added") ll.append(_pkgname) ix86_added = True l = ll if arch_specified or len(l) == 1: return l if os.environ['OSVC_COMP_NODES_OS_ARCH'] in ('i386', 'i586', 'i686', 'ia32'): archs = ('i386', 'i586', 'i686', 'ia32', 'noarch') else: archs = (os.environ['OSVC_COMP_NODES_OS_ARCH'], 'noarch') ll = [] for pkgname in l: pkgarch = pkgname.split('.')[-1] if pkgarch not in archs: # keep only packages matching the arch continue ll.append(pkgname) return ll def hp_parse_swlist(self, out): l = {} for line in out.split('\n'): if line.startswith('#') or len(line) == 0: continue v = line.split() if len(v) < 2: continue if v[0] in l: l[v[0]] += [(v[1], "")] else: l[v[0]] = [(v[1], "")] return l def hp_del_pkg(self, pkg): perror("TODO:", __fname__) return RET_ERR def hp_fix_pkg(self, pkg): if pkg in self.reloc: pkg = ':'.join((pkg, self.reloc[pkg])) cmd = ['swinstall', '-x', 'allow_downdate=true', '-x', 'mount_all_filesystems=false', '-s', self.uri, pkg] pinfo(" ".join(cmd)) p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() if len(out) > 0: pinfo(out) if len(err) > 0: perror(err) if p.returncode != 0: return RET_ERR return RET_OK def hp_get_installed_packages(self): p = Popen(['swlist', '-l', self.pkg_type], stdout=PIPE) (out, err) = p.communicate() if p.returncode != 0: perror('can not fetch installed packages list') return [] out = bdecode(out) return self.hp_parse_swlist(out).keys() def get_free(self, c): if not 
os.path.exists(c): return 0 cmd = ["df", "-k", c] p = Popen(cmd, stdout=PIPE, stderr=None) out, err = p.communicate() out = bdecode(out) for line in out.split(): if "%" in line: l = out.split() for i, w in enumerate(l): if '%' in w: break try: f = int(l[i-1]) return f except: return 0 return 0 def get_temp_dir(self): if hasattr(self, "tmpd"): return self.tmpd candidates = ["/tmp", "/var/tmp", "/root"] free = {} for c in candidates: free[self.get_free(c)] = c max = sorted(free.keys())[-1] self.tmpd = free[max] pinfo("selected %s as temp dir (%d KB free)" % (self.tmpd, max)) return self.tmpd def download(self, pkg_name): import urllib import tempfile f = tempfile.NamedTemporaryFile(dir=self.get_temp_dir()) dname = f.name f.close() try: os.makedirs(dname) except: pass fname = os.path.join(dname, "file") try: self.urllib.urlretrieve(pkg_name, fname) except IOError: try: os.unlink(fname) os.unlink(dname) except: pass raise Exception("download failed: %s" % str(e)) import tarfile os.chdir(dname) try: tar = tarfile.open(fname) except: pinfo("not a tarball") return fname try: tar.extractall() except: try: os.unlink(fname) os.unlink(dname) except: pass # must be a pkg return dname tar.close() os.unlink(fname) return dname def get_os_ver(self): cmd = ['uname', '-v'] p = Popen(cmd, stdout=PIPE) out, err = p.communicate() if p.returncode != 0: return 0 out = bdecode(out) lines = out.splitlines() if len(lines) == 0: return 0 try: osver = float(lines[0]) except: osver = 0 return osver def sol_get_installed_packages(self): p = Popen(['pkginfo', '-l'], stdout=PIPE) (out, err) = p.communicate() if p.returncode != 0: perror('can not fetch installed packages list') return [] l = [] out = bdecode(out) for line in out.splitlines(): v = line.split(':') if len(v) != 2: continue f = v[0].strip() if f == "PKGINST": pkgname = v[1].strip() l.append(pkgname) return l def sol_del_pkg(self, pkg): if pkg not in self.installed_packages: return RET_OK yes = os.path.dirname(__file__) + "/yes" cmd = 
'%s | pkgrm %s' % (yes, pkg) pinfo(cmd) r = os.system(cmd) if r != 0: return RET_ERR return RET_OK def sol_fix_pkg(self, pkg): data = self.data[pkg] if 'repo' not in data or len(data['repo']) == 0: perror("no repo specified in the rule") return RET_NA if data['repo'].endswith("/"): pkg_url = data['repo']+"/"+pkg else: pkg_url = data['repo'] pinfo("download", pkg_url) try: dname = self.download(pkg_url) except Exception as e: perror(e) return RET_ERR if os.path.isfile(dname): d = dname else: d = "." os.chdir(dname) if self.get_os_ver() < 10: opts = '' else: opts = '-G' if 'resp' in data and len(data['resp']) > 0: f = tempfile.NamedTemporaryFile(dir=self.get_temp_dir()) resp = f.name f.close() with open(resp, "w") as f: f.write(data['resp']) else: resp = "/dev/null" yes = os.path.dirname(__file__) + "/yes" cmd = '%s | pkgadd -r %s %s -d %s all' % (yes, resp, opts, d) pinfo(cmd) r = os.system(cmd) os.chdir("/") if os.path.isdir(dname): import shutil shutil.rmtree(dname) if r != 0: return RET_ERR return RET_OK def aix_del_pkg(self, pkg): cmd = ['installp', '-u', pkg] pinfo(" ".join(cmd)) p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() if len(out) > 0: pinfo(out) if len(err) > 0: perror(err) if p.returncode != 0: return RET_ERR return RET_OK def aix_fix_pkg(self, pkg): cmd = ['nimclient', '-o', 'cust', '-a', 'lpp_source=%s'%self.uri, '-a', 'installp_flags=Y', '-a', 'filesets=%s'%pkg] s = " ".join(cmd) pinfo(s) r = os.system(s) if r != 0: return RET_ERR return RET_OK def aix_get_installed_packages(self): cmd = ['lslpp', '-Lc'] p = Popen(cmd, stdout=PIPE) out, err = p.communicate() if p.returncode != 0: perror('can not fetch installed packages list') return [] pkgs = [] out = bdecode(out) for line in out.splitlines(): l = line.split(':') if len(l) < 5: continue pkgvers = l[2] pkgname = l[1].replace('-'+pkgvers, '') pkgs.append(pkgname) return pkgs def freebsd_pkg_get_installed_packages(self): p = Popen(['pkg', 'info'], stdout=PIPE) (out, err) = 
p.communicate() if p.returncode != 0: perror('can not fetch installed packages list') return [] l = [] out = bdecode(out) for line in out.splitlines(): try: i = line.index(" ") line = line[:i] i = line.rindex("-") l.append(line[:i]) except ValueError: pass return l def rpm_get_installed_packages(self): p = Popen(['rpm', '-qa', '--qf', '%{NAME}.%{ARCH}\n'], stdout=PIPE) (out, err) = p.communicate() if p.returncode != 0: perror('can not fetch installed packages list') return [] out = bdecode(out) return out.splitlines() def deb_get_installed_packages(self): p = Popen(['dpkg', '-l'], stdout=PIPE) (out, err) = p.communicate() if p.returncode != 0: perror('can not fetch installed packages list') return [] l = [] out = bdecode(out) for line in out.splitlines(): if not line.startswith('ii'): continue pkgname = line.split()[1] pkgname = pkgname.split(':')[0] l.append(pkgname) return l def freebsd_pkg_del_pkg(self, pkg): cmd = ['pkg', 'remove', '-y', pkg] pinfo(' '.join(cmd)) p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() if p.returncode != 0: err = bdecode(err) if len(err) > 0: pinfo(err) return RET_ERR return RET_OK def freebsd_pkg_fix_pkg(self, pkg): cmd = ['pkg', 'install', '-y', pkg] pinfo(' '.join(cmd)) p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() if p.returncode != 0: err = bdecode(err) if len(err) > 0: pinfo(err) return RET_ERR return RET_OK def zyp_del_pkg(self, pkg): cmd = ['zypper', 'remove', '-y', pkg] pinfo(' '.join(cmd)) p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() if p.returncode != 0: err = bdecode(err) if len(err) > 0: pinfo(err) return RET_ERR return RET_OK def zyp_fix_pkg(self, pkg): cmd = ['zypper', 'install', '-y', pkg] pinfo(' '.join(cmd)) p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() if p.returncode != 0: err = bdecode(err) if len(err) > 0: pinfo(err) return RET_ERR return RET_OK def yum_del_pkg(self, pkg): if type(pkg) == list: cmd = ['yum', '-y', 'remove'] + pkg 
else: cmd = ['yum', '-y', 'remove', pkg] pinfo(' '.join(cmd)) p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() if p.returncode != 0: err = bdecode(err) if len(err) > 0: pinfo(err) return RET_ERR return RET_OK def yum_fix_pkg(self, pkg): cmd = ['yum', '-y', 'install'] + pkg pinfo(' '.join(cmd)) p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() if p.returncode != 0: err = bdecode(err) if len(err) > 0: pinfo(err) return RET_ERR return RET_OK def apt_del_pkg(self, pkg): r = call(['apt-get', 'remove', '-y', pkg]) if r != 0: return RET_ERR return RET_OK def apt_fix_pkg(self, pkg): r = call(['apt-get', 'install', '--allow-unauthenticated', '-y', pkg]) if r != 0: return RET_ERR return RET_OK def fixable(self): return RET_NA def fix_pkg_combo(self): l_add = [] l_del = [] for pkg in self.packages: if pkg.startswith('-') and len(pkg) > 1: l_del.append(pkg[1:]) elif pkg.startswith('+') and len(pkg) > 1: l_add.append(pkg[1:]) else: l_add.append(pkg) if len(l_add) > 0: r = self.pkg_add(l_add) if r != RET_OK: return r if len(l_del) > 0: r = self.pkg_del(l_del) if r != RET_OK: return r return RET_OK def fix_pkg(self, pkg): if pkg.startswith('-') and len(pkg) > 1: return self.pkg_del(pkg[1:]) if pkg.startswith('+') and len(pkg) > 1: return self.pkg_add(pkg[1:]) else: return self.pkg_add(pkg) def check_pkg(self, pkg, verbose=True): if pkg.startswith('-') and len(pkg) > 1: return self.check_pkg_del(pkg[1:], verbose) if pkg.startswith('+') and len(pkg) > 1: return self.check_pkg_add(pkg[1:], verbose) else: return self.check_pkg_add(pkg, verbose) def check_pkg_del(self, pkg, verbose=True): if pkg in self.installed_packages: if verbose: perror('package', pkg, 'is installed') return RET_ERR if verbose: pinfo('package', pkg, 'is not installed') return RET_OK def check_pkg_add(self, pkg, verbose=True): if not pkg in self.installed_packages: if verbose: perror('package', pkg, 'is not installed') return RET_ERR if verbose: pinfo('package', pkg, 'is 
installed') return RET_OK def check(self): r = 0 for pkg in self.packages: r |= self.check_pkg(pkg) return r def fix(self): r = 0 if self.combo_fix: return self.fix_pkg_combo() for pkg in self.packages: if self.check_pkg(pkg, verbose=False) == RET_OK: continue r |= self.fix_pkg(pkg) return r if __name__ == "__main__": main(CompPackages) opensvc-1.8~20170412/var/compliance/com.opensvc/fileinc.py0000755000175000017500000002126113073467726023407 0ustar jkelbertjkelbert#!/usr/bin/env python data = { "default_prefix": "OSVC_COMP_FILEINC_", "example_value": """ { "path": "/tmp/foo", "check": ".*some pattern.*", "fmt": "full added content with %%HOSTNAME%%@corp.com: some pattern into the file." } """, "description": """* Verify file content. * The collector provides the format with wildcards. * The module replace the wildcards with contextual values. * The fmt must match the check pattern Wildcards: %%ENV:VARNAME%% Any environment variable value %%HOSTNAME%% Hostname %%SHORT_HOSTNAME%% Short hostname """, "form_definition": """ Desc: | A fileinc rule, fed to the 'fileinc' compliance object to verify a line matching the 'check' regular expression is present in the specified file. Css: comp48 Outputs: - Dest: compliance variable Class: fileinc Type: json Format: dict Inputs: - Id: path Label: Path DisplayModeLabel: path LabelCss: hd16 Mandatory: Yes Help: File path to search the matching line into. Type: string - Id: check Label: Check regexp DisplayModeLabel: check LabelCss: action16 Mandatory: Yes Help: A regular expression. Matching the regular expression is sufficent to grant compliancy. Type: string - Id: fmt Label: Format DisplayModeLabel: fmt LabelCss: action16 Help: The line installed if the check pattern is not found in the file. Type: string - Id: ref Label: URL to format DisplayModeLabel: ref LabelCss: loc Help: An URL pointing to a file containing the line installed if the check pattern is not found in the file. 
Type: string """, } import os import sys import json import stat import re import urllib import tempfile import codecs sys.path.append(os.path.dirname(__file__)) from comp import * MAXSZ = 8*1024*1024 class CompFileInc(CompObject): def __init__(self, prefix=None): CompObject.__init__(self, prefix=prefix, data=data) def init(self): self.files = {} self.ok = {} self.checks = [] self.upds = {} self.sysname, self.nodename, x, x, self.machine = os.uname() for rule in self.get_rules(): self.add_rule(rule) if len(self.checks) == 0: raise NotApplicable() def fixable(self): return RET_NA def parse_fmt(self, x): if isinstance(x, int): x = str(x) x = x.replace('%%HOSTNAME%%', self.nodename) x = x.replace('%%SHORT_HOSTNAME%%', self.nodename.split('.')[0]) return x def parse_ref(self, url): f = tempfile.NamedTemporaryFile() tmpf = f.name try: self.urlretrieve(url, tmpf) f.close() except Exception as e: perror(url, "download error:", e) return '' content = unicode(f.read()) return self.parse_fmt(content) def read_file(self, path): if not os.path.exists(path): return '' out = '' try : f = codecs.open(path, 'r', encoding="utf8", errors="ignore") out = f.read().rstrip('\n') f.close() except IOError as e: pinfo("cannot read '%s', error=%d - %s" %(path, e.errno, str(e))) raise except: perror("Cannot open '%s', unexpected error: %s"%(path, sys.exc_info()[0])) raise return out def add_rule(self, d): r = RET_OK if 'path' not in d: perror("'path' should be defined:", d) r |= RET_ERR if 'fmt' in d and 'ref' in d: perror("'fmt' and 'ref' are exclusive:", d) r |= RET_ERR if 'path' in d: d['path'] = d['path'].strip() if 'ref' in d: d['ref'] = d['ref'].strip() if not d['path'] in self.upds: self.upds[d['path']] = 0 if not d['path'] in self.files: try: fsz = os.path.getsize(d['path']) except: fsz = 0 if fsz > MAXSZ: self.ok[d['path']] = 0 self.files[d['path']] = '' perror("file '%s' is too large [%.2f Mb] to fit" %(d['path'], fsz/(1024.*1024))) r |= RET_ERR else: try: self.files[d['path']] = 
self.read_file(d['path']) self.ok[d['path']] = 1 except: self.files[d['path']] = "" self.ok[d['path']] = 0 r |= RET_ERR c = '' if 'fmt' in d: c = self.parse_fmt(d['fmt']) elif 'ref' in d: c = self.parse_ref(d['ref']) else: perror("'fmt' or 'ref' should be defined:", d) r |= RET_ERR c = c.strip() if re.match(d['check'], c) is not None or len(c) == 0: val = True else: val = False r |= RET_ERR self.checks.append({'check':d['check'], 'path':d['path'], 'add':c, 'valid':val}) return r def check(self): r = RET_OK for ck in self.checks: if not ck['valid']: perror("rule error: '%s' does not match target content" % ck['check']) r |= RET_ERR continue if self.ok[ck['path']] != 1: r |= RET_ERR continue pr = RET_OK m = 0 ok = 0 lines = self.files[ck['path']].split('\n') for line in lines: if re.match(ck['check'], line): m += 1 if len(ck['add']) > 0 and line == ck['add']: pinfo("line '%s' found in '%s'" %(line, ck['path'])) ok += 1 if m > 1: perror("duplicate match of pattern '%s' in '%s'"%(ck['check'], ck['path'])) pr |= RET_ERR if len(ck['add']) == 0: if m > 0: perror("pattern '%s' found in %s"%(ck['check'], ck['path'])) pr |= RET_ERR else: pinfo("pattern '%s' not found in %s"%(ck['check'], ck['path'])) elif ok == 0: perror("line '%s' not found in %s"%(ck['add'], ck['path'])) pr |= RET_ERR elif m == 0: perror("pattern '%s' not found in %s"%(ck['check'], ck['path'])) pr |= RET_ERR r |= pr return r def rewrite_files(self): r = RET_OK for path in self.files: if self.upds[path] == 0: continue if self.ok[path] != 1: r |= RET_ERR continue if not os.path.exists(path): perror("'%s' will be created, please check owner and permissions" %path) try: f = codecs.open(path, 'w', encoding="utf8") f.write(self.files[path]) f.close() pinfo("'%s' successfully rewritten" %path) except: perror("failed to rewrite '%s'" %path) r |= RET_ERR return r def fix(self): r = RET_OK for ck in self.checks: if not ck['valid']: perror("rule error: '%s' does not match target content" % ck['check']) r |= RET_ERR 
continue if self.ok[ck['path']] != 1: r |= RET_ERR continue need_rewrite = False m = 0 lines = self.files[ck['path']].rstrip('\n').split('\n') for i, line in enumerate(lines): if re.match(ck['check'], line): m += 1 if m == 1: if line != ck['add']: # rewrite line pinfo("rewrite %s:%d:'%s', new content: '%s'" %(ck['path'], i, line, ck['add'])) lines[i] = ck['add'] need_rewrite = True elif m > 1: # purge dup pinfo("remove duplicate line %s:%d:'%s'" %(ck['path'], i, line)) lines[i] = "" need_rewrite = True if m == 0 and len(ck['add']) > 0: pinfo("add line '%s' to %s"%(ck['add'], ck['path'])) lines.append(ck['add']) need_rewrite = True if need_rewrite: self.files[ck['path']] = '\n'.join(lines).rstrip("\n")+"\n" self.upds[ck['path']] = 1 r |= self.rewrite_files() return r if __name__ == "__main__": main(CompFileInc) opensvc-1.8~20170412/var/compliance/com.opensvc/utilities.py0000644000175000017500000000136013073467726024004 0ustar jkelbertjkelbert#!/usr/bin/env python from __future__ import print_function import os import sys def is_exe(fpath): """Returns True if file path is executable, False otherwize does not follow symlink """ return os.path.exists(fpath) and os.access(fpath, os.X_OK) def which(program): """Returns True if program is in PATH and executable, False otherwize """ fpath, fname = os.path.split(program) if fpath and is_exe(program): return program for path in os.environ["PATH"].split(os.pathsep): exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file return None if __name__ == "__main__": print("this file is for import into compliance objects", file=sys.stderr) opensvc-1.8~20170412/var/compliance/com.opensvc/process.py0000755000175000017500000003200413073467726023451 0ustar jkelbertjkelbert#!/usr/bin/env python data = { "default_prefix": "OSVC_COMP_PROC_", "example_value": """ [ { "comm": "foo", "uid": 2345, "state": "on", "user": "foou" }, { "comm": "bar", "state": "off", "uid": 2345 } ] """, "description": """* Checks if a process is 
present, specifying its comm, and optionnaly its owner's uid and/or username. """, "form_definition": """ Desc: | A rule defining a process that should be running or not running on the target host, its owner's username and the command to launch it or to stop it. Css: comp48 Outputs: - Dest: compliance variable Type: json Format: list of dict Class: process Inputs: - Id: comm Label: Command DisplayModeLabel: comm LabelCss: action16 Mandatory: No Type: string Help: The Unix process command, as shown in the ps comm column. - Id: args Label: Arguments DisplayModeLabel: args LabelCss: action16 Mandatory: No Type: string Help: The Unix process arguments, as shown in the ps args column. - Id: state Label: State DisplayModeLabel: state LabelCss: action16 Type: string Mandatory: Yes Default: on Candidates: - "on" - "off" Help: The expected process state. - Id: uid Label: Owner user id DisplayModeLabel: uid LabelCss: guy16 Type: integer Help: The Unix user id owning the process. - Id: user Label: Owner user name DisplayModeLabel: user LabelCss: guy16 Type: string Help: The Unix user name owning the process. - Id: start Label: Start command DisplayModeLabel: start LabelCss: action16 Type: string Help: The command to start or stop the process, including the executable arguments. The executable must be defined with full path. 
""", } import os import sys import json import re from subprocess import * sys.path.append(os.path.dirname(__file__)) from comp import * from utilities import which class CompProcess(CompObject): def __init__(self, prefix=None): CompObject.__init__(self, prefix=prefix, data=data) def init(self): self.sysname, self.nodename, x, x, self.machine = os.uname() if self.sysname not in ['Linux', 'AIX', 'SunOS', 'FreeBSD', 'Darwin', 'HP-UX']: perror('module not supported on', self.sysname) raise NotApplicable() if self.sysname == 'HP-UX' and 'UNIX95' not in os.environ: os.environ['UNIX95'] = "" self.process = self.get_rules() self.validate_process() if len(self.process) == 0: raise NotApplicable() self.load_ps() def load_ps_args(self): self.ps_args = {} cmd = ['ps', '-e', '-o', 'pid,uid,user,args'] p = Popen(cmd, stdout=PIPE) out, err = p.communicate() if p.returncode != 0: perror("unable to fetch ps") raise ComplianceError out = bdecode(out) lines = out.splitlines() if len(lines) < 2: return for line in lines[1:]: l = line.split() if len(l) < 4: continue pid, uid, user = l[:3] args = " ".join(l[3:]) if args not in self.ps_args: self.ps_args[args] = [(pid, int(uid), user)] else: self.ps_args[args].append((pid, int(uid), user)) def load_ps_comm(self): self.ps_comm = {} cmd = ['ps', '-e', '-o', 'comm,pid,uid,user'] p = Popen(cmd, stdout=PIPE) out, err = p.communicate() if p.returncode != 0: perror("unable to fetch ps") raise ComplianceError out = bdecode(out) lines = out.splitlines() if len(lines) < 2: return for line in lines[1:]: l = line.split() if len(l) != 4: continue comm, pid, uid, user = l if comm not in self.ps_comm: self.ps_comm[comm] = [(pid, int(uid), user)] else: self.ps_comm[comm].append((pid, int(uid), user)) def load_ps(self): self.load_ps_comm() self.load_ps_args() def validate_process(self): l = [] for process in self.process: if self._validate_process(process) == RET_OK: l.append(process) self.process = l def _validate_process(self, process): if 'comm' not 
in process and 'args' not in process: perror(process, 'rule is malformed ... nor comm nor args key present') return RET_ERR if 'uid' in process and type(process['uid']) != int: perror(process, 'rule is malformed ... uid value must be integer') return RET_ERR return RET_OK def get_keys_args(self, args): found = [] for key in self.ps_args: if re.match(args, key) is not None: found.append(key) return found def get_keys_comm(self, comm): found = [] for key in self.ps_comm: if re.match(comm, key) is not None: found.append(key) return found def check_present_args(self, args, verbose): if len(args.strip()) == 0: return RET_OK found = self.get_keys_args(args) if len(found) == 0: if verbose: perror('process with args', args, 'is not started ... should be') return RET_ERR else: if verbose: pinfo('process with args', args, 'is started ... on target') return RET_OK def check_present_comm(self, comm, verbose): if len(comm.strip()) == 0: return RET_OK found = self.get_keys_comm(comm) if len(found) == 0: if verbose: perror('process with command', comm, 'is not started ... should be') return RET_ERR else: if verbose: pinfo('process with command', comm, 'is started ... on target') return RET_OK def check_present(self, process, verbose): r = RET_OK if 'comm' in process: r |= self.check_present_comm(process['comm'], verbose) if 'args' in process: r |= self.check_present_args(process['args'], verbose) return r def check_not_present_comm(self, comm, verbose): if len(comm.strip()) == 0: return RET_OK found = self.get_keys_comm(comm) if len(found) == 0: if verbose: pinfo('process with command', comm, 'is not started ... on target') return RET_OK else: if verbose: perror('process with command', comm, 'is started ... shoud be') return RET_ERR def check_not_present_args(self, args, verbose): if len(args.strip()) == 0: return RET_OK found = self.get_keys_args(args) if len(found) == 0: if verbose: pinfo('process with args', args, 'is not started ... 
on target') return RET_OK else: if verbose: perror('process with args', args, 'is started ... shoud be') return RET_ERR def check_not_present(self, process, verbose): r = 0 if 'comm' in process: r |= self.check_not_present_comm(process['comm'], verbose) if 'args' in process: r |= self.check_not_present_args(process['args'], verbose) return r def check_process(self, process, verbose=True): r = RET_OK if process['state'] == 'on': r |= self.check_present(process, verbose) if r == RET_ERR: return RET_ERR if 'uid' in process: r |= self.check_uid(process, process['uid'], verbose) if 'user' in process: r |= self.check_user(process, process['user'], verbose) else: r |= self.check_not_present(process, verbose) return r def check_uid(self, process, uid, verbose): if 'args' in process: return self.check_uid_args(process['args'], uid, verbose) if 'comm' in process: return self.check_uid_comm(process['comm'], uid, verbose) def check_uid_comm(self, comm, uid, verbose): if len(comm.strip()) == 0: return RET_OK found = False keys = self.get_keys_comm(comm) for key in keys: for _pid, _uid, _user in self.ps_comm[key]: if uid == _uid: found = True continue if found: if verbose: pinfo('process with command', comm, 'runs with uid', _uid, '... on target') else: if verbose: perror('process with command', comm, 'does not run with uid', _uid, '... should be') return RET_ERR return RET_OK def check_uid_args(self, args, uid, verbose): if len(args.strip()) == 0: return RET_OK found = False keys = self.get_keys_args(args) for key in keys: for _pid, _uid, _user in self.ps_args[key]: if uid == _uid: found = True continue if found: if verbose: pinfo('process with args', args, 'runs with uid', _uid, '... on target') else: if verbose: perror('process with args', args, 'does not run with uid', _uid, '... 
should be') return RET_ERR return RET_OK def check_user(self, process, user, verbose): if 'args' in process: return self.check_user_args(process['args'], user, verbose) if 'comm' in process: return self.check_user_comm(process['comm'], user, verbose) def check_user_comm(self, comm, user, verbose): if len(comm.strip()) == 0: return RET_OK if user is None or len(user) == 0: return RET_OK found = False keys = self.get_keys_comm(comm) for key in keys: for _pid, _uid, _user in self.ps_comm[key]: if user == _user: found = True continue if found: if verbose: pinfo('process with command', comm, 'runs with user', _user, '... on target') else: if verbose: perror('process with command', comm, 'runs with user', _user, '... should run with user', user) return RET_ERR return RET_OK def check_user_args(self, args, user, verbose): if len(args.strip()) == 0: return RET_OK if user is None or len(user) == 0: return RET_OK found = False keys = self.get_keys_args(args) for key in keys: for _pid, _uid, _user in self.ps_args[key]: if user == _user: found = True continue if found: if verbose: pinfo('process with args', args, 'runs with user', _user, '... on target') else: if verbose: perror('process with args', args, 'runs with user', _user, '... should run with user', user) return RET_ERR return RET_OK def fix_process(self, process): if process['state'] == 'on': if self.check_present(process, verbose=False) == RET_OK: if ('uid' in process and self.check_uid(process, process['uid'], verbose=False) == RET_ERR) or \ ('user' in process and self.check_user(process, process['user'], verbose=False) == RET_ERR): perror(process, "runs with the wrong user. 
can't fix.") return RET_ERR return RET_OK elif process['state'] == 'off': if self.check_not_present(process, verbose=False) == RET_OK: return RET_OK if 'start' not in process or len(process['start'].strip()) == 0: perror("undefined fix method for process", process['comm']) return RET_ERR v = process['start'].split(' ') if not which(v[0]): perror("fix command", v[0], "is not present or not executable") return RET_ERR pinfo('exec:', process['start']) try: p = Popen(v, stdout=PIPE, stderr=PIPE) out, err = p.communicate() except Exception as e: perror(e) return RET_ERR out = bdecode(out) err = bdecode(err) if len(out) > 0: pinfo(out) if len(err) > 0: perror(err) if p.returncode != 0: perror("fix up command returned with error code", p.returncode) return RET_ERR return RET_OK def check(self): r = 0 for process in self.process: r |= self.check_process(process) return r def fix(self): r = 0 for process in self.process: r |= self.fix_process(process) return r if __name__ == "__main__": main(CompProcess) opensvc-1.8~20170412/var/compliance/com.opensvc/cron.py0000755000175000017500000001531613073467726022743 0ustar jkelbertjkelbert#!/usr/bin/env python data = { "default_prefix": "OSVC_COMP_CRON_ENTRY_", "example_value": "add:osvc:* * * * *:/path/to/mycron:/etc/cron.d/opensvc", "description": """* Add and Remove cron entries * Support arbitrary cron file location """, } import os import sys import shutil import glob from subprocess import * sys.path.append(os.path.dirname(__file__)) from comp import * class CompCron(CompObject): def __init__(self, prefix=None): CompObject.__init__(self, prefix=prefix, data=data) def init(self): self.sysname, self.nodename, x, x, self.machine = os.uname() if self.sysname == 'SunOS' : self.crontab_locs = [ '/var/spool/cron/crontabs' ] else: self.crontab_locs = [ '/etc/cron.d', '/var/spool/cron/crontabs', '/var/spool/cron', '/var/cron/tabs', ] self.ce = [] for _ce in self.get_rules_raw(): e = _ce.split(':') if len(e) < 5: perror("malformed 
variable %s. format: action:user:sched:cmd:[file]"%_ce) continue if e[0] not in ('add', 'del'): perror("unsupported action in variable %s. set 'add' or 'del'"%_ce) continue if len(e[2].split()) != 5: perror("malformed schedule in variable %s"%_ce) continue self.ce += [{ 'var': _ce, 'action': e[0], 'user': e[1], 'sched': e[2], 'cmd': e[3], 'file': e[4], }] if len(self.ce) == 0: raise NotApplicable() def activate_cron(self, cron_file): """ Activate changes (actually only needed on HP-UX) """ if '/var/spool/' in cron_file: pinfo("tell crond about the change") cmd = ['crontab', cron_file] process = Popen(cmd, stdout=PIPE, stderr=PIPE, close_fds=True) buff = process.communicate() def fixable(self): r = RET_OK for e in self.ce: try: self._fixable_cron(e) except ComplianceError, e: perror(str(e)) r = RET_ERR except Unfixable, e: perror(str(e)) return r return r def fix(self): r = RET_OK for e in self.ce: try: if e['action'] == 'add': self._add_cron(e) elif e['action'] == 'del': self._del_cron(e) except ComplianceError, e: perror(str(e)) r = RET_ERR except Unfixable, e: perror(str(e)) return r return r def check(self): r = RET_OK for e in self.ce: try: self._check_cron(e) except ComplianceError, e: perror(str(e)) r = RET_ERR except Unfixable, e: perror(str(e)) return r return r def get_cron_file(self, e): """ order of preference """ cron_file = None for loc in self.crontab_locs: if not os.path.exists(loc): continue if loc == '/etc/cron.d': cron_file = os.path.join(loc, e['file']) else: cron_file = os.path.join(loc, e['user']) break return cron_file def format_entry(self, cron_file, e): if 'cron.d' in cron_file: s = ' '.join([e['sched'], e['user'], e['cmd']]) else: s = ' '.join([e['sched'], e['cmd']]) return s def _fixable_cron(self, e): cron_file = self.get_cron_file(e) if cron_file is None: raise Unfixable("no crontab usual location found (%s)"%str(self.crontab_locs)) def _check_cron(self, e): cron_file = self.get_cron_file(e) if cron_file is None: raise Unfixable("no 
crontab usual location found (%s)"%str(self.crontab_locs)) s = self.format_entry(cron_file, e) if not os.path.exists(cron_file): raise ComplianceError("cron entry not found '%s' in '%s'"%(s, cron_file)) with open(cron_file, 'r') as f: new = f.readlines() found = False for line in new: if s == line[:-1]: found = True break if not found and e['action'] == 'add': raise ComplianceError("wanted cron entry not found: '%s' in '%s'"%(s, cron_file)) if found and e['action'] == 'del': raise ComplianceError("unwanted cron entry found: '%s' in '%s'"%(s, cron_file)) def _del_cron(self, e): cron_file = self.get_cron_file(e) if cron_file is None: raise Unfixable("no crontab usual location found (%s)"%str(self.crontab_locs)) s = self.format_entry(cron_file, e) if not os.path.exists(cron_file): return new = [] with open(cron_file, 'r') as f: lines = f.readlines() for line in lines: if s == line[:-1]: pinfo("delete entry '%s' from '%s'"%(s, cron_file)) continue new.append(line) if len(new) == 0: pinfo('deleted last entry of %s. 
delete file too.'%cron_file) os.unlink(cron_file) else: with open(cron_file, 'w') as f: f.write(''.join(new)) self.activate_cron(cron_file) def _add_cron(self, e): cron_file = self.get_cron_file(e) if cron_file is None: raise Unfixable("no crontab usual location found (%s)"%str(self.crontab_locs)) s = self.format_entry(cron_file, e) new = False if os.path.exists(cron_file): with open(cron_file, 'r') as f: new = f.readlines() found = False for line in new: if s == line[:-1]: found = True break if not found: new.append(s+'\n') else: new = [s+'\n'] if not new: raise ComplianceError("problem preparing the new crontab '%s'"%cron_file) pinfo("add entry '%s' to '%s'"%(s, cron_file)) with open(cron_file, 'w') as f: f.write(''.join(new)) self.activate_cron(cron_file) if __name__ == "__main__": main(CompCron) opensvc-1.8~20170412/var/compliance/com.opensvc/fs.py0000755000175000017500000005400113073467726022404 0ustar jkelbertjkelbert#!/usr/bin/env python """ Verify file content. The collector provides the format with wildcards. The module replace the wildcards with contextual values. 
The variable format is json-serialized: [{ "dev": "lv_applisogm", "size": "1024M", "mnt": "/%%ENV:SVCNAME%%/applis/ogm", "vg": ["%%ENV:SVCNAME%%", "vgAPPLIS", "vgCOMMUN01", "vgLOCAL"] }] Wildcards: %%ENV:VARNAME%% Any environment variable value Toggle: %%ENV:FS_STRIP_SVCNAME_FROM_DEV_IF_IN_VG%% """ import os import sys import json import stat import re from subprocess import * from stat import * sys.path.append(os.path.dirname(__file__)) from comp import * from utilities import which class CompFs(object): def __init__(self, prefix='OSVC_COMP_FS_'): self.prefix = prefix.upper() self.sysname, self.nodename, x, x, self.machine = os.uname() self.sysname = self.sysname.replace('-', '') self.fs = [] self.res = {} self.res_status = {} if 'OSVC_COMP_SERVICES_SVCNAME' in os.environ: self.svcname = os.environ['OSVC_COMP_SERVICES_SVCNAME'] self.osvc_service = True else: os.environ['OSVC_COMP_SERVICES_SVCNAME'] = "" self.svcname = None self.osvc_service = False keys = [key for key in os.environ if key.startswith(self.prefix)] if len(keys) == 0: raise NotApplicable() self.vglist() for k in keys: try: self.fs += self.add_fs(os.environ[k]) except ValueError: perror('failed to parse variable', os.environ[k]) if len(self.fs) == 0: raise NotApplicable() self.fs.sort(lambda x, y: cmp(x['mnt'], y['mnt'])) def vglist_HPUX(self): import glob l = glob.glob("/dev/*/group") l = map(lambda x: x.split('/')[2], l) self.vg = l def vglist_Linux(self): if not which("vgs"): perror('vgs command not found') raise ComplianceError() cmd = ['vgs', '-o', 'vg_name', '--noheadings'] p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() if p.returncode != 0: perror('failed to list volume groups') raise ComplianceError() out = bdecode(out) self.vg = out.split() def vglist(self): if not hasattr(self, 'vglist_'+self.sysname): perror(self.sysname, 'not supported') raise NotApplicable() getattr(self, 'vglist_'+self.sysname)() def subst(self, v): if type(v) == list: l = [] for _v in v: 
l.append(self.subst(_v)) return l if type(v) != str and type(v) != unicode: return v p = re.compile('%%ENV:\w+%%') for m in p.findall(v): s = m.strip("%").replace('ENV:', '') if s in os.environ: _v = os.environ[s] elif 'OSVC_COMP_'+s in os.environ: _v = os.environ['OSVC_COMP_'+s] else: perror(s, 'is not an env variable') raise NotApplicable() v = v.replace(m, _v) return v.strip() def add_fs(self, v): if type(v) == str or type(v) == unicode: d = json.loads(v) else: d = v l = [] # recurse if multiple fs are specified in a list of dict if type(d) == list: for _d in d: l += self.add_fs(_d) return l if type(d) != dict: perror("not a dict:", d) return l if 'dev' not in d: perror('dev should be in the dict:', d) return l if 'mnt' not in d: perror('mnt should be in the dict:', d) return l if 'size' not in d: perror('size should be in the dict:', d) return l if 'vg' not in d: perror('vg should be in the dict:', d) return l if 'type' not in d: perror('type should be in the dict:', d) return l if 'opts' not in d: perror('opts should be in the dict:', d) return l if type(d['vg']) != list: d['vg'] = [d['vg']] d['vg_orig'] = d['vg'] d['vg'] = self.subst(d['vg']) d['prefvg'] = self.prefvg(d) d['dev'] = self.strip_svcname(d) for k in ('dev', 'mnt', 'size', 'type', 'opts'): d[k] = self.subst(d[k]) d['mnt'] = self.normpath(d['mnt']) d['devpath'] = self.devpath(d) d['rdevpath'] = self.rdevpath(d) try: d['size'] = self.size_to_mb(d) except ComplianceError: return [] return [d] def strip_svcname(self, fs): key = "OSVC_COMP_FS_STRIP_SVCNAME_FROM_DEV_IF_IN_VG" if key not in os.environ or os.environ[key] != "true": return fs['dev'] if "%%ENV:SERVICES_SVCNAME%%" not in fs['vg_orig'][fs['prefvg_idx']]: return fs['dev'] # the vg is dedicated to the service. 
no need to embed # the service name in the lv name too s = fs['dev'].replace("%%ENV:SERVICES_SVCNAME%%", "") if s == "lv_": s = "root" return s def normpath(self, p): l = p.split('/') p = os.path.normpath(os.path.join(os.sep, *l)) return p def rdevpath(self, d): return '/dev/%s/r%s'%(d['prefvg'], d['dev']) def devpath(self, d): return '/dev/%s/%s'%(d['prefvg'], d['dev']) def prefvg(self, d): lc_candidate_vg = map(lambda x: x.lower(), d['vg']) lc_existing_vg = map(lambda x: x.lower(), self.vg) for i, vg in enumerate(lc_candidate_vg): if vg in lc_existing_vg: d['prefvg_idx'] = i # return capitalized vg name return self.vg[lc_existing_vg.index(vg)] perror("no candidate vg is available on this node for dev %s"%d['dev']) raise NotApplicable() def check_fs_mnt(self, fs, verbose=False): if not os.path.exists(fs['mnt']): if verbose: perror("mount point", fs['mnt'], "does not exist") return 1 if verbose: pinfo("mount point", fs['mnt'], "exists") return 0 def check_fs_dev_exists(self, fs, verbose=False): if not os.path.exists(fs['devpath']): if verbose: perror("device", fs['devpath'], "does not exist") return 1 if verbose: pinfo("device", fs['devpath'], "exists") return 0 def check_fs_dev_stat(self, fs, verbose=False): mode = os.stat(fs['devpath'])[ST_MODE] if not S_ISBLK(mode): if verbose: perror("device", fs['devpath'], "is not a block device") return 1 if verbose: pinfo("device", fs['devpath'], "is a block device") return 0 def find_vg_rid(self, vgname): rids = [ rid for rid in self.res_status.keys() if rid.startswith('vg#') ] for rid in rids: if self.get_res_item(rid, 'vgname') == vgname: return rid return None def private_svc_vg_down(self, fs): if self.svcname is None or not self.osvc_service: return False rid = self.find_vg_rid(fs['prefvg']) if rid is None: # vg is not driven by the service return False if self.res_status[rid] not in ('up', 'stdby up'): return False return True def check_fs_dev(self, fs, verbose=False): if self.private_svc_vg_down(fs): # don't report 
error on passive node with private svc prefvg return 0 if self.check_fs_dev_exists(fs, verbose) == 1: return 1 if self.check_fs_dev_stat(fs, verbose) == 1: return 1 return 0 def fix_fs_dev(self, fs): if self.check_fs_dev(fs, False) == 0: return 0 if self.check_fs_dev_exists(fs, False) == 0: perror("device", fs['devpath'], "already exists. won't fix.") return 1 return self.createlv(fs) def createlv(self, fs): if not hasattr(self, 'createlv_'+self.sysname): perror(self.sysname, 'not supported') raise NotApplicable() return getattr(self, 'createlv_'+self.sysname)(fs) def size_to_mb(self, fs): s = fs['size'] unit = s[-1] size = int(s[:-1]) if unit == 'T': s = str(size*1024*1024) elif unit == 'G': s = str(size*1024) elif unit == 'M': s = str(size) elif unit == 'K': s = str(size//1024) else: perror("unknown size unit in rule: %s (use T, G, M or K)"%s) raise ComplianceError() return s def createlv_HPUX(self, fs): cmd = ['lvcreate', '-n', fs['dev'], '-L', fs['size'], fs['prefvg']] pinfo(' '.join(cmd)) p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() out = bdecode(out) err = bdecode(err) if len(out) > 0: pinfo(out) if len(err) > 0: pinfo(err) if p.returncode != 0: return 1 return 0 def createlv_Linux(self, fs): os.environ["LVM_SUPPRESS_FD_WARNINGS"] = "1" cmd = ['lvcreate', '-n', fs['dev'], '-L', fs['size']+'M', fs['prefvg']] pinfo(' '.join(cmd)) p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() out = bdecode(out) err = bdecode(err) if len(out) > 0: pinfo(out) if len(err) > 0: pinfo(err) if p.returncode != 0: return 1 return 0 def fix_fs_mnt(self, fs, verbose=False): if self.check_fs_mnt(fs, False) == 0: return 0 pinfo("create", fs['mnt'], "mount point") os.makedirs(fs['mnt']) return 0 def check_fs_fmt_HPUX_vxfs(self, fs, verbose=False): cmd = ['fstyp', fs['devpath']] p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() out = bdecode(out) err = bdecode(err) if p.returncode != 0 or "vxfs" not in out: if verbose: 
perror(fs['devpath'], "is not formatted") return 1 if verbose: pinfo(fs['devpath'], "is correctly formatted") return 0 def check_fs_fmt_HPUX(self, fs, verbose=False): if fs['type'] == 'vxfs': return self.check_fs_fmt_HPUX_vxfs(fs, verbose) perror("unsupported fs type: %s"%fs['type']) return 1 def check_fs_fmt_Linux(self, fs, verbose=False): if fs['type'] in ('ext2', 'ext3', 'ext4'): return self.check_fs_fmt_Linux_ext(fs, verbose) perror("unsupported fs type: %s"%fs['type']) return 1 def check_fs_fmt_Linux_ext(self, fs, verbose=False): cmd = ['tune2fs', '-l', fs['devpath']] p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() out = bdecode(out) err = bdecode(err) if p.returncode != 0: if verbose: perror(fs['devpath'], "is not formatted") return 1 if verbose: pinfo(fs['devpath'], "is correctly formatted") return 0 def fix_fs_fmt_Linux_ext(self, fs): cmd = ['mkfs.'+fs['type'], '-q', '-b', '4096', fs['devpath']] pinfo(' '.join(cmd)) p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() out = bdecode(out) err = bdecode(err) if len(out) > 0: pinfo(out) if len(err) > 0: pinfo(err) if p.returncode != 0: return 1 cmd = ['tune2fs', '-m', '0', '-c', '0', '-i', '0', fs['devpath']] pinfo(' '.join(cmd)) p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() out = bdecode(out) err = bdecode(err) if len(out) > 0: pinfo(out) if len(err) > 0: pinfo(err) if p.returncode != 0: return 1 return 0 def fix_fs_fmt_Linux(self, fs): if fs['type'] in ('ext2', 'ext3', 'ext4'): return self.fix_fs_fmt_Linux_ext(fs) perror("unsupported fs type: %s"%fs['type']) return 1 def check_fs_fmt(self, fs, verbose=False): if not hasattr(self, 'check_fs_fmt_'+self.sysname): perror(self.sysname, 'not supported') raise NotApplicable() return getattr(self, 'check_fs_fmt_'+self.sysname)(fs, verbose) def fix_fs_fmt_HPUX_vxfs(self, fs): cmd = ['newfs', '-F', 'vxfs', '-b', '8192', fs['rdevpath']] pinfo(' '.join(cmd)) p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = 
p.communicate() out = bdecode(out) err = bdecode(err) if len(out) > 0: pinfo(out) if len(err) > 0: pinfo(err) if p.returncode != 0: return 1 return 0 def fix_fs_fmt_HPUX(self, fs): if fs['type'] == 'vxfs': return self.fix_fs_fmt_HPUX_vxfs(fs) perror("unsupported fs type: %s"%fs['type']) return 1 if not hasattr(self, 'check_fs_fmt_'+self.sysname): perror(self.sysname, 'not supported') raise NotApplicable() return getattr(self, 'check_fs_fmt_'+self.sysname)(fs, verbose) def fix_fs_fmt(self, fs): if self.check_fs_fmt(fs) == 0: return 0 if not hasattr(self, 'fix_fs_fmt_'+self.sysname): perror(self.sysname, 'not supported') raise NotApplicable() return getattr(self, 'fix_fs_fmt_'+self.sysname)(fs) def get_res_item(self, rid, item): cmd = ['svcmgr', '-s', self.svcname, 'get', '--param', '.'.join((rid, item))] p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() out = bdecode(out) err = bdecode(err) if p.returncode != 0: perror(' '.join(cmd), 'failed') return 1 return out.strip() def get_res(self, rid): if rid in self.res: return self.res[rid] d = {} d['mnt'] = self.get_res_item(rid, 'mnt') d['dev'] = self.get_res_item(rid, 'dev') self.res[rid] = d return d def get_fs_rids(self, refresh=False): if not refresh and hasattr(self, 'rids'): return self.rids cmd = ['svcmgr', '-s', self.svcname, 'json_status'] p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() out = bdecode(out) err = bdecode(err) for line in out.splitlines(): if line.startswith('{'): out = line break try: # json_status returns 0, even when it outs no data self.res_status = json.loads(out)['resources'] except Exception as e: pinfo(e) pinfo(out) self.rids = [] self.osvc_service = False return self.rids self.rids = [ k for k in self.res_status.keys() if k.startswith('fs#') ] return self.rids def find_rid(self, fs): found = False for rid in self.rids: d = self.get_res(rid) if d['mnt'] == fs['mnt'] and d['dev'] == fs['devpath']: return rid return None def fix_fs_local(self, fs): if 
self.svcname is not None and self.osvc_service: return 0 if self.check_fs_local(fs, False) == 0: return 0 with open("/etc/fstab", "r") as f: lines = f.read().split('\n') if len(lines[-1]) == 0: del(lines[-1]) p = re.compile(r'\s*%s\s+'%(fs['devpath'])) newline = "%s %s %s %s 0 2"%(fs['devpath'], fs['mnt'], fs['type'], fs['opts']) for i, line in enumerate(lines): if line == newline: return 0 if re.match(p, line) is not None: pinfo("remove '%s' from fstab"%line) del lines[i] lines.append(newline) pinfo("append '%s' to fstab"%newline) try: with open("/etc/fstab", "w") as f: f.write("\n".join(lines)+'\n') except: perror("failed to rewrite fstab") return 1 pinfo("fstab rewritten") return 0 def check_fs_local(self, fs, verbose=False): if self.svcname is not None and self.osvc_service: return 0 p = re.compile(r'\s*%s\s+%s'%(fs['devpath'], fs['mnt'])) with open("/etc/fstab", "r") as f: buff = f.read() if re.search(p, buff) is not None: if verbose: pinfo("%s@%s resource correctly set in fstab"%(fs['mnt'], fs['devpath'])) return 0 if verbose: perror("%s@%s resource correctly set in fstab"%(fs['mnt'], fs['devpath'])) return 1 def check_fs_svc(self, fs, verbose=False): if self.svcname is None: return 0 rids = self.get_fs_rids() if not self.osvc_service: return 0 rid = self.find_rid(fs) if rid is None: if verbose: perror("%s@%s resource not found in service %s"%(fs['mnt'], fs['devpath'], self.svcname)) return 1 if verbose: pinfo("%s@%s resource correctly set in service %s"%(fs['mnt'], fs['devpath'], self.svcname)) return 0 def fix_fs_svc(self, fs): if not self.osvc_service or self.check_fs_svc(fs, False) == 0: return 0 cmd = ['svcmgr', '-s', self.svcname, 'get', '--param', 'DEFAULT.encapnodes'] p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() out = bdecode(out) err = bdecode(err) if self.nodename in out.strip().split(): tags = "encap" else: tags = '' cmd = ['svcmgr', '-s', self.svcname, 'update', '--resource', '{"rtype": "fs", "mnt": "%s", "dev": "%s", 
"type": "%s", "mnt_opt": "%s", "tags": "%s"}'%(fs['mnt'], fs['devpath'], fs['type'], fs['opts'], tags)] pinfo(' '.join(cmd)) p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() out = bdecode(out) err = bdecode(err) if p.returncode != 0: perror("unable to fetch %s json status"%self.svcname) return 1 return 0 def check_fs_mounted(self, fs, verbose=False): if os.path.ismount(fs['mnt']): if verbose: pinfo(fs['mnt'], "is mounted") return 0 if verbose: perror(fs['mnt'], "is not mounted") return 1 def fix_fs_mounted(self, fs): if self.check_fs_mounted(fs, False) == 0: return 0 if self.svcname is None or not self.osvc_service: return self.fix_fs_mounted_local(fs) else: return self.fix_fs_mounted_svc(fs) def fix_fs_mounted_svc(self, fs): rids = self.get_fs_rids(refresh=True) rid = self.find_rid(fs) if rid is None: perror("fs resource with mnt=%s not found in service %s"%(fs['mnt'], self.svcname)) return 1 cmd = ['svcmgr', '-s', self.svcname, '--rid', rid, 'mount', '--cluster'] p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() out = bdecode(out) err = bdecode(err) if p.returncode != 0 and "unsupported action" in err: cmd = ['svcmgr', '-s', self.svcname, '--rid', rid, 'startfs', '--cluster'] p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() pinfo(' '.join(cmd)) if p.returncode != 0: perror("unable to mount %s"%fs['mnt']) return 1 return 0 def fix_fs_mounted_local(self, fs): cmd = ['mount', fs['mnt']] pinfo(' '.join(cmd)) p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() out = bdecode(out) err = bdecode(err) if len(out) > 0: pinfo(out) if len(err) > 0: perror(err) if p.returncode != 0: perror("unable to mount %s"%fs['mnt']) return 1 return 0 def check_fs(self, fs, verbose=False): r = 0 r |= self.check_fs_mnt(fs, verbose) r |= self.check_fs_dev(fs, verbose) r |= self.check_fs_fmt(fs, verbose) r |= self.check_fs_svc(fs, verbose) r |= self.check_fs_local(fs, verbose) r |= self.check_fs_mounted(fs, verbose) 
return r def fix_fs(self, fs): if self.fix_fs_mnt(fs) != 0: return 1 if self.fix_fs_dev(fs) != 0: return 1 if self.fix_fs_fmt(fs) != 0: return 1 if self.fix_fs_svc(fs) != 0: return 1 if self.fix_fs_local(fs) != 0: return 1 if self.fix_fs_mounted(fs) != 0: return 1 return 0 def fixable(self): return RET_NA def check(self): r = 0 for f in self.fs: r |= self.check_fs(f, verbose=True) return r def fix(self): r = 0 for f in self.fs: r |= self.fix_fs(f) return r if __name__ == "__main__": syntax = """syntax: %s PREFIX check|fixable|fix"""%sys.argv[0] if len(sys.argv) != 3: perror("wrong number of arguments") perror(syntax) sys.exit(RET_ERR) try: o = CompFs(sys.argv[1]) if sys.argv[2] == 'check': RET = o.check() elif sys.argv[2] == 'fix': RET = o.fix() elif sys.argv[2] == 'fixable': RET = o.fixable() else: perror("unsupported argument '%s'"%sys.argv[2]) perror(syntax) RET = RET_ERR except NotApplicable: sys.exit(RET_NA) except ComplianceError: sys.exit(RET_ERR) except: import traceback traceback.print_exc() sys.exit(RET_ERR) sys.exit(RET) opensvc-1.8~20170412/var/compliance/com.opensvc/sudoers.py0000755000175000017500000000451013073467726023460 0ustar jkelbertjkelbert#!/usr/bin/env python """ Same as files compliance object, but verifies the sudoers declaration syntax using visudo in check mode. 
The variable format is json-serialized: { "path": "/some/path/to/file", "fmt": "root@corp.com %%HOSTNAME%%@corp.com", "uid": 500, "gid": 500, } Wildcards: %%ENV:VARNAME%% Any environment variable value %%HOSTNAME%% Hostname %%SHORT_HOSTNAME%% Short hostname """ import os import sys from subprocess import * sys.path.append(os.path.dirname(__file__)) from comp import * from files import CompFiles class CompSudoers(CompFiles): def check_file_syntax(self, f, verbose=False): cmd = ['visudo', '-c', '-f', '-'] p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE) out, err = p.communicate(input=bencode(f['fmt'])) if p.returncode != 0: if verbose: perror("target sudoers rules syntax error.") else: perror("target sudoers rules syntax error. abort installation.") return p.returncode def check(self): r = 0 for f in self.files: r |= self.check_file_syntax(f, verbose=True) r |= self.check_file(f, verbose=True) return r def fix(self): r = 0 for f in self.files: if self.check_file_syntax(f): r |= 1 # refuse to install a corrupted sudoers file continue r |= self.fix_file_fmt(f) r |= self.fix_file_mode(f) r |= self.fix_file_owner(f) return r if __name__ == "__main__": syntax = """syntax: %s PREFIX check|fixable|fix"""%sys.argv[0] if len(sys.argv) != 3: perror("wrong number of arguments") perror(syntax) sys.exit(RET_ERR) try: o = CompSudoers(sys.argv[1]) if sys.argv[2] == 'check': RET = o.check() elif sys.argv[2] == 'fix': RET = o.fix() elif sys.argv[2] == 'fixable': RET = o.fixable() else: perror("unsupported argument '%s'"%sys.argv[2]) perror(syntax) RET = RET_ERR except ComplianceError: sys.exit(RET_ERR) except NotApplicable: sys.exit(RET_NA) except: import traceback traceback.print_exc() sys.exit(RET_ERR) sys.exit(RET) opensvc-1.8~20170412/var/compliance/com.opensvc/file.py0000755000175000017500000003033613073467726022720 0ustar jkelbertjkelbert#!/usr/bin/env python data = { "default_prefix": "OSVC_COMP_FILE_", "example_value": """ { "path": "/some/path/to/file", "fmt": 
"root@corp.com %%HOSTNAME%%@corp.com", "uid": 500, "gid": 500, } """, "description": """* Verify and install file content. * Verify and set file or directory ownership and permission * Directory mode is triggered if the path ends with / Special wildcards:: %%ENV:VARNAME%% Any environment variable value %%HOSTNAME%% Hostname %%SHORT_HOSTNAME%% Short hostname """, "form_definition": """ Desc: | A file rule, fed to the 'files' compliance object to create a directory or a file and set its ownership and permissions. For files, a reference content can be specified or pointed through an URL. Css: comp48 Outputs: - Dest: compliance variable Class: file Type: json Format: dict Inputs: - Id: path Label: Path DisplayModeLabel: path LabelCss: action16 Mandatory: Yes Help: File path to install the reference content to. A path ending with '/' is treated as a directory and as such, its content need not be specified. Type: string - Id: mode Label: Permissions DisplayModeLabel: perm LabelCss: action16 Help: "In octal form. Example: 644" Type: integer - Id: uid Label: Owner DisplayModeLabel: uid LabelCss: guy16 Help: Either a user ID or a user name Type: string or integer - Id: gid Label: Owner group DisplayModeLabel: gid LabelCss: guy16 Help: Either a group ID or a group name Type: string or integer - Id: ref Label: Content URL pointer DisplayModeLabel: ref LabelCss: loc Help: "Examples: http://server/path/to/reference_file https://server/path/to/reference_file ftp://server/path/to/reference_file ftp://login:pass@server/path/to/reference_file" Type: string - Id: fmt Label: Content DisplayModeLabel: fmt LabelCss: hd16 Css: pre Help: A reference content for the file. The text can embed substitution variables specified with %%ENV:VAR%%. 
Type: text """ } import os import sys import json import stat import re import urllib import ssl import tempfile import pwd import grp from subprocess import * sys.path.append(os.path.dirname(__file__)) from comp import * class InitError(Exception): pass class CompFiles(CompObject): def __init__(self, prefix=None): CompObject.__init__(self, prefix=prefix, data=data) def init(self): self._usr = {} self._grp = {} self.sysname, self.nodename, x, x, self.machine = os.uname() self.files = [] for rule in self.get_rules(): try: self.files += self.add_file(rule) except InitError: continue except ValueError: perror('file: failed to parse variable', os.environ[k]) if len(self.files) == 0: raise NotApplicable() def parse_fmt(self, d, add_linefeed=True): if isinstance(d['fmt'], int): d['fmt'] = str(d['fmt']) d['fmt'] = d['fmt'].replace('%%HOSTNAME%%', self.nodename) d['fmt'] = d['fmt'].replace('%%SHORT_HOSTNAME%%', self.nodename.split('.')[0]) d['fmt'] = self.subst(d['fmt']) if add_linefeed and not d['fmt'].endswith('\n'): d['fmt'] += '\n' return [d] def parse_ref(self, d): f = tempfile.NamedTemporaryFile() tmpf = f.name f.close() try: self.urlretrieve(d['ref'], tmpf) except IOError as e: perror("file ref", d['ref'], "download failed:", e) raise InitError() with open(tmpf, "r") as f: d['fmt'] = f.read() return self.parse_fmt(d, add_linefeed=False) def add_file(self, d): if 'path' not in d: perror('file: path should be in the dict:', d) RET = RET_ERR return [] if 'fmt' not in d and 'ref' not in d and not d['path'].endswith("/"): perror('file: fmt or ref should be in the dict:', d) RET = RET_ERR return [] if 'fmt' in d and 'ref' in d: perror('file: fmt and ref are exclusive:', d) RET = RET_ERR return [] try: d["uid"] = int(d["uid"]) except: pass try: d["gid"] = int(d["gid"]) except: pass if 'fmt' in d: return self.parse_fmt(d) if 'ref' in d: if not d["ref"].startswith("safe://"): return self.parse_ref(d) return [d] def fixable(self): return RET_NA def check_file_fmt(self, f, 
verbose=False): if not os.path.exists(f['path']): return RET_ERR if f['path'].endswith('/'): # don't check content if it's a directory return RET_OK if 'ref' in f and f['ref'].startswith("safe://"): return self.check_file_fmt_safe(f, verbose=verbose) else: return self.check_file_fmt_buffered(f, verbose=verbose) def fix_file_fmt_safe(self, f): pinfo("file reference %s download to %s" % (f["ref"], f["path"])) tmpfname = self.get_safe_file(f["ref"]) pinfo("file %s content install" % f["path"]) import shutil shutil.copy(tmpfname, f["path"]) os.unlink(tmpfname) return RET_OK def check_file_fmt_safe(self, f, verbose=False): try: data = self.collector_safe_file_get_meta(f["ref"]) except ComplianceError as e: raise ComplianceError(str(e)) target_md5 = data.get("md5") current_md5 = self.md5(f["path"]) if target_md5 == current_md5: pinfo("file %s md5 verified" % f["path"]) return RET_OK else: perror("file %s content md5 differs from its reference" % f["path"]) if verbose and data["size"] < 1000000: tmpfname = self.get_safe_file(f["ref"]) self.check_file_diff(f, tmpfname, verbose=verbose) os.unlink(tmpfname) return RET_ERR def get_safe_file(self, uuid): tmpf = tempfile.NamedTemporaryFile() tmpfname = tmpf.name tmpf.close() try: self.collector_safe_file_download(uuid, tmpfname) except Exception as e: raise ComplianceError("%s: %s" % (uuid, str(e))) return tmpfname def check_file_fmt_buffered(self, f, verbose=False): tmpf = tempfile.NamedTemporaryFile() tmpfname = tmpf.name tmpf.close() with open(tmpfname, 'w') as tmpf: tmpf.write(f['fmt']) ret = self.check_file_diff(f, tmpfname, verbose=verbose) os.unlink(tmpfname) return ret def check_file_diff(self, f, refpath, verbose=False): if "OSVC_COMP_NODES_OS_NAME" in os.environ and os.environ['OSVC_COMP_NODES_OS_NAME'] in ("Linux"): cmd = ['diff', '-u', f['path'], refpath] else: cmd = ['diff', f['path'], refpath] p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE) out, err = p.communicate() out = bdecode(out) if verbose and len(out) 
> 0: perror(out.strip('\n')) if p.returncode != 0: return RET_ERR return RET_OK def check_file_mode(self, f, verbose=False): if 'mode' not in f: return RET_OK try: mode = oct(stat.S_IMODE(os.stat(f['path']).st_mode)) except: if verbose: perror("file", f['path'], 'stat() failed') return RET_ERR mode = str(mode).lstrip("0o") target_mode = str(f['mode']).lstrip("0o") if mode != target_mode: if verbose: perror("file", f['path'], 'mode should be %s but is %s'%(target_mode, mode)) return RET_ERR return RET_OK def get_uid(self, uid): if uid in self._usr: return self._usr[uid] tuid = uid if is_string(uid): try: info=pwd.getpwnam(uid) tuid = info[2] self._usr[uid] = tuid except: perror("file: user %s does not exist"%uid) raise ComplianceError() return tuid def get_gid(self, gid): if gid in self._grp: return self._grp[gid] tgid = gid if is_string(gid): try: info=grp.getgrnam(gid) tgid = info[2] self._grp[gid] = tgid except: perror("file: group %s does not exist"%gid) raise ComplianceError() return tgid def check_file_uid(self, f, verbose=False): if 'uid' not in f: return RET_OK tuid = self.get_uid(f['uid']) uid = os.stat(f['path']).st_uid if uid != tuid: if verbose: perror("file", f['path'], 'uid should be %s but is %s'%(tuid, str(uid))) return RET_ERR return RET_OK def check_file_gid(self, f, verbose=False): if 'gid' not in f: return RET_OK tgid = self.get_gid(f['gid']) gid = os.stat(f['path']).st_gid if gid != tgid: if verbose: perror("file", f['path'], 'gid should be %s but is %s'%(tgid, str(gid))) return RET_ERR return RET_OK def check_file(self, f, verbose=False): if not os.path.exists(f['path']): perror("file", f['path'], "does not exist") return RET_ERR r = 0 r |= self.check_file_fmt(f, verbose) r |= self.check_file_mode(f, verbose) r |= self.check_file_uid(f, verbose) r |= self.check_file_gid(f, verbose) if r == 0 and verbose: pinfo("file", f['path'], "is ok") return r def fix_file_mode(self, f): if 'mode' not in f: return RET_OK if self.check_file_mode(f) == RET_OK: 
return RET_OK try: pinfo("file %s mode set to %s"%(f['path'], str(f['mode']))) os.chmod(f['path'], int(str(f['mode']), 8)) except: return RET_ERR return RET_OK def fix_file_owner(self, f): uid = -1 gid = -1 if 'uid' not in f and 'gid' not in f: return RET_OK if 'uid' in f and self.check_file_uid(f) != RET_OK: uid = self.get_uid(f['uid']) if 'gid' in f and self.check_file_gid(f) != RET_OK: gid = self.get_gid(f['gid']) if uid == -1 and gid == -1: return RET_OK try: os.chown(f['path'], uid, gid) except: perror("file %s ownership set to %d:%d failed"%(f['path'], uid, gid)) return RET_ERR pinfo("file %s ownership set to %d:%d"%(f['path'], uid, gid)) return RET_OK def fix_file_fmt(self, f): if f['path'].endswith("/") and not os.path.exists(f['path']): try: pinfo("file: mkdir", f['path']) os.makedirs(f['path']) except: perror("file: failed to create", f['path']) return RET_ERR return RET_OK if self.check_file_fmt(f, verbose=False) == RET_OK: return RET_OK if 'ref' in f and f['ref'].startswith("safe://"): return self.fix_file_fmt_safe(f) d = os.path.dirname(f['path']) if not os.path.exists(d): pinfo("file: mkdir", d) os.makedirs(d) try: os.chown(d, self.get_uid(f['uid']), self.get_gid(f['gid'])) except Exception as e: perror("file:", e) pass try: with open(f['path'], 'w') as fi: fi.write(f['fmt']) except Exception as e: perror("file:", e) return RET_ERR pinfo("file", f['path'], "rewritten") return RET_OK def check(self): r = 0 for f in self.files: r |= self.check_file(f, verbose=True) return r def fix(self): r = 0 for f in self.files: r |= self.fix_file_fmt(f) r |= self.fix_file_mode(f) r |= self.fix_file_owner(f) return r if __name__ == "__main__": main(CompFiles) opensvc-1.8~20170412/var/compliance/com.opensvc/keyval.py0000755000175000017500000002550713073467726023300 0ustar jkelbertjkelbert#!/usr/bin/env python data = { "default_prefix": "OSVC_COMP_GROUP_", "example_kwargs": { "path": "/etc/ssh/sshd_config", }, "example_value": """ [ { "key": "PermitRootLogin", "op": 
"=", "value": "yes" } ] or { "path": "/etc/ssh/sshd_config", "keys": [ { "key": "PermitRootLogin", "op": "=", "value": "yes" } ] } """, "description": """* Setup and verify keys in "key value" formatted configuration file. * Example files: sshd_config, ssh_config, ntp.conf, ... """, "form_definition": """ Desc: | A rule to set a list of parameters in simple keyword/value configuration file format. Current values can be checked as set or unset, strictly equal, or superior/inferior to their target value. Outputs: - Dest: compliance variable Type: json Format: list of dict Class: keyval Inputs: - Id: key Label: Key DisplayModeTrim: 64 DisplayModeLabel: key LabelCss: action16 Mandatory: Yes Type: string Help: - Id: op Label: Comparison operator DisplayModeLabel: op LabelCss: action16 Mandatory: Yes Type: string Default: "=" Candidates: - reset - unset - "=" - ">" - ">=" - "<" - "<=" Help: The comparison operator to use to check the parameter current value. - Id: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string or integer Help: The configuration file parameter target value. 
""", } import os import sys import json sys.path.append(os.path.dirname(__file__)) from comp import * from keyval_parser import Parser, ParserError class KeyVal(CompObject): def __init__(self, prefix=None, path=None): CompObject.__init__(self, prefix=prefix, data=data) self.cf = path def init(self): self.nocf = False self.file_keys = {} if self.cf: self.file_keys[self.cf] = { "target_n_key": {}, "keys": [], } for rule in self.get_rules(): if self.cf and "key" in rule: self.file_keys[self.cf]["keys"] += [rule] continue if "path" not in rule: continue if "keys" not in rule or not isinstance(rule["keys"], list): continue path = rule["path"] if path not in self.file_keys: self.file_keys[path] = { "target_n_key": {}, "keys": rule["keys"], } else: self.file_keys[path]["keys"] += rule["keys"] for path, data in self.file_keys.items(): for i, key in enumerate(data["keys"]): if data["keys"][i]['op'] == 'IN': data["keys"][i]['value'] = json.loads(data["keys"][i]['value']) if 'op' in key and 'key' in key and key['op'] not in ("unset", "reset"): if key['key'] not in data["target_n_key"]: data["target_n_key"][key['key']] = 1 else: data["target_n_key"][key['key']] += 1 try: data["conf"] = Parser(path) except ParserError as e: perror(e) raise ComplianceError() def fixable(self): return RET_OK def _check_key(self, path, data, keyname, target, op, value, instance=0, verbose=True): r = RET_OK if op == "reset": if value is not None: current_n_key = len(value) target_n_key = data["target_n_key"][keyname] if keyname in data["target_n_key"] else 0 if current_n_key > target_n_key: if verbose: perror("%s is set %d times, should be set %d times"%(keyname, current_n_key, target_n_key)) return RET_ERR else: if verbose: pinfo("%s is set %d times, on target"%(keyname, current_n_key)) return RET_OK else: return RET_OK elif op == "unset": if value is not None: if target.strip() == "": if verbose: perror("%s is set, should not be"%keyname) return RET_ERR target_found = False for i, val in 
enumerate(value): if target == val: target_found = True break if target_found: if verbose: perror("%s[%d] is set to value %s, should not be"%(keyname, i, target)) return RET_ERR else: if verbose: pinfo("%s is not set to value %s, on target"%(keyname, target)) return RET_OK else: if target.strip() != "": if verbose: pinfo("%s=%s is not set, on target"%(keyname, target)) else: if verbose: pinfo("%s is not set, on target"%keyname) return RET_OK if value is None: if op == 'IN' and "unset" in map(str, target): if verbose: pinfo("%s is not set, on target"%(keyname)) return RET_OK else: if verbose: perror("%s[%d] is not set, target: %s"%(keyname, instance, str(target))) return RET_ERR if type(value) == list: if str(target) in value: if verbose: pinfo("%s[%d]=%s on target"%(keyname, instance, str(value))) return RET_OK else: if verbose: perror("%s[%d]=%s is not set"%(keyname, instance, str(target))) return RET_ERR if op == '=': if str(value) != str(target): if verbose: perror("%s[%d]=%s, target: %s"%(keyname, instance, str(value), str(target))) r |= RET_ERR elif verbose: pinfo("%s=%s on target"%(keyname, str(value))) elif op == 'IN': if str(value) not in map(str, target): if verbose: perror("%s[%d]=%s, target: %s"%(keyname, instance, str(value), str(target))) r |= RET_ERR elif verbose: pinfo("%s=%s on target"%(keyname, str(value))) else: if type(value) != int: if verbose: perror("%s[%d]=%s value must be integer"%(keyname, instance, str(value))) r |= RET_ERR elif op == '<=' and value > target: if verbose: perror("%s[%d]=%s target: <= %s"%(keyname, instance, str(value), str(target))) r |= RET_ERR elif op == '>=' and value < target: if verbose: perror("%s[%d]=%s target: >= %s"%(keyname, instance, str(value), str(target))) r |= RET_ERR elif verbose: pinfo("%s[%d]=%s on target"%(keyname, instance, str(value))) return r def check_key(self, path, data, key, instance=0, verbose=True): if 'key' not in key: if verbose: perror("'key' not set in rule %s"%str(key)) return RET_NA if 
'value' not in key: if verbose: perror("'value' not set in rule %s"%str(key)) return RET_NA if 'op' not in key: op = "=" else: op = key['op'] target = key['value'] allowed_ops = ('>=', '<=', '=', 'unset', 'reset', 'IN') if op not in allowed_ops: if verbose: perror(key['key'], "'op' value must be one of", ", ".join(allowed_ops)) return RET_NA keyname = key['key'] value = data["conf"].get(keyname, instance=instance) r = self._check_key(path, data, keyname, target, op, value, instance=instance, verbose=verbose) return r def fix_key(self, path, data, key, instance=0): if key['op'] == "unset" or (key['op'] == "IN" and key['value'][0] == "unset"): pinfo("%s unset"%key['key']) if key['op'] == "IN": target = None else: target = key['value'] data["conf"].unset(key['key'], target) elif key['op'] == "reset": target_n_key = data["target_n_key"][key['key']] if key['key'] in data["target_n_key"] else 0 pinfo("%s truncated to %d definitions"%(key['key'], target_n_key)) data["conf"].truncate(key['key'], target_n_key) else: if key['op'] == "IN": target = key['value'][0] else: target = key['value'] pinfo("%s=%s set"%(key['key'], target)) data["conf"].set(key['key'], target, instance=instance) def check(self): r = RET_OK for path, data in self.file_keys.items(): r |= self.check_keys(path, data) return r def check_keys(self, path, data): r = RET_OK key_instance = {} for key in data["keys"]: if 'key' not in key or 'op' not in key: continue if key['op'] in ('reset', 'unset'): instance = None else: if key['key'] not in key_instance: key_instance[key['key']] = 0 else: key_instance[key['key']] += 1 instance = key_instance[key['key']] r |= self.check_key(path, data, key, instance=instance, verbose=True) return r def fix(self): r = RET_OK for path, data in self.file_keys.items(): r |= self.fix_keys(path, data) return r def fix_keys(self, path, data): key_instance = {} for key in data["keys"]: if 'key' not in key or 'op' not in key: continue if key['op'] in ('reset', 'unset'): instance = None 
else: if key['key'] not in key_instance: key_instance[key['key']] = 0 else: key_instance[key['key']] += 1 instance = key_instance[key['key']] if self.check_key(path, data, key, instance=instance, verbose=False) == RET_ERR: self.fix_key(path, data, key, instance=instance) if not data["conf"].changed: return RET_OK try: data["conf"].write() except ParserError as e: perror(e) return RET_ERR return RET_OK if __name__ == "__main__": main(KeyVal) opensvc-1.8~20170412/var/compliance/com.opensvc/nodeconf.py0000755000175000017500000001311413073467726023567 0ustar jkelbertjkelbert#!/usr/bin/env python data = { "default_prefix": "OSVC_COMP_NODECONF_", "example_value": """ [ { "key": "node.repopkg", "op": "=", "value": "ftp://ftp.opensvc.com/opensvc" }, { "key": "node.repocomp", "op": "=", "value": "ftp://ftp.opensvc.com/compliance" } ] """, "description": """* Verify opensvc agent configuration parameter """, "form_definition": """ Desc: | A rule to set a parameter in OpenSVC node.conf configuration file. Used by the 'nodeconf' compliance object. Css: comp48 Outputs: - Dest: compliance variable Type: json Format: list of dict Class: nodeconf Inputs: - Id: key Label: Key DisplayModeLabel: key LabelCss: action16 Mandatory: Yes Type: string Help: The OpenSVC node.conf parameter to check. - Id: op Label: Comparison operator DisplayModeLabel: op LabelCss: action16 Mandatory: Yes Type: string Default: "=" Candidates: - "=" - ">" - ">=" - "<" - "<=" Help: The comparison operator to use to check the parameter value. - Id: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string or integer Help: The OpenSVC node.conf parameter value to check. 
""", } import os import sys import json import re from subprocess import * sys.path.append(os.path.dirname(__file__)) from comp import * class NodeConf(CompObject): def __init__(self, prefix=None): CompObject.__init__(self, prefix=prefix, data=data) def init(self): self.keys = self.get_rules() def fixable(self): return RET_OK def unset_val(self, keyname): cmd = ['nodemgr', 'unset', '--param', keyname] pinfo(' '.join(cmd)) p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() return p.returncode def set_val(self, keyname, target): if type(target) == int: target = str(target) cmd = ['nodemgr', 'set', '--param', keyname, '--value', target] pinfo(' '.join(cmd)) p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() return p.returncode def get_val(self, keyname): cmd = ['nodemgr', 'get', '--param', keyname] p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() if p.returncode != 0: #perror('\n'.join((' '.join(cmd), out, err))) return if "deprecated" in err: return out = out.strip() try: out = int(out) except: pass return out def _check_key(self, keyname, target, op, value, verbose=True): r = RET_OK if value is None: if verbose: perror("%s not set"%keyname) r |= RET_ERR if op == '=': if str(value) != str(target): if verbose: perror("%s=%s, target: %s"%(keyname, str(value), str(target))) r |= RET_ERR elif verbose: pinfo("%s=%s on target"%(keyname, str(value))) elif op == 'unset': if verbose: perror("%s=%s value must be unset"%(keyname, str(value))) r |= RET_ERR else: if type(value) != int: if verbose: perror("%s=%s value must be integer"%(keyname, str(value))) r |= RET_ERR elif op == '<=' and value > target: if verbose: perror("%s=%s target: <= %s"%(keyname, str(value), str(target))) r |= RET_ERR elif op == '>=' and value < target: if verbose: perror("%s=%s target: >= %s"%(keyname, str(value), str(target))) r |= RET_ERR elif verbose: pinfo("%s=%s on target"%(keyname, str(value))) return r def check_key(self, key, verbose=True): if 
'key' not in key: if verbose: perror("'key' not set in rule %s"%str(key)) return RET_NA if 'value' not in key: if verbose: perror("'value' not set in rule %s"%str(key)) return RET_NA if 'op' not in key: op = "=" else: op = key['op'] target = key['value'] if op not in ('>=', '<=', '=', 'unset'): if verbose: perror("'value' list member 0 must be either '=', '>=', '<=' or unset: %s"%str(key)) return RET_NA keyname = key['key'] value = self.get_val(keyname) if value is None: if op == 'unset': if verbose: pinfo("%s key is not set"%keyname) return RET_OK else: if verbose: perror("%s key is not set"%keyname) return RET_ERR return self._check_key(keyname, target, op, value, verbose) def fix_key(self, key): if 'op' not in key: op = "=" else: op = key['op'] if op == "unset": return self.unset_val(key['key']) else: return self.set_val(key['key'], key['value']) def check(self): r = 0 for key in self.keys: r |= self.check_key(key, verbose=True) return r def fix(self): r = 0 for key in self.keys: if self.check_key(key, verbose=False) == RET_ERR: r += self.fix_key(key) return r if __name__ == "__main__": main(NodeConf) opensvc-1.8~20170412/var/compliance/com.opensvc/firmware.py0000755000175000017500000001506313073467726023615 0ustar jkelbertjkelbert#!/usr/bin/env python import os import sys import json from distutils.version import LooseVersion as V from subprocess import * sys.path.append(os.path.dirname(__file__)) from comp import * class CompFirmware(object): def __init__(self, var): self.versions = {} if var not in os.environ: pinfo(var, 'not found in environment') raise NotApplicable() try: self.target_versions = json.loads(os.environ[var]) except: perror(var, 'misformatted variable:', os.environ[var]) raise NotApplicable() for key in self.target_versions: if type(self.target_versions[key]) != list: continue self.target_versions[key] = list(map(lambda x: str(x), self.target_versions[key])) self.sysname, self.nodename, x, x, self.machine = os.uname() if self.sysname not in 
['Linux']: perror('module not supported on', self.sysname) raise NotApplicable() def get_versions(self): self.get_bios_version_Linux() self.get_qla_version_Linux() self.get_lpfc_version_Linux() def get_qla_version_Linux(self): self.versions['qla2xxx'] = None self.versions['qla2xxx_fw'] = None import glob hosts = glob.glob('/sys/bus/pci/drivers/qla2*/*:*:*/host*') if len(hosts) == 0: return hosts_proc = map(lambda x: '/proc/scsi/qla2xxx/'+os.path.basename(x).replace('host', ''), hosts) hosts = map(lambda x: '/sys/class/fc_host/'+os.path.basename(x)+'/symbolic_name', hosts) for i, host in enumerate(hosts): if os.path.exists(host): with open(host, 'r') as f: buff = f.read() l = buff.split() for e in l: if e.startswith("DVR:"): self.versions['qla2xxx'] = e.replace("DVR:", "") elif e.startswith("FW:"): v = e.replace("FW:", "") # store the lowest firmware version if self.versions['qla2xxx_fw'] is None or V(self.versions['qla2xxx_fw']) > V(v): self.versions['qla2xxx_fw'] = v elif os.path.exists(hosts_proc[i]): with open(hosts_proc[i], 'r') as f: buff = f.read() for line in buff.split('\n'): if "Firmware version" not in line: continue l = line.split() n_words = len(l) idx = l.index("Driver") + 2 if idx <= n_words: self.versions['qla2xxx'] = l[idx] idx = l.index("Firmware") + 2 if idx <= n_words: v = l[idx] if self.versions['qla2xxx_fw'] is None or V(self.versions['qla2xxx_fw']) > V(v): self.versions['qla2xxx_fw'] = v def get_lpfc_version_Linux(self): self.versions['lpfc'] = None self.versions['lpfc_fw'] = None import glob hosts = glob.glob('/sys/class/scsi_host/host*/fwrev') if len(hosts) == 0: return for host in hosts: with open(host, 'r') as f: buff = f.read() l = buff.split() if self.versions['lpfc_fw'] is None or V(self.versions['lpfc_fw']) > V(l[0]): self.versions['lpfc_fw'] = l[0] if self.versions['lpfc_fw'] is None: # no need to fetch module version if no hardware return cmd = ['modinfo', 'lpfc'] p = Popen(cmd, stdout=PIPE) out, err = p.communicate() if p.returncode 
!= 0: return out = bdecode(out) for line in out.splitlines(): if line.startswith('version:'): self.versions['lpfc'] = line.split()[1] return def get_bios_version_Linux(self): p = os.path.join(os.sep, 'sys', 'class', 'dmi', 'id', 'bios_version') try: f = open(p, 'r') ver = f.read().strip() f.close() self.versions['server'] = ver return except: pass try: cmd = ['dmidecode'] p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() if p.returncode != 0: raise out = bdecode(out) for line in out.splitlines(): if 'Version:' in line: self.versions['server'] = line.split(':')[-1].strip() return raise except: pinfo('can not fetch bios version') return def fixable(self): return RET_NA def check(self): self.get_versions() r = RET_OK for key in self.target_versions: if key not in self.versions: perror("TODO: get", key, "version") continue if type(self.versions[key]) not in (str, unicode): pinfo("no", key) continue if type(self.target_versions[key]) == list and \ self.versions[key] not in self.target_versions[key]: perror(key, "version is %s, target %s"%(self.versions[key], ' or '.join(self.target_versions[key]))) r |= RET_ERR elif type(self.target_versions[key]) != list and \ self.versions[key] != self.target_versions[key]: perror(key, "version is %s, target %s"%(self.versions[key], self.target_versions[key])) r |= RET_ERR else: pinfo(key, "version is %s, on target"%self.versions[key]) continue return r def fix(self): return RET_NA if __name__ == "__main__": syntax = """syntax: %s TARGET check|fixable|fix"""%sys.argv[0] if len(sys.argv) != 3: perror("wrong number of arguments") perror(syntax) sys.exit(RET_ERR) try: o = CompFirmware(sys.argv[1]) if sys.argv[2] == 'check': RET = o.check() elif sys.argv[2] == 'fix': RET = o.fix() elif sys.argv[2] == 'fixable': RET = o.fixable() else: perror("unsupported argument '%s'"%sys.argv[2]) perror(syntax) RET = RET_ERR except NotApplicable: sys.exit(RET_NA) except: import traceback traceback.print_exc() sys.exit(RET_ERR) 
sys.exit(RET) opensvc-1.8~20170412/var/compliance/com.opensvc/ansible_playbook.py0000755000175000017500000001224613073467726025316 0ustar jkelbertjkelbert#!/usr/bin/env python data = { "default_prefix": "OSVC_COMP_ANSIBLE_PLAYBOOK_", "example_value": """ { "path": "/some/path/to/file", "fmt": "---", } """, "description": """* Fetch a playbook from a href if required * Run the playbook in check mode on check action * Run the playbook on fix action """, "form_definition": """ Desc: | Define or point to a ansible playbook. Css: comp48 Outputs: - Dest: compliance variable Class: file Type: json Format: dict Inputs: - Id: ref Label: Content URL pointer DisplayModeLabel: ref LabelCss: loc Help: "Examples: /path/to/reference_file http://server/path/to/reference_file https://server/path/to/reference_file ftp://server/path/to/reference_file ftp://login:pass@server/path/to/reference_file" Type: string - Id: fmt Label: Content DisplayModeLabel: fmt LabelCss: hd16 Css: pre Help: A reference content for the file. The text can embed substitution variables specified with %%ENV:VAR%%. 
Type: text """ } import os import sys import stat import re import tempfile from subprocess import * sys.path.append(os.path.dirname(__file__)) from comp import * class InitError(Exception): pass class AnsiblePlaybook(CompObject): def __init__(self, prefix=None): CompObject.__init__(self, prefix=prefix, data=data) def init(self): self.rules = [] self.inventory = os.path.join(os.environ["OSVC_PATH_COMP"], ".ansible-inventory") for rule in self.get_rules(): try: self.rules += self.add_rule(rule) except InitError: continue except ValueError: perror('ansible_playbook: failed to parse variable', os.environ[k]) if len(self.rules) == 0: raise NotApplicable() def add_rule(self, d): if 'fmt' not in d and 'ref' not in d: perror('file: fmt or ref should be in the dict:', d) RET = RET_ERR return [] if 'fmt' in d and 'ref' in d: perror('file: fmt and ref are exclusive:', d) RET = RET_ERR return [] return [d] def download(self, d): if 'ref' in d and d['ref'].startswith("safe://"): return self.get_safe_file(d["ref"]) elif 'fmt' in d and d['fmt'] != "": return self.write_fmt(d) else: return self.download_url() def download_url(self, d): f = tempfile.NamedTemporaryFile() tmpf = f.name f.close() try: self.urlretrieve(d['ref'], tmpf) except IOError as e: perror("file ref", d['ref'], "download failed:", e) raise InitError() return tmpf def get_safe_file(self, uuid): tmpf = tempfile.NamedTemporaryFile() tmpfname = tmpf.name tmpf.close() try: self.collector_safe_file_download(uuid, tmpfname) except Exception as e: raise ComplianceError("%s: %s" % (uuid, str(e))) return tmpfname def write_fmt(self, f): tmpf = tempfile.NamedTemporaryFile() tmpfname = tmpf.name tmpf.close() with open(tmpfname, 'w') as tmpf: tmpf.write(f['fmt']) return tmpfname def write_inventory(self): if os.path.exists(self.inventory): return with open(self.inventory, 'w') as ofile: ofile.write("[local]\n127.0.0.1\n") def fixable(self): return RET_NA def fix_playbook(self, rule, verbose=False): tmpfname = 
self.download(rule) try: return self._fix_playbook(rule, tmpfname, verbose=verbose) finally: os.unlink(tmpfname) def _fix_playbook(self, rule, tmpfname, verbose=False): self.write_inventory() cmd = ["ansible-playbook", "-c", "local", "-i", self.inventory, tmpfname] proc = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = proc.communicate() pinfo(out) perror(err) if proc.returncode != 0: return RET_ERR if "failed=0" in out: return RET_OK return RET_ERR def check_playbook(self, rule, verbose=False): tmpfname = self.download(rule) try: return self._check_playbook(rule, tmpfname, verbose=verbose) finally: os.unlink(tmpfname) def _check_playbook(self, rule, tmpfname, verbose=False): self.write_inventory() cmd = ["ansible-playbook", "-c", "local", "-i", self.inventory, "--check", tmpfname] proc = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = proc.communicate() pinfo(out) perror(err) if proc.returncode != 0: return RET_ERR if "changed=0" in out and "failed=0" in out: return RET_OK return RET_ERR def check(self): r = 0 for rule in self.rules: r |= self.check_playbook(rule, verbose=True) return r def fix(self): r = 0 for rule in self.rules: r |= self.fix_playbook(rule) return r if __name__ == "__main__": main(AnsiblePlaybook) opensvc-1.8~20170412/var/compliance/com.opensvc/zpool.py0000755000175000017500000001265213073467726023145 0ustar jkelbertjkelbert#!/usr/bin/env python data = { "default_prefix": "OSVC_COMP_ZPOOL_", "example_value": """ [ { "name": "rpool", "prop": "failmode", "op": "=", "value": "continue" }, { "name": "rpool", "prop": "dedupditto", "op": "<", "value": 1 }, { "name": "rpool", "prop": "dedupditto", "op": ">", "value": 0 }, { "name": "rpool", "prop": "dedupditto", "op": "<=", "value": 1 }, { "name": "rpool", "prop": "dedupditto", "op": ">=", "value": 1 } ] """, "description": """* Check the properties values against their target and operator * The collector provides the format with wildcards. * The module replace the wildcards with contextual values. 
* In the 'fix' the zpool property is set. """, "form_definition": """ Desc: | A rule to set a list of zpool properties. Css: comp48 Outputs: - Dest: compliance variable Type: json Format: list of dict Class: zpool Inputs: - Id: name Label: Pool Name DisplayModeLabel: poolname LabelCss: hd16 Mandatory: Yes Type: string Help: The zpool name whose property to check. - Id: prop Label: Property DisplayModeLabel: property LabelCss: action16 Mandatory: Yes Type: string Help: The zpool property to check. Candidates: - readonly - autoexpand - autoreplace - bootfs - cachefile - dedupditto - delegation - failmode - listshares - listsnapshots - version - Id: op_s Key: op Label: Comparison operator DisplayModeLabel: op LabelCss: action16 Type: info Default: "=" ReadOnly: yes Help: The comparison operator to use to check the property current value. Condition: "#prop IN readonly,autoexpand,autoreplace,bootfs,cachefile,delegation,failmode,listshares,listsnapshots" - Id: op_n Key: op Label: Comparison operator DisplayModeLabel: op LabelCss: action16 Mandatory: Yes Type: string Default: "=" StrictCandidates: yes Candidates: - "=" - ">" - ">=" - "<" - "<=" Help: The comparison operator to use to check the property current value. Condition: "#prop IN version,dedupditto" - Id: value_readonly Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string Help: The zpool property target value. Condition: "#prop == readonly" StrictCandidates: yes Candidates: - "on" - "off" - Id: value_autoexpand Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string Help: The zpool property target value. Condition: "#prop == autoexpand" StrictCandidates: yes Candidates: - "on" - "off" - Id: value_autoreplace Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string Help: The zpool property target value. 
Condition: "#prop == autoreplace" StrictCandidates: yes Candidates: - "on" - "off" - Id: value_delegation Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string Help: The zpool property target value. Condition: "#prop == delegation" StrictCandidates: yes Candidates: - "on" - "off" - Id: value_listshares Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string Help: The zpool property target value. Condition: "#prop == listshares" StrictCandidates: yes Candidates: - "on" - "off" - Id: value_listsnapshots Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string Help: The zpool property target value. Condition: "#prop == listsnapshots" StrictCandidates: yes Candidates: - "on" - "off" - Id: value_failmode Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string Help: The zpool property target value. Condition: "#prop == failmode" StrictCandidates: yes Candidates: - "continue" - "wait" - "panic" - Id: value_bootfs Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string Help: The zpool property target value. Condition: "#prop == bootfs" - Id: value_cachefile Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string Help: The zpool property target value. Condition: "#prop == cachefile" - Id: value_dedupditto Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: integer Help: The zpool property target value. Condition: "#prop == dedupditto" - Id: value_version Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: integer Help: The zpool property target value. 
Condition: "#prop == version" """ } import os import sys sys.path.append(os.path.dirname(__file__)) from zprop import * class CompZpool(CompZprop): def __init__(self, prefix='OSVC_COMP_ZPOOL_'): CompObject.__init__(self, prefix=prefix, data=data) self.zbin = "zpool" if __name__ == "__main__": main(CompZpool) opensvc-1.8~20170412/var/compliance/com.opensvc/sysvinit.py0000755000175000017500000001721113073467726023666 0ustar jkelbertjkelbert#!/usr/bin/env python from subprocess import * import os import sys import glob import re sys.path.append(os.path.dirname(__file__)) from comp import * class InitError(Exception): pass class UnknownService(Exception): pass class SetError(Exception): pass class SeqError(Exception): pass class DupError(Exception): pass class SysVInit(object): def __init__(self): self.load() def __str__(self): s = "" for svc in self.services: s += "%-20s %s\n"%(svc, ' '.join(map(lambda x: '%-4s'%x, str(self.services[svc])))) return s def get_svcname(self, s): _s = os.path.basename(s) _svcname = re.sub(r'^[SK][0-9]+', '', _s) _seq = re.sub(r'[KS](\d+).+', r'\1', _s) if _s[0] == 'S': _state = 'on' elif _s[0] == 'K': _state = 'off' else: raise InitError("unexepected service name: %s"%s) return _state, _seq, _svcname def load(self): self.services = {} self.levels = (0, 1, 2, 3, 4, 5, 6) default = "none" self.base_d = "/etc" self.init_d = self.base_d + "/init.d" if not os.path.exists(self.init_d): self.base_d = "/sbin" self.init_d = self.base_d + "/init.d" if not os.path.exists(self.init_d): raise InitError("init dir not found") for l in self.levels: for s in glob.glob("%s/rc%d.d/[SK]*"%(self.base_d, l)): state, seq, svc = self.get_svcname(s) if svc not in self.services: self.services[svc] = {seq: [default, default, default, default, default, default, default]} if seq not in self.services[svc]: self.services[svc][seq] = [default, default, default, default, default, default, default] self.services[svc][seq][l] = state def activate(self, service, levels, 
seq): for l in levels: self.activate_one(service, levels, seq) def activate_one(self, service, level, seq): if len(service) == 0: SetError("service is empty") start_l = "S%s%s"%(seq,service) svc_p = "../init.d/"+service os.chdir(self.base_d+"/rc%s.d"%level) g = glob.glob("[SK]*%s"%service) if len(g) > 0: cmd = ['rm', '-f'] + g pinfo(" ".join(cmd)) p = Popen(cmd, stdout=PIPE) out, err = p.communicate() if p.returncode != 0: raise SetError() cmd = ['ln', '-sf', svc_p, start_l] pinfo(" ".join(cmd)) p = Popen(cmd, stdout=PIPE) out, err = p.communicate() if p.returncode != 0: raise SetError() def deactivate_one(self, service, level, seq): if len(service) == 0: SetError("service is empty") stop_l = "K%s%s"%(seq,service) svc_p = "../init.d/"+service os.chdir(self.base_d+"/rc%s.d"%level) g = glob.glob("[SK]*%s"%service) if len(g) > 0: cmd = ['rm', '-f'] + g pinfo(" ".join(cmd)) p = Popen(cmd, stdout=PIPE) out, err = p.communicate() if p.returncode != 0: raise SetError() cmd = ['ln', '-sf', svc_p, stop_l] pinfo(" ".join(cmd)) p = Popen(cmd, stdout=PIPE) out, err = p.communicate() if p.returncode != 0: raise SetError() def delete_one(self, service, level): if len(service) == 0: SetError("service is empty") g = glob.glob(self.base_d+"/rc%s.d"%level+"/*"+service) if len(g) == 0: return cmd = ['rm', '-f'] + g pinfo(" ".join(cmd)) p = Popen(cmd, stdout=PIPE) out, err = p.communicate() if p.returncode != 0: raise SetError() def check_init(self, service): init_f = os.path.join(self.init_d, service) if os.path.exists(init_f): return True return False def set_state(self, service, level, state, seq): if service in self.services and seq in self.services[service]: curstates = self.services[service][seq] if state != "del" and len(curstates) == 1 and curstates[int(level)] == state or \ state == "del" and len(curstates) == 1 and curstates[int(level)] == "none": return if state == "on": self.activate_one(service, level, seq) elif state == "off": self.deactivate_one(service, level, seq) 
elif state == "del": self.delete_one(service, level) else: raise SetError() def get_state(self, service, level, seq): if service not in self.services: raise UnknownService() # compute the number of different launcher for this service in the runlevel l = [] for _seq in self.services[service]: if self.services[service][_seq][level] != "none": l.append(self.services[service][_seq][level]) if seq is None: if len(l) == 0: return "none" raise SeqError() if len(l) > 1: raise DupError() try: curstates = self.services[service][seq] curstate = curstates[int(level)] except: curstate = "none" if len(l) == 1 and curstate == "none": raise SeqError() return curstate def check_state(self, service, levels, state, seq=None, verbose=False): r = 0 if seq is not None and type(seq) == int: seq = "%02d"%seq if not self.check_init(service): if verbose: perror("service %s init script does not exist in %s"%(service, self.init_d)) r |= 1 if seq is None and state != "del": if verbose: perror("service %s sequence number must be set"%(service)) return 1 for level in levels: try: level = int(level) except: continue try: curstate = self.get_state(service, level, seq) except DupError: if verbose: perror("service %s has multiple launchers at level %d"%(service, level)) r |= 1 continue except SeqError: if verbose: perror("service %s sequence number error at level %d"%(service, level)) r |= 1 continue except UnknownService: curstate = "none" if (state != "del" and curstate != state) or \ (state == "del" and curstate != "none"): if verbose: perror("service", service, "at runlevel", level, "is in state", curstate, "! 
target state is", state) r |= 1 else: if verbose: pinfo("service", service, "at runlevel", level, "is in state", curstate) return r def fix_state(self, service, levels, state, seq=None): if seq is not None and type(seq) == int: seq = "%02d"%seq if seq is None and state != "del": perror("service %s sequence number must be set"%(service)) return 1 for level in levels: try: self.set_state(service, level, state, seq) except SetError: perror("failed to set", service, "runlevels") return 1 return 0 if __name__ == "__main__": o = SysVInit() pinfo(o) pinfo('xfs@rc3 =', o.get_state('xfs', 3)) opensvc-1.8~20170412/var/compliance/com.opensvc/remove_files.py0000755000175000017500000000447713073467726024467 0ustar jkelbertjkelbert#!/usr/bin/env python data = { "default_prefix": "OSVC_COMP_REMOVE_FILES_", "example_value": """ [ "/tmp/foo", "/bar/to/delete" ] """, "description": """* Verify files and file trees are uninstalled """, "form_definition": """ Desc: | A rule defining a set of files to remove, fed to the 'remove_files' compliance object. Css: comp48 Outputs: - Dest: compliance variable Class: remove_files Type: json Format: list Inputs: - Id: path Label: File path DisplayModeLabel: "" LabelCss: edit16 Mandatory: Yes Help: You must set paths in fully qualified form. 
Type: string """, } import os import sys import re import json from glob import glob import shutil sys.path.append(os.path.dirname(__file__)) from comp import * blacklist = [ "/", "/root" ] class CompRemoveFiles(CompObject): def __init__(self, prefix=None): CompObject.__init__(self, prefix=prefix, data=data) def init(self): patterns = self.get_rules() patterns = sorted(list(set(patterns))) self.files = self.expand_patterns(patterns) if len(self.files) == 0: pinfo("no files matching patterns") raise NotApplicable def expand_patterns(self, patterns): l = [] for pattern in patterns: l += glob(pattern) return l def fixable(self): return RET_NA def check_file(self, _file): if not os.path.exists(_file): pinfo(_file, "does not exist. on target.") return RET_OK perror(_file, "exists. shouldn't") return RET_ERR def fix_file(self, _file): if not os.path.exists(_file): return RET_OK try: if os.path.isdir(_file) and not os.path.islink(_file): shutil.rmtree(_file) else: os.unlink(_file) pinfo(_file, "deleted") except Exception as e: perror("failed to delete", _file, "(%s)"%str(e)) return RET_ERR return RET_OK def check(self): r = 0 for _file in self.files: r |= self.check_file(_file) return r def fix(self): r = 0 for _file in self.files: r |= self.fix_file(_file) return r if __name__ == "__main__": main(CompRemoveFiles) opensvc-1.8~20170412/var/compliance/com.opensvc/bios.py0000755000175000017500000000416513073467726022736 0ustar jkelbertjkelbert#!/usr/bin/env python data = { "default_prefix": "OSVC_COMP_BIOS_", "example_value": "0.6.0", "description": """* Checks an exact BIOS version, as returned by dmidecode or sysfs * Module need to be called with the exposed bios version as variable (bios.py $OSVC_COMP_TEST_BIOS_1 check) """, } import os import sys from subprocess import * sys.path.append(os.path.dirname(__file__)) from comp import * class CompBios(CompObject): def __init__(self, prefix=None): CompObject.__init__(self, prefix=prefix, data=data) def init(self): self.rules = 
self.get_rules_raw() self.sysname, self.nodename, x, x, self.machine = os.uname() if self.sysname not in ['Linux']: perror('module not supported on', self.sysname) raise NotApplicable() def get_bios_version_Linux(self): p = os.path.join(os.sep, 'sys', 'class', 'dmi', 'id', 'bios_version') try: f = open(p, 'r') ver = f.read().strip() f.close() return ver except: pass try: cmd = ['dmidecode', '-t', 'bios'] p = Popen(cmd, stdout=PIPE) out, err = p.communicate() if p.returncode != 0: raise out = bdecode(out) for line in out.splitlines(): if 'Version:' in line: return line.split(':')[-1].strip() raise except: perror('can not fetch bios version') return None return ver def fixable(self): return RET_NA def check(self): self.ver = self.get_bios_version_Linux() if self.ver is None: return RET_NA r = RET_OK for rule in self.rules: r |= self._check(rule) return r def _check(self, rule): if self.ver == rule: pinfo("bios version is %s, on target" % self.ver) return RET_OK perror("bios version is %s, target %s" % (self.ver, rule)) return RET_ERR def fix(self): return RET_NA if __name__ == "__main__": main(CompBios) opensvc-1.8~20170412/var/compliance/com.opensvc/self.signed.cert.py0000755000175000017500000001237113073467726025135 0ustar jkelbertjkelbert#!/usr/bin/env python data = { "default_prefix": "OSVC_COMP_CERT_", "example_value": """ { "CN": "%%ENV:SERVICES_SVCNAME%%", "crt": "/srv/%%ENV:SERVICES_SVCNAME%%/data/nginx/conf/ssl/server.crt", "key": "/srv/%%ENV:SERVICES_SVCNAME%%/data/nginx/conf/ssl/server.key", "bits": 2048, "C": "FR", "ST": "Ile de France", "L": "Paris", "O": "OpenSVC", "OU": "Lab", "emailAddress": "support@opensvc.com", "alt_names": [ { "dns": "" } ] } """, "description": """* Check the existance of a key/crt pair * Create the key/crt pair """, "form_definition": """ Desc: | Describe a self-signed certificate Css: comp48 Outputs: - Dest: compliance variable Type: json Format: dict Class: authkey Inputs: - Id: CN Label: Common name DisplayModeLabel: cn 
LabelCss: loc Mandatory: Yes Type: string - Id: crt Label: Cert path DisplayModeLabel: crt LabelCss: key Mandatory: Yes Type: string Help: Where to install the generated certificate - Id: key Label: Key path DisplayModeLabel: key LabelCss: key Mandatory: Yes Type: string Help: Where to install the generated key - Id: bits Label: Bits DisplayModeLabel: bits LabelCss: key Mandatory: Yes Type: integer Default: 2048 Help: Defines the key length in bits - Id: C Label: Country name DisplayModeLabel: country LabelCss: loc Mandatory: Yes Default: FR Type: string - Id: ST Label: State or Province DisplayModeLabel: state LabelCss: loc Mandatory: Yes Default: Ile de France Type: string - Id: L Label: Locality name DisplayModeLabel: locality LabelCss: loc Mandatory: Yes Default: Paris Type: string - Id: O Label: Organization name DisplayModeLabel: org LabelCss: loc Mandatory: Yes Default: OpenSVC Type: string - Id: OU Label: Organization unit DisplayModeLabel: org unit LabelCss: loc Mandatory: Yes Default: IT Type: string - Id: emailAddress Label: Email address DisplayModeLabel: email LabelCss: loc Mandatory: Yes Default: admin@opensvc.com Type: string - Id: alt_names Label: Alternate names DisplayModeLabel: alt names LabelCss: loc Type: form Form: self.signed.cert.alt_names Default: [] Subform: Desc: | Subform for the self.signed.cert form. 
Css: comp48 Outputs: - Type: json Format: list of dict Inputs: - Id: dns Label: DNS DisplayModeLabel: dns LabelCss: loc Type: string Help: An alternate service name """ } import os import sys sys.path.append(os.path.dirname(__file__)) from comp import * from utilities import which from subprocess import * class CompSelfSignedCert(CompObject): def __init__(self, prefix='OSVC_COMP_CERT_'): CompObject.__init__(self, prefix=prefix, data=data) def init(self): self.rules = self.get_rules() if which("openssl") is None: raise NotApplicable("openssl command not found") def check(self): r = 0 for rule in self.rules: r |= self.check_rule(rule) return r def fix(self): r = 0 for rule in self.rules: r |= self.fix_rule(rule) return r def check_rule(self, rule): r = RET_OK if not os.path.exists(rule["key"]): perror("key %s does not exist" % rule["key"]) r = RET_ERR else: pinfo("key %s exists" % rule["key"]) if not os.path.exists(rule["crt"]): perror("crt %s does not exist" % rule["crt"]) r = RET_ERR else: pinfo("crt %s exists" % rule["crt"]) return r def fix_rule(self, rule): if os.path.exists(rule["key"]) and os.path.exists(rule["crt"]): return RET_OK for k in ("key", "crt"): d = os.path.dirname(rule[k]) if not os.path.isdir(d): if os.path.exists(d): perror("%s exists but is not a directory" % d) return RET_ERR else: pinfo("mkdir -p %s" %d) os.makedirs(d) l = [""] for k in ["C", "ST", "L", "O", "OU", "CN", "emailAddress"]: l.append(k+"="+rule[k]) if "alt_names" in rule and len(rule["alt_names"]) > 0: dns = [] for i, d in enumerate(rule["alt_names"]): dns.append("DNS.%d=%s" % (i+1, d["DNS"])) l.append("subjectAltName="+",".join(dns)) l.append("") cmd = ["openssl", "req", "-x509", "-nodes", "-newkey", "rsa:%d" % rule["bits"], "-keyout", rule["key"], "-out", rule["crt"], "-days", "XXX", "-subj", "%s" % "/".join(l)] pinfo(" ".join(cmd)) p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() if p.returncode != 0: if len(out) > 0: pinfo(out) if len(err) > 0: perror(err) 
return RET_ERR return RET_OK if __name__ == "__main__": main(CompSelfSignedCert) opensvc-1.8~20170412/var/compliance/com.opensvc/zfs.py0000755000175000017500000002115613073467726022603 0ustar jkelbertjkelbert#!/usr/bin/env python data = { "default_prefix": "OSVC_COMP_ZFS_", "example_value": """ [ { "name": "rpool/swap", "prop": "aclmode", "op": "=", "value": "discard" }, { "name": "rpool/swap", "prop": "copies", "op": "<", "value": 1 }, { "name": "rpool/swap", "prop": "copies", "op": ">", "value": 0 }, { "name": "rpool/swap", "prop": "copies", "op": "<=", "value": 1 }, { "name": "rpool/swap", "prop": "copies", "op": ">=", "value": 1 } ] """, "description": """* Check the properties values against their target and operator * The collector provides the format with wildcards. * The module replace the wildcards with contextual values. * In the 'fix' the zfs dataset property is set. """, "form_definition": """ Desc: | A rule to set a list of zfs properties. Css: comp48 Outputs: - Dest: compliance variable Type: json Format: list of dict Class: zfs dataset Inputs: - Id: name Label: Dataset Name DisplayModeLabel: dsname LabelCss: hd16 Mandatory: Yes Type: string Help: The zfs dataset name whose property to check. - Id: prop Label: Property DisplayModeLabel: property LabelCss: action16 Mandatory: Yes Type: string Help: The zfs dataset property to check. Candidates: - aclinherit - aclmode - atime - canmount - checksum - compression - copies - dedup - devices - exec - keychangedate - keysource - logbias - mountpoint - nbmand - primarycache - quota - readonly - recordsize - refquota - refreservation - rekeydate - reservation - rstchown - secondarycache - setuid - share.* - snapdir - sync - vscan - xattr - zoned - Id: op_s Key: op Label: Comparison operator DisplayModeLabel: op LabelCss: action16 Type: info Default: "=" ReadOnly: yes Help: The comparison operator to use to check the property current value. 
Condition: "#prop != copies" - Id: op_n Key: op Label: Comparison operator DisplayModeLabel: op LabelCss: action16 Mandatory: Yes Type: string Default: "=" StrictCandidates: yes Candidates: - "=" - ">" - ">=" - "<" - "<=" Help: The comparison operator to use to check the property current value. Condition: "#prop == copies" - Id: value_on_off Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string Help: The zfs dataset property target value. Condition: "#prop IN sharenfs,sharesmb" StrictCandidates: yes Candidates: - "on" - "off" - Id: value_on_off_strict Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string Help: The zfs dataset property target value. Condition: "#prop IN canmount,atime,readonly,exec,devices,setuid,vscan,xattr,jailed,utf8only" StrictCandidates: yes Candidates: - "on" - "off" - Id: value_n Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: integer Help: The zfs dataset property target value. Condition: "#prop IN copies,recordsize,volsize" - Id: value_s Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string Help: The zfs dataset property target value. Condition: "#prop NOT IN normalization,casesensitivity,sync,volmode,logbias,snapdir,dedup,primarycache,secondarycache,redundant_metadata,checksum,compression,aclinherit,aclmode,copies,recordsize,volsize,canmount,atime,readonly,exec,devices,setuid,vscan,xattr,jailed,utf8only,sharenfs,sharesmb" - Id: value_aclinherit Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string Help: The zfs dataset property target value. Condition: "#prop == aclinherit" StrictCandidates: yes Candidates: - "discard" - "noallow" - "restricted" - "passthrough" - "passthrough-x" - Id: value_aclmode Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string Help: The zfs dataset property target value. 
Condition: "#prop == aclmode" StrictCandidates: yes Candidates: - "discard" - "groupmask" - "passthrough" - "restricted" - Id: value_checksum Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string Help: The zfs dataset property target value. Condition: "#prop == checksum" StrictCandidates: yes Candidates: - "on" - "off" - "fletcher2" - "fletcher4" - "sha256" - "noparity" - Id: value_compression Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string Help: The zfs dataset property target value. Condition: "#prop == compression" StrictCandidates: yes Candidates: - "on" - "off" - "lzjb" - "gzip" - "gzip-1" - "gzip-2" - "gzip-3" - "gzip-4" - "gzip-5" - "gzip-6" - "gzip-7" - "gzip-8" - "gzip-9" - "zle" - "lz4" - Id: value_dedup Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string Help: The zfs dataset property target value. Condition: "#prop == dedup" StrictCandidates: yes Candidates: - "on" - "off" - "verify" - "sha256" - "sha256,verify" - Id: value_primarycache Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string Help: The zfs dataset property target value. Condition: "#prop IN primarycache,secondarycache" StrictCandidates: yes Candidates: - "all" - "none" - "metadata" - Id: value_redundant_metadata Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string Help: The zfs dataset property target value. Condition: "#prop == redundant_metadata" StrictCandidates: yes Candidates: - "all" - "most" - Id: value_logbias Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string Help: The zfs dataset property target value. 
Condition: "#prop == logbias" StrictCandidates: yes Candidates: - "latency" - "throughput" - Id: value_snapdir Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string Help: The zfs dataset property target value. Condition: "#prop == snapdir" StrictCandidates: yes Candidates: - "hidden" - "visible" - Id: value_sync Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string Help: The zfs dataset property target value. Condition: "#prop == sync" StrictCandidates: yes Candidates: - "standard" - "always" - "disabled" - Id: value_volmode Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string Help: The zfs dataset property target value. Condition: "#prop == volmode" StrictCandidates: yes Candidates: - "default" - "geom" - "dev" - "none" - Id: value_casesensitivity Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string Help: The zfs dataset property target value. Condition: "#prop == casesensitivity" StrictCandidates: yes Candidates: - "sensitive" - "insensitive" - "mixed" - Id: value_normalization Key: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string Help: The zfs dataset property target value. 
Condition: "#prop == normalization" StrictCandidates: yes Candidates: - "none" - "formC" - "formD" - "formKC" - "formKD" """ } import os import sys sys.path.append(os.path.dirname(__file__)) from zprop import * class CompZfs(CompZprop): def __init__(self, prefix='OSVC_COMP_ZFS_'): CompObject.__init__(self, prefix=prefix, data=data) self.zbin = "zfs" if __name__ == "__main__": main(CompZfs) opensvc-1.8~20170412/var/compliance/com.opensvc/keyval_parser.py0000755000175000017500000001360313073467726024646 0ustar jkelbertjkelbert#!/usr/bin/env python import os import sys import datetime import shutil sys.path.append(os.path.dirname(__file__)) from comp import * class ParserError(Exception): pass class Parser(object): def __init__(self, path, section_markers=None): self.path = path self.data = {} self.changed = False self.nocf = False self.keys = [] self.sections = {} self.section_names = [] self.lastkey = '__lastkey__' self.comments = {self.lastkey: []} if section_markers: self.section_markers = section_markers else: self.section_markers = ["Match"] self.load() self.bkp = path + '.' 
+ str(datetime.datetime.now()) def __str__(self): s = "" for k in self.keys: if k in self.comments: s += '\n'.join(self.comments[k]) + '\n' s += '\n'.join([k + " " + str(v) for v in self.data[k]]) + '\n' if len(self.comments[self.lastkey]) > 0: s += '\n'.join(self.comments[self.lastkey]) for section, data in self.sections.items(): s += section + '\n' for k in data["keys"]: for v in data["data"][k]: s += "\t" + k + " " + str(v) + '\n' return s def truncate(self, key, max): if key not in self.data: return n = len(self.data[key]) if n <= max: return self.data[key] = self.data[key][:max] self.changed = True def set(self, key, value, instance=0): if key not in self.data: self.data[key] = [value] self.keys.append(key) elif instance >= len(self.data[key]): extra = instance + 1 - len(self.data[key]) for i in range(len(self.data[key]), instance-1): self.data[key].append(None) self.data[key].append(value) else: self.data[key].insert(instance, value) self.changed = True def unset(self, key, value=None): if key in self.data: if value is not None and value.strip() != "": self.data[key].remove(value) else: self.data[key] = [] if len(self.data[key]) == 0: del(self.data[key]) if key in self.keys: self.keys.remove(key) self.changed = True def get(self, key, instance=0): if key not in self.data: return if instance is None: return self.data[key] if instance < len(self.data[key]): return self.data[key][instance] return def load(self): if not os.path.exists(self.path): raise ParserError("%s does not exist"%self.path) self.nocf = True return with open(self.path, 'r') as f: buff = f.read() self.parse(buff) def backup(self): if self.nocf: return try: shutil.copy(self.path, self.bkp) except Exception as e: perror(e) raise ParserError("failed to backup %s"%self.path) pinfo("%s backup up as %s" % (self.path, self.bkp)) def restore(self): if self.nocf: return try: shutil.copy(self.bkp, self.path) except: raise ParserError("failed to restore %s"%self.path) pinfo("%s restored from %s" % 
(self.path, self.bkp)) def write(self): self.backup() try: with open(self.path, 'w') as f: f.write(str(self)) pinfo("%s rewritten"%self.path) except Exception as e: perror(e) self.restore() raise ParserError() def parse(self, buff): section = None for line in buff.split("\n"): line = line.strip() # store comment line and continue if line.startswith('#') or len(line) == 0: self.comments[self.lastkey].append(line) continue # strip end-of-line comment try: i = line.index('#') line = line[:i] line = line.strip() except ValueError: pass # discard empty line if len(line) == 0: continue l = line.split() if len(l) < 2: continue key = l[0] value = line[len(key):].strip() if key not in self.comments: self.comments[key] = self.comments[self.lastkey] else: self.comments[key] += self.comments[self.lastkey] self.comments[self.lastkey] = [] try: value = int(value) except: pass if key in self.section_markers: section = key + " " + value if section not in self.sections: self.sections[section] = {"keys": [], "data": {}} self.section_names.append(section) continue if section: if key not in self.sections[section]["keys"]: self.sections[section]["keys"].append(key) if key not in self.sections[section]["data"]: self.sections[section]["data"][key] = [] self.sections[section]["data"][key].append(value) else: if key not in self.keys: self.keys.append(key) if key not in self.data: self.data[key] = [] self.data[key].append(value) if __name__ == "__main__": if len(sys.argv) != 2: perror("wrong number of arguments") sys.exit(1) o = Parser(sys.argv[1]) o.get("Subsystem") o.set("Subsystem", "foo") o.unset("PermitRootLogin") o.backup() pinfo(o) opensvc-1.8~20170412/var/compliance/com.opensvc/yes0000755000175000017500000000017113073467726022144 0ustar jkelbertjkelbert#!/usr/bin/env python from __future__ import print_function try: while True: print("yes") except: pass opensvc-1.8~20170412/var/compliance/com.opensvc/vuln.py0000755000175000017500000005512013073467726022763 0ustar 
jkelbertjkelbert#!/usr/bin/env python data = { "default_prefix": "OSVC_COMP_VULN_", "example_value": """ [ { "pkgname": "kernel", "minver": "2.6.18-238.19.1.el5", "firstver": "2.6.18-238" }, { "pkgname": "kernel-xen", "minver": "2.6.18-238.19.1.el5" } ] """, "description": """* Raise an alert if an installed package version is in a version range * If the package is not installed, do not raise an alert """, "form_definition": """ Desc: | A rule defining a list of vulnerable packages and their minimum release version fixing the vulnerability. Css: comp48 Outputs: - Dest: compliance variable Type: json Format: list of dict Class: vuln Inputs: - Id: pkgname Label: Package name DisplayModeLabel: pkgname LabelCss: pkg16 Mandatory: Yes Type: string Help: The package name, as known to the target system's package manager. - Id: firstver Label: First vulnerable version DisplayModeLabel: firstver LabelCss: pkg16 Mandatory: No Type: string Help: The first vulnerable package version. In the security context, the package version introducing the vulnerability. - Id: minver Label: Minimum version DisplayModeLabel: minver LabelCss: pkg16 Mandatory: Yes Type: string Help: The package minimum version. In the security context, the package version fixing the vulnerability. 
""" } import os import sys import json import pwd import sys import re import tempfile from subprocess import * from distutils.version import LooseVersion as V from utilities import which sys.path.append(os.path.dirname(__file__)) from comp import * def repl(matchobj): return '.0'+matchobj.group(0)[1:] class LiveKernVulnerable(Exception): pass class CompVuln(CompObject): def __init__(self, prefix=None, uri=None): CompObject.__init__(self, prefix=prefix, data=data) self.uri = uri def init(self): self.highest_avail_version = "0" self.fix_list = [] self.need_pushpkg = False self.sysname, self.nodename, x, x, self.machine = os.uname() if 'OSVC_COMP_VULN_STRICT' in os.environ and \ os.environ['OSVC_COMP_VULN_STRICT'] == "true": self.strict = True else: self.strict = False if 'OSVC_COMP_VULN_PKG_TYPE' in os.environ and \ os.environ['OSVC_COMP_VULN_PKG_TYPE'] == "bundle": self.pkg_type = 'bundle' else: self.pkg_type = 'product' self.packages = [] for k, rule in self.get_rule_items(): try: self.packages += self.add_rule(k, rule) except InitError: continue except ValueError: perror('failed to parse variable', os.environ[k]) if len(self.packages) == 0: raise NotApplicable() if self.sysname not in ['Linux', 'HP-UX', 'AIX', 'SunOS']: perror('module not supported on', self.sysname) raise NotApplicable() if 'OSVC_COMP_NODES_OS_VENDOR' not in os.environ: perror("OS_VENDOR is not set. 
Check your asset") raise NotApplicable() vendor = os.environ['OSVC_COMP_NODES_OS_VENDOR'] if vendor in ['Debian', 'Ubuntu']: self.get_installed_packages = self.deb_get_installed_packages self.fix_pkg = self.apt_fix_pkg self.fixable_pkg = self.apt_fixable_pkg self.fix_all = None elif vendor in ['CentOS', 'Redhat', 'Red Hat'] or \ (vendor == 'Oracle' and self.sysname == 'Linux'): self.get_installed_packages = self.rpm_get_installed_packages self.fix_pkg = self.yum_fix_pkg self.fixable_pkg = self.yum_fixable_pkg self.fix_all = None elif vendor in ['SuSE']: self.get_installed_packages = self.rpm_get_installed_packages self.fix_pkg = self.zyp_fix_pkg self.fixable_pkg = self.zyp_fixable_pkg self.fix_all = None elif vendor in ['HP']: if self.uri is None: perror("URI is not set") raise NotApplicable() self.get_installed_packages = self.hp_get_installed_packages self.fix_pkg = self.hp_fix_pkg self.fixable_pkg = self.hp_fixable_pkg self.fix_all = self.hp_fix_all elif vendor in ['IBM']: self.get_installed_packages = self.aix_get_installed_packages self.fix_pkg = self.aix_fix_pkg self.fixable_pkg = self.aix_fixable_pkg self.fix_all = None elif vendor in ['Oracle']: self.get_installed_packages = self.sol_get_installed_packages self.fix_pkg = self.sol_fix_pkg self.fixable_pkg = self.sol_fixable_pkg self.fix_all = None else: perror(vendor, "not supported") raise NotApplicable() self.installed_packages = self.get_installed_packages() def add_rule(self, k, o): o["rule"] = k.replace("OSVC_COMP_", "") return [o] def get_free(self, c): if not os.path.exists(c): return 0 cmd = ["df", "-k", c] p = Popen(cmd, stdout=PIPE, stderr=None) out, err = p.communicate() out = bdecode(out) for line in out.split(): if "%" in line: l = out.split() for i, w in enumerate(l): if '%' in w: break try: f = int(l[i-1]) return f except: return 0 return 0 def get_temp_dir(self): if hasattr(self, "tmpd"): return self.tmpd candidates = ["/tmp", "/var/tmp", "/root"] free = {} for c in candidates: 
free[self.get_free(c)] = c max = sorted(free.keys())[-1] self.tmpd = free[max] pinfo("selected %s as temp dir (%d KB free)" % (self.tmpd, max)) return self.tmpd def download(self, pkg_name): import urllib import tempfile f = tempfile.NamedTemporaryFile(dir=self.get_temp_dir()) dname = f.name f.close() try: os.makedirs(dname) except: pass fname = os.path.join(dname, "file") try: self.urlretrieve(pkg_name, fname) except IOError: try: os.unlink(fname) os.unlink(dname) except: pass raise Exception("download failed: %s" % str(e)) import tarfile os.chdir(dname) try: tar = tarfile.open(fname) except: pinfo("not a tarball") return fname try: tar.extractall() except: try: os.unlink(fname) os.unlink(dname) except: pass # must be a pkg return dname tar.close() os.unlink(fname) return dname def get_os_ver(self): cmd = ['uname', '-v'] p = Popen(cmd, stdout=PIPE) out, err = p.communicate() if p.returncode != 0: return 0 out = bdecode(out) lines = out.splitlines() if len(lines) == 0: return 0 try: osver = float(lines[0]) except: osver = 0 return osver def sol_fix_pkg(self, pkg): r = self.check_pkg(pkg) if r == RET_OK: return RET_NA if 'repo' not in pkg or len(pkg['repo']) == 0: perror("no repo specified in the rule") return RET_NA pkg_url = pkg['repo']+"/"+pkg['pkgname'] pinfo("download", pkg_url) try: dname = self.download(pkg_url) except Exception as e: perror(e) return RET_ERR if pkg["pkgname"] in self.installed_packages: os.chdir("/") yes = os.path.dirname(__file__) + "/yes" cmd = '%s | pkgrm %s' % (yes, pkg['pkgname']) print(cmd) r = os.system(cmd) if r != 0: return RET_ERR if os.path.isfile(dname): d = dname else: d = "." 
os.chdir(dname) if self.get_os_ver() < 10: opts = '' else: opts = '-G' if 'resp' in pkg and len(pkg['resp']) > 0: f = tempfile.NamedTemporaryFile(dir=self.get_temp_dir()) resp = f.name f.close() with open(resp, "w") as f: f.write(pkg['resp']) else: resp = "/dev/null" yes = os.path.dirname(__file__) + "/yes" cmd = '%s | pkgadd -r %s %s -d %s all' % (yes, resp, opts, d) print(cmd) r = os.system(cmd) os.chdir("/") if os.path.isdir(dname): import shutil shutil.rmtree(dname) if r != 0: return RET_ERR return RET_OK def sol_fixable_pkg(self, pkg): return 0 def sol_fix_all(self): return RET_NA def sol_get_installed_packages(self): p = Popen(['pkginfo', '-l'], stdout=PIPE) (out, err) = p.communicate() if p.returncode != 0: perror('can not fetch installed packages list') return {} out = bdecode(out) return self.sol_parse_pkginfo(out) def sol_parse_pkginfo(self, out): l = {} for line in out.split('\n'): v = line.split(':') if len(v) != 2: continue f = v[0].strip() if f == "PKGINST": pkgname = v[1].strip() elif f == "ARCH": pkgarch = v[1].strip() elif f == "VERSION": pkgvers = v[1].strip() if pkgname in l: l[pkgname] += [(pkgvers, pkgarch)] else: l[pkgname] = [(pkgvers, pkgarch)] return l def aix_fix_pkg(self, pkg): r = self.check_pkg(pkg) if r == RET_OK: return RET_NA cmd = ['nimclient', '-o', 'cust', '-a', 'lpp_source=%s'%self.uri, '-a', 'installp_flags=aFQY', '-a', 'filesets=%s'%pkg['pkgname']] s = " ".join(cmd) pinfo(s) r = os.system(s) if r != 0: return RET_ERR return RET_OK def aix_fixable_pkg(self, pkg): return RET_NA def aix_fix_all(self): return RET_NA def aix_get_installed_packages(self): p = Popen(['lslpp', '-L', '-c'], stdout=PIPE) (out, err) = p.communicate() if p.returncode != 0: perror('can not fetch installed packages list') return {} out = bdecode(out) return self.aix_parse_lslpp(out) def aix_parse_lslpp(self, out): l = {} for line in out.split('\n'): if line.startswith('#') or len(line) == 0: continue v = line.split(':') if len(v) < 3: continue pkgname = 
v[1].replace('-'+v[2], '') if pkgname in l: l[pkgname] += [(v[2], "")] else: l[pkgname] = [(v[2], "")] return l def hp_fix_pkg(self, pkg): if self.check_pkg(pkg, verbose=False) == RET_OK: return RET_OK if self.fixable_pkg(pkg) == RET_ERR: return RET_ERR if self.highest_avail_version == "0": return RET_ERR if self.strict: self.fix_list.append(pkg["pkgname"]+',r='+pkg["minver"]) else: self.fix_list.append(pkg["pkgname"]+',r='+self.highest_avail_version) self.need_pushpkg = True self.installed_packages = self.get_installed_packages() return RET_OK def hp_fix_all(self): r = call(['swinstall', '-x', 'allow_downdate=true', '-x', 'autoreboot=true', '-x', 'mount_all_filesystems=false', '-s', self.uri] + self.fix_list) if r != 0: return RET_ERR return RET_OK def hp_fixable_pkg(self, pkg): self.highest_avail_version = "0" if self.check_pkg(pkg, verbose=False) == RET_OK: return RET_OK cmd = ['swlist', '-l', self.pkg_type, '-s', self.uri, pkg['pkgname']] p = Popen(cmd, stdout=PIPE, stderr=PIPE) (out, err) = p.communicate() if p.returncode != 0: if "not found on host" in err: perror('%s > %s not available in repositories'%(pkg['pkgname'], pkg['minver'])) else: perror('can not fetch available packages list') return RET_ERR out = bdecode(out) l = self.hp_parse_swlist(out) if len(l) == 0: perror('%s > %s not available in repositories'%(pkg['pkgname'], pkg['minver'])) return RET_ERR for v in map(lambda x: x[0], l.values()[0]): if V(v) > V(self.highest_avail_version): self.highest_avail_version = v if V(self.highest_avail_version) < V(pkg['minver']): perror('%s > %s not available in repositories'%(pkg['pkgname'], pkg['minver'])) return RET_ERR return RET_OK def hp_get_installed_packages(self): p = Popen(['swlist', '-l', self.pkg_type], stdout=PIPE) (out, err) = p.communicate() if p.returncode != 0: perror('can not fetch installed packages list') return {} out = bdecode(out) return self.hp_parse_swlist(out) def hp_parse_swlist(self, out): l = {} for line in out.split('\n'): if 
line.startswith('#') or len(line) == 0: continue v = line.split() if len(v) < 2: continue if v[0] in l: l[v[0]] += [(v[1], "")] else: l[v[0]] = [(v[1], "")] return l def rpm_get_installed_packages(self): p = Popen(['rpm', '-qa', '--qf', '%{NAME} %{VERSION}-%{RELEASE} %{ARCH}\n'], stdout=PIPE) (out, err) = p.communicate() if p.returncode != 0: perror('can not fetch installed packages list') return {} l = {} out = bdecode(out) for line in out.splitlines(): v = line.split(' ') if len(v) != 3: continue if v[0] in l: l[v[0]] += [(v[1], v[2])] else: l[v[0]] = [(v[1], v[2])] return l def deb_get_installed_packages(self): p = Popen(['dpkg', '-l'], stdout=PIPE) (out, err) = p.communicate() if p.returncode != 0: perror('can not fetch installed packages list') return {} l = {} out = bdecode(out) for line in out.splitlines(): if not line.startswith('ii'): continue v = line.split()[1:3] pkgname = v[0] pkgname = pkgname.split(':')[0] l[pkgname] = [(v[1], "")] return l def apt_fixable_pkg(self, pkg): # TODO return RET_NA def zyp_fixable_pkg(self, pkg): return RET_NA def yum_fixable_pkg(self, pkg): try: r = self.check_pkg(pkg, verbose=False) except LiveKernVulnerable: r = RET_OK if r == RET_OK: return RET_OK cmd = ['yum', 'list', 'available', pkg['pkgname']] p = Popen(cmd, stdout=PIPE, stderr=PIPE) (out, err) = p.communicate() if p.returncode != 0: if "No matching Packages" in err: perror('%s > %s not available in repositories'%(pkg['pkgname'], pkg['minver'])) else: perror('can not fetch available packages list') return RET_ERR highest_avail_version = "0" out = bdecode(out) for line in out.splitlines(): l = line.split() if len(l) != 3: continue if V(l[1]) > V(highest_avail_version): highest_avail_version = l[1] if V(highest_avail_version) < V(pkg['minver']): perror('%s > %s not available in repositories'%(pkg['pkgname'], pkg['minver'])) return RET_ERR return RET_OK def tainted(self, pkg): if not pkg["pkgname"].startswith("kernel-") and \ not 
pkg["pkgname"].startswith("linux-image"): return False if self.sysname != 'Linux': return False if not os.path.exists("/proc/sys/kernel/tainted"): return False with open("/proc/sys/kernel/tainted", "r") as f: buff = f.read() if buff == "0": return False return True def zyp_fix_pkg(self, pkg): try: r = self.check_pkg(pkg, verbose=False) except LiveKernVulnerable: r = RET_OK if r == RET_OK: return RET_OK if self.fixable_pkg(pkg) == RET_ERR: return RET_ERR r = call(['zypper', 'install', '-y', pkg["pkgname"]]) if r != 0: return RET_ERR self.need_pushpkg = True self.installed_packages = self.get_installed_packages() return RET_OK def yum_fix_pkg(self, pkg): try: r = self.check_pkg(pkg, verbose=False) except LiveKernVulnerable: r = RET_OK if r == RET_OK: return RET_OK if self.fixable_pkg(pkg) == RET_ERR: return RET_ERR r = call(['yum', '-y', 'install', pkg["pkgname"]]) if r != 0: return RET_ERR self.need_pushpkg = True self.installed_packages = self.get_installed_packages() return RET_OK def apt_fix_pkg(self, pkg): if self.check_pkg(pkg, verbose=False) == RET_OK: return RET_OK r = call(['apt-get', 'install', '-y', '--allow-unauthenticated', pkg["pkgname"]]) if r != 0: return RET_ERR self.need_pushpkg = True self.installed_packages = self.get_installed_packages() return RET_OK def get_raw_kver(self): return os.uname()[2] def get_kver(self): s = self.get_raw_kver() s = s.replace('xen', '') s = s.replace('hugemem', '') s = s.replace('smp', '') s = s.replace('PAE', '') s = s.replace('.x86_64','') s = s.replace('.i686','') return s def workaround_python_cmp(self, s): """ python list cmp says a > 9, but rpm says z < 0, ie : python says 2.6.18-238.el5 > 2.6.18-238.11.1.el5 which is wrong in the POV of the package manager. 
replace .[a-z]* by .00000000[a-z] to force the desired behaviour """ return re.sub("\.[a-zA-Z]+", repl, s) def check_pkg(self, pkg, verbose=True): if not pkg["pkgname"] in self.installed_packages: if verbose: pinfo(pkg["pkgname"], "is not installed (%s:not applicable)"%pkg["rule"]) return RET_OK name = pkg["pkgname"] if name.startswith("kernel"): if self.tainted(pkg): pinfo(name, "booted kernel is tainted", "(%s)"%pkg["rule"]) kver = self.get_raw_kver() for i in ('xen', 'hugemem', 'smp', 'PAE'): if kver.endswith(i) and name != "kernel-"+i: if verbose: pinfo(name, "bypassed :", i, "kernel booted", "(%s:not applicable)"%pkg["rule"]) return RET_OK r = RET_OK max = "0" max_v = V(max) ok = [] minver = self.workaround_python_cmp(pkg['minver']) target = V(minver) if 'firstver' in pkg and pkg['firstver'] != "": firstver = self.workaround_python_cmp(pkg['firstver']) else: firstver = "0" firstver_v = V(firstver) candidates = map(lambda x: [name]+list(x), self.installed_packages[name]) for _name, vers, arch in candidates: _vers = self.workaround_python_cmp(vers) actual = V(_vers) if actual > max_v or max == "0": max = vers max_v = actual if target <= actual or firstver_v > actual: ok.append((_name, vers, arch)) if max == "0": # not installed if verbose: pinfo(name, "is not installed (%s:not applicable)"%pkg["rule"]) return RET_OK if name.startswith("kernel"): kver = self.get_kver() if len(ok) == 0: if verbose: perror(', '.join(map(lambda x: x[0]+"-"+x[1]+"."+x[2], candidates)), 'installed and vulnerable. 
upgrade to', pkg["minver"], "(%s:need upgrade)"%pkg["rule"]) return RET_ERR elif kver not in map(lambda x: x[1], ok): if verbose: perror(', '.join(map(lambda x: x[0]+"-"+x[1]+"."+x[2], ok)), "installed and not vulnerable but vulnerable kernel", self.get_raw_kver(), "booted", "(%s:need reboot)"%pkg["rule"]) raise LiveKernVulnerable() else: if verbose: pinfo("kernel", self.get_raw_kver(), "installed, booted and not vulnerable", "(%s:not vulnerable)"%pkg["rule"]) return RET_OK if len(ok) > 0: if verbose: pinfo("%s installed and not vulnerable (%s:not vulnerable)"%(', '.join(map(lambda x: x[0]+"-"+x[1]+"."+x[2], ok)), pkg["rule"])) return RET_OK if verbose: perror('package', name+"-"+vers, 'is vulnerable. upgrade to', pkg["minver"], "(%s:need upgrade)"%pkg["rule"]) return RET_ERR def check(self): r = 0 for pkg in self.packages: try: _r = self.check_pkg(pkg) r |= _r except LiveKernVulnerable: r |= RET_ERR return r def fix(self): r = 0 for pkg in self.packages: if self.tainted(pkg): perror(name, "booted kernel is tainted. 
not safe to upgrade.", "(%s)"%pkg["rule"]) r |= self.fix_pkg(pkg) if self.fix_all is not None and len(self.fix_list) > 0: self.fix_all() if self.need_pushpkg: self.pushpkg() return r def pushpkg(self): bin = 'nodemgr' if which(bin) is None: return cmd = [bin, 'pushpkg'] pinfo(' '.join(cmd)) p = Popen(cmd) p.communicate() def fixable(self): r = 0 for pkg in self.packages: r |= self.fixable_pkg(pkg) return r if __name__ == "__main__": main(CompVuln) opensvc-1.8~20170412/var/compliance/com.opensvc/etcsystem.py0000755000175000017500000001547413073467726024027 0ustar jkelbertjkelbert#!/usr/bin/env python data = { "default_prefix": "OSVC_COMP_ETCSYSTEM_", "example_value": """ [{"key": "fcp:fcp_offline_delay", "op": ">=", "value": 21}, {"key": "ssd:ssd_io_time", "op": "=", "value": "0x3C"}] """, "description": "Checks and setup values in /etc/system respecting strict targets or thresholds.", "form_definition": """ Desc: | A rule to set a list of Solaris kernel parameters to be set in /etc/system. Current values can be checked as strictly equal, or superior/inferior to their target value. Css: comp48 Outputs: - Dest: compliance variable Type: json Format: list of dict Class: etcsystem Inputs: - Id: key Label: Key DisplayModeLabel: key LabelCss: action16 Mandatory: Yes Type: string Help: The /etc/system parameter to check. - Id: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string or integer Help: The /etc/system parameter target value. - Id: op Label: Comparison operator DisplayModeLabel: op LabelCss: action16 Mandatory: Yes Type: string Default: "=" Candidates: - "=" - ">" - ">=" - "<" - "<=" Help: The comparison operator to use to check the parameter current value. 
""", } import os import sys from subprocess import * sys.path.append(os.path.dirname(__file__)) from comp import * class EtcSystem(CompObject): def __init__(self, prefix=None): CompObject.__init__(self, prefix=prefix, data=data) def init(self): self.keys = self.get_rules() if len(self.keys) == 0: raise NotApplicable() self.data = {} self.cf = os.path.join(os.sep, 'etc', 'system') self.load_file(self.cf) def fixable(self): return RET_OK def load_file(self, p): if not os.path.exists(p): perror(p, "does not exist") return with open(p, 'r') as f: buff = f.read() self.lines = buff.split('\n') for i, line in enumerate(self.lines): line = line.strip() if line.startswith('*'): continue if len(line) == 0: continue l = line.split() if l[0] != "set": continue if len(l) < 2: continue line = ' '.join(l[1:]).split('*')[0] var, val = line.split('=') var = var.strip() val = val.strip() try: val = int(val) except: pass if var in self.data: self.data[var].append([val, i]) else: self.data[var] = [[val, i]] def set_val(self, keyname, target, op): newline = 'set %s = %s'%(keyname, str(target)) if keyname not in self.data: pinfo("add '%s' to /etc/system"%newline) self.lines.insert(-1, newline + " * added by opensvc") else: ok = 0 for value, ref in self.data[keyname]: r = self._check_key(keyname, target, op, value, ref, verbose=False) if r == RET_ERR: pinfo("comment out line %d: %s"%(ref, self.lines[ref])) self.lines[ref] = '* '+self.lines[ref]+' * commented out by opensvc' else: ok += 1 if ok == 0: pinfo("add '%s' to /etc/system"%newline) self.lines.insert(-1, newline + " * added by opensvc") def get_val(self, keyname): if keyname not in self.data: return [] return self.data[keyname] def _check_key(self, keyname, target, op, value, ref, verbose=True): r = RET_OK if value is None: if verbose: perror("%s not set"%keyname) r |= RET_ERR if op == '=': if str(value) != str(target): if verbose: perror("%s=%s, target: %s"%(keyname, str(value), str(target))) r |= RET_ERR elif verbose: 
pinfo("%s=%s on target"%(keyname, str(value))) else: if type(value) != int: if verbose: perror("%s=%s value must be integer"%(keyname, str(value))) r |= RET_ERR elif op == '<=' and value > target: if verbose: perror("%s=%s target: <= %s"%(keyname, str(value), str(target))) r |= RET_ERR elif op == '>=' and value < target: if verbose: perror("%s=%s target: >= %s"%(keyname, str(value), str(target))) r |= RET_ERR elif verbose: pinfo("%s=%s on target"%(keyname, str(value))) return r def check_key(self, key, verbose=True): if 'key' not in key: if verbose: perror("'key' not set in rule %s"%str(key)) return RET_NA if 'value' not in key: if verbose: perror("'value' not set in rule %s"%str(key)) return RET_NA if 'op' not in key: op = "=" else: op = key['op'] target = key['value'] if op not in ('>=', '<=', '='): if verbose: perror("'value' list member 0 must be either '=', '>=' or '<=': %s"%str(key)) return RET_NA keyname = key['key'] data = self.get_val(keyname) if len(data) == 0: perror("%s key is not set"%keyname) return RET_ERR r = RET_OK ok = 0 for value, ref in data: r |= self._check_key(keyname, target, op, value, ref, verbose) if r == RET_OK: ok += 1 if ok > 1: perror("duplicate lines for key %s"%keyname) r |= RET_ERR return r def fix_key(self, key): self.set_val(key['key'], key['value'], key['op']) def check(self): r = 0 for key in self.keys: r |= self.check_key(key, verbose=True) return r def fix(self): for key in self.keys: if self.check_key(key, verbose=False) == RET_ERR: self.fix_key(key) if len(self.keys) > 0: import datetime backup = self.cf+str(datetime.datetime.now()) try: import shutil shutil.copy(self.cf, backup) except: perror("failed to backup %s"%self.cf) return RET_ERR try: with open(self.cf, 'w') as f: f.write('\n'.join(self.lines)) except: perror("failed to write %s"%self.cf) return RET_ERR return RET_OK if __name__ == "__main__": main(EtcSystem) 
opensvc-1.8~20170412/var/compliance/com.opensvc/svcconf.py0000755000175000017500000002341513073467726023442 0ustar jkelbertjkelbert#!/usr/bin/env python data = { "default_prefix": "OSVC_COMP_GROUP_", "example_env": { "OSVC_COMP_SERVICES_SVCNAME": "testsvc", }, "example_value": """ [ { "value": "fd5373b3d938", "key": "container#1.run_image", "op": "=" }, { "value": "/bin/sh", "key": "container#1.run_command", "op": "=" }, { "value": "/opt/%%ENV:SERVICES_SVCNAME%%", "key": "DEFAULT.docker_data_dir", "op": "=" }, { "value": "no", "key": "container(type=docker).disable", "op": "=" }, { "value": 123, "key": "container(type=docker&&run_command=/bin/sh).newvar", "op": "=" } ] """, "description": """* Setup and verify parameters in a opensvc service configuration. """, "form_definition": """ Desc: | A rule to set a parameter in OpenSVC .conf configuration file. Used by the 'svcconf' compliance object. Css: comp48 Outputs: - Dest: compliance variable Type: json Format: list of dict Class: svcconf Inputs: - Id: key Label: Key DisplayModeLabel: key LabelCss: action16 Mandatory: Yes Type: string Help: The OpenSVC .conf parameter to check. - Id: op Label: Comparison operator DisplayModeLabel: op LabelCss: action16 Mandatory: Yes Type: string Default: "=" Candidates: - "=" - ">" - ">=" - "<" - "<=" Help: The comparison operator to use to check the parameter value. - Id: value Label: Value DisplayModeLabel: value LabelCss: action16 Mandatory: Yes Type: string or integer Help: The OpenSVC .conf parameter value to check. 
""", } import os import sys import json import re import copy from subprocess import * sys.path.append(os.path.dirname(__file__)) from comp import * class SvcConf(CompObject): def __init__(self, prefix=None): CompObject.__init__(self, prefix=prefix, data=data) def init(self): self.keys = [] if "OSVC_COMP_SERVICES_SVCNAME" not in os.environ: pinfo("SERVICES_SVCNAME is not set") raise NotApplicable() self.svcname = os.environ['OSVC_COMP_SERVICES_SVCNAME'] self.keys = self.get_rules() try: self.get_config_file(refresh=True) except Exception as e: perror("unable to load service configuration:", str(e)) raise ComplianceError() self.sanitize_keys() self.expand_keys() def get_config_file(self, refresh=False): if not refresh: return self.svc_config cmd = ['svcmgr', '-s', self.svcname, 'json_config'] p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() out = bdecode(out) self.svc_config = json.loads(out) return self.svc_config def fixable(self): return RET_NA def set_val(self, keyname, target): if type(target) == int: target = str(target) cmd = ['svcmgr', '-s', self.svcname, 'set', '--param', keyname, '--value', target] pinfo(' '.join(cmd)) p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() return p.returncode def get_val(self, keyname): section, var = keyname.split('.') if section not in self.svc_config: return None return self.svc_config[section].get(var) def _check_key(self, keyname, target, op, value, verbose=True): r = RET_OK if value is None: if verbose: perror("%s not set"%keyname) r |= RET_ERR if op == '=': if str(value) != str(target): if verbose: perror("%s=%s, target: %s"%(keyname, str(value), str(target))) r |= RET_ERR elif verbose: pinfo("%s=%s on target"%(keyname, str(value))) else: if type(value) != int: if verbose: perror("%s=%s value must be integer"%(keyname, str(value))) r |= RET_ERR elif op == '<=' and value > target: if verbose: perror("%s=%s target: <= %s"%(keyname, str(value), str(target))) r |= RET_ERR elif op == '>=' 
and value < target: if verbose: perror("%s=%s target: >= %s"%(keyname, str(value), str(target))) r |= RET_ERR elif verbose: pinfo("%s=%s on target"%(keyname, str(value))) return r def check_filter(self, section, filter): op = None i = 0 try: i = filter.index("&&") op = "and" except ValueError: pass try: i = filter.index("||") op = "or" except ValueError: pass if i == 0: _filter = filter _tail = "" else: _filter = filter[:i] _tail = filter[i:].lstrip("&&").lstrip("||") r = self._check_filter(section, _filter) #pinfo(" _check_filter('%s', '%s') => %s" % (section, _filter, str(r))) if op == "and": r &= self.check_filter(section, _tail) elif op == "or": r |= self.check_filter(section, _tail) return r def _check_filter(self, section, filter): if "~=" in filter: return self._check_filter_reg(section, filter) elif "=" in filter: return self._check_filter_eq(section, filter) perror("invalid filter syntax: %s" % filter) return False def _check_filter_eq(self, section, filter): l = filter.split("=") if len(l) != 2: perror("invalid filter syntax: %s" % filter) return False key, val = l cur_val = self.svc_config[section].get(key) if cur_val is None: return False if str(cur_val) == str(val): return True return False def _check_filter_reg(self, section, filter): l = filter.split("~=") if len(l) != 2: perror("invalid filter syntax: %s" % filter) return False key, val = l val = val.strip("/") cur_val = self.svc_config[section].get(key) if cur_val is None: return False reg = re.compile(val) if reg.match(cur_val): return True return False def resolve_sections(self, s, filter): """ s is a ressource section name (fs, container, app, sync, ...) 
filter is a regexp like expression container(type=docker) fs(mnt~=/.*tools/) container(type=docker&&run_image~=/opensvc\/collector_web:build.*/) fs(mnt~=/.*tools/||mnt~=/.*moteurs/) """ result = []; eligiblesections = []; for section in self.svc_config.keys(): if section.startswith(s+'#') or section == s: eligiblesections.append(section) for section in eligiblesections: if self.check_filter(section, filter): #pinfo(" =>", section, "matches filter") result.append(section) result.sort() return result def sanitize_keys(self, verbose=True): r = RET_OK for key in self.keys: if 'key' not in key: if verbose: perror("'key' not set in rule %s"%str(key)) r |= RET_NA if 'value' not in key: if verbose: perror("'value' not set in rule %s"%str(key)) r |= RET_NA if 'op' not in key: op = "=" else: op = key['op'] if op not in ('>=', '<=', '='): if verbose: perror("'value' list member 0 must be either '=', '>=' or '<=': %s"%str(key)) r |= RET_NA if r is not RET_OK: sys.exit(r) def expand_keys(self): expanded_keys = [] for key in self.keys: keyname = key['key'] target = key['value'] op = key['op'] sectionlist = []; reg1 = re.compile(r'(.*)\((.*)\)\.(.*)') reg2 = re.compile(r'(.*)\.(.*)') m = reg1.search(keyname) if m: section = m.group(1) filter = m.group(2) var = m.group(3) sectionlist = self.resolve_sections(section, filter) for resolvedsection in sectionlist: newdict = { 'key': '.'.join([resolvedsection, var]), 'op': op, 'value': target } expanded_keys.append(newdict) continue m = reg2.search(keyname) if m: section = m.group(1) var = m.group(2) expanded_keys.append(copy.copy(key)) continue # drop key self.keys = expanded_keys def check_key(self, key, verbose=True): op = key['op'] target = key['value'] keyname = key['key'] value = self.get_val(keyname) if value is None: if verbose: perror("%s key is not set"%keyname) return RET_ERR return self._check_key(keyname, target, op, value, verbose) def fix_key(self, key): return self.set_val(key['key'], key['value']) def check(self): r = 0 
for key in self.keys: r |= self.check_key(key, verbose=True) return r def fix(self): r = 0 for key in self.keys: if self.check_key(key, verbose=False) == RET_ERR: r += self.fix_key(key) return r if __name__ == "__main__": main(SvcConf) opensvc-1.8~20170412/var/compliance/com.opensvc/xinetd.py0000755000175000017500000001261213073467726023271 0ustar jkelbertjkelbert#!/usr/bin/env python data = { "default_prefix": "OSVC_COMP_XINETD_", "example_value": """ { "gssftp": { "disable": "no", "server_args": "-l -a -u 022" } }""", "description": """* Setup and verify a xinetd service configuration """, "form_definition": """ Desc: | A rule defining how a xinetd service should be configured Inputs: - Id: xinetdsvc Label: Service Name DisplayModeLabel: service LabelCss: action16 Mandatory: Yes Help: The xinetd service name, ie the service file name in /etc/xinetd.d/ Type: string - Id: disable Label: Disable DisplayModeLabel: Disable LabelCss: action16 Help: Defines if the xinetd service target state is enabled or disabled Type: string Default: yes Candidates: - "yes" - "no" - Id: server_args Label: Server Args DisplayModeLabel: args LabelCss: action16 Help: Command line parameter to pass to the service's server executable Type: string """, } import os import sys import json import pwd sys.path.append(os.path.dirname(__file__)) from comp import * class Xinetd(CompObject): def __init__(self, prefix=None): CompObject.__init__(self, prefix=prefix, data=data) def init(self): self.base = os.path.join(os.sep, "etc", "xinetd.d") if not os.path.exists(self.base): perror(self.base, 'does not exist') raise NotApplicable() self.svcs = {} for d in self.get_rules(): self.svcs.update(d) if len(self.svcs) == 0: raise NotApplicable() self.cf_d = {} self.known_props = ( "flags", "socket_type", "wait", "user", "server", "server_args", "disable") def fixable(self): return RET_NA def get_svc(self, svc): if svc in self.cf_d: return self.cf_d[svc] p = os.path.join(self.base, svc) if not 
os.path.exists(p): self.cf_d[svc] = {} return self.cf_d[svc] if svc not in self.cf_d: self.cf_d[svc] = {} with open(p, 'r') as f: for line in f.read().split('\n'): if '=' not in line: continue l = line.split('=') if len(l) != 2: continue var = l[0].strip() val = l[1].strip() self.cf_d[svc][var] = val return self.cf_d[svc] def fix_item(self, svc, item, target): if item not in self.known_props: perror('xinetd service', svc, item+': unknown property in compliance rule') return RET_ERR cf = self.get_svc(svc) if item in cf and cf[item] == target: return RET_OK p = os.path.join(self.base, svc) if not os.path.exists(p): perror(p, "does not exist") return RET_ERR done = False with open(p, 'r') as f: buff = f.read().split('\n') for i, line in enumerate(buff): if '=' not in line: continue l = line.split('=') if len(l) != 2: continue var = l[0].strip() if var != item: continue l[1] = target buff[i] = "= ".join(l) done = True if not done: with open(p, 'r') as f: buff = f.read().split('\n') for i, line in enumerate(buff): if '=' not in line: continue l = line.split('=') if len(l) != 2: continue buff.insert(i, item+" = "+target) done = True break if not done: perror("failed to set", item, "=", target, "in", p) return RET_ERR with open(p, 'w') as f: f.write("\n".join(buff)) pinfo("set", item, "=", target, "in", p) return RET_OK def check_item(self, svc, item, target, verbose=False): if item not in self.known_props: perror('xinetd service', svc, item+': unknown property in compliance rule') return RET_ERR cf = self.get_svc(svc) if item in cf and target == cf[item]: if verbose: pinfo('xinetd service', svc, item+':', cf[item]) return RET_OK elif item in cf: if verbose: perror('xinetd service', svc, item+':', cf[item], 'target:', target) else: if verbose: perror('xinetd service', svc, item+': unset', 'target:', target) return RET_ERR def check_svc(self, svc, props): r = 0 for prop in props: r |= self.check_item(svc, prop, props[prop], verbose=True) return r def fix_svc(self, svc, 
props): r = 0 for prop in props: r |= self.fix_item(svc, prop, props[prop]) return r def check(self): r = 0 for svc, props in self.svcs.items(): r |= self.check_svc(svc, props) return r def fix(self): r = 0 for svc, props in self.svcs.items(): r |= self.fix_svc(svc, props) return r if __name__ == "__main__": main(Xinetd) opensvc-1.8~20170412/var/compliance/com.opensvc/fileprop.py0000755000175000017500000002263713073467726023626 0ustar jkelbertjkelbert#!/usr/bin/env python data = { "default_prefix": "OSVC_COMP_FILEPROP_", "example_value": """ { "path": "/some/path/to/file", "mode": "750", "uid": 500, "gid": 500, } """, "description": """* Verify file existance, mode and ownership. * The collector provides the format with wildcards. * The module replace the wildcards with contextual values. In fix() the file is created empty with the right mode & ownership. """, "form_definition": """ Desc: | A fileprop rule, fed to the 'fileprop' compliance object to verify the target file ownership and permissions. Css: comp48 Outputs: - Dest: compliance variable Class: fileprop Type: json Format: dict Inputs: - Id: path Label: Path DisplayModeLabel: path LabelCss: action16 Mandatory: Yes Help: File path to check the ownership and permissions for. Type: string - Id: mode Label: Permissions DisplayModeLabel: perm LabelCss: action16 Help: "In octal form. 
Example: 644" Type: integer - Id: uid Label: Owner DisplayModeLabel: uid LabelCss: guy16 Help: Either a user ID or a user name Type: string or integer - Id: gid Label: Owner group DisplayModeLabel: gid LabelCss: guy16 Help: Either a group ID or a group name Type: string or integer """, } import os import sys import json import stat import re import pwd import grp sys.path.append(os.path.dirname(__file__)) from comp import * class CompFileProp(CompObject): def __init__(self, prefix=None): CompObject.__init__(self, prefix=prefix, data=data) def init(self): self._usr = {} self._grp = {} self.sysname, self.nodename, x, x, self.machine = os.uname() self.files = [] for rule in self.get_rules(): try: self.files += self.add_file(rule) except InitError: continue except ValueError: perror('fileprop: failed to parse variable', os.environ[k]) if len(self.files) == 0: raise NotApplicable() def add_file(self, d): if 'path' not in d: perror('fileprop: path should be in the dict:', d) RET = RET_ERR return [] try: d["uid"] = int(d["uid"]) except: pass try: d["gid"] = int(d["gid"]) except: pass return [d] def fixable(self): return RET_NA def check_file_type(self, f, verbose=False): r = RET_OK if not os.path.exists(f["path"].rstrip("/")): if verbose: perror("fileprop:", f["path"], "does not exist") r = RET_ERR elif f["path"].endswith("/") and not os.path.isdir(f["path"]): if verbose: perror("fileprop:", f["path"], "exists but is not a directory") r = RET_ERR elif not f["path"].endswith("/") and os.path.isdir(f["path"]): if verbose: perror("fileprop:", f["path"], "exists but is a directory") r = RET_ERR return r def check_file_mode(self, f, verbose=False): if 'mode' not in f: return RET_OK try: mode = oct(stat.S_IMODE(os.stat(f['path']).st_mode)) except: if verbose: perror("fileprop:", f['path'], 'can not stat file') return RET_ERR mode = str(mode).lstrip("0") if mode != str(f['mode']): if verbose: perror("fileprop:", f['path'], 'mode should be %s but is %s'%(f['mode'], mode)) return 
RET_ERR return RET_OK def get_uid(self, uid): if uid in self._usr: return self._usr[uid] tuid = uid if isinstance(uid, (str, unicode)): try: info=pwd.getpwnam(uid) tuid = info[2] self._usr[uid] = tuid except: perror("fileprop:", "user %s does not exist"%uid) raise ComplianceError() return tuid def get_gid(self, gid): if gid in self._grp: return self._grp[gid] tgid = gid if isinstance(gid, (str, unicode)): try: info=grp.getgrnam(gid) tgid = info[2] self._grp[gid] = tgid except: perror("fileprop:", "group %s does not exist"%gid) raise ComplianceError() return tgid def check_file_uid(self, f, verbose=False): if 'uid' not in f: return RET_OK tuid = self.get_uid(f['uid']) try: uid = os.stat(f['path']).st_uid except: if verbose: perror("fileprop:", f['path'], 'can not stat file') return RET_ERR if uid != tuid: if verbose: perror("fileprop:", f['path'], 'uid should be %s but is %s'%(tuid, str(uid))) return RET_ERR return RET_OK def check_file_gid(self, f, verbose=False): if 'gid' not in f: return RET_OK tgid = self.get_gid(f['gid']) try: gid = os.stat(f['path']).st_gid except: if verbose: perror("fileprop:", f['path'], 'can not stat file') return RET_ERR if gid != tgid: if verbose: perror("fileprop:", f['path'], 'gid should be %s but is %s'%(tgid, str(gid))) return RET_ERR return RET_OK def check_file_exists(self, f): if not os.path.exists(f['path']): return RET_ERR return RET_OK def check_file(self, f, verbose=False): if self.check_file_type(f, verbose) == RET_ERR: return RET_ERR r = 0 r |= self.check_file_mode(f, verbose) r |= self.check_file_uid(f, verbose) r |= self.check_file_gid(f, verbose) if r == 0 and verbose: pinfo("fileprop:", f['path'], "is ok") return r def fix_file_mode(self, f): if 'mode' not in f: return RET_OK if self.check_file_mode(f) == RET_OK: return RET_OK try: pinfo("fileprop:", "%s mode set to %s"%(f['path'], str(f['mode']))) os.chmod(f['path'], int(str(f['mode']), 8)) except: return RET_ERR return RET_OK def fix_file_owner(self, f): uid = -1 gid = 
-1 if 'uid' not in f and 'gid' not in f: return RET_OK if 'uid' in f and self.check_file_uid(f) != RET_OK: uid = self.get_uid(f['uid']) if 'gid' in f and self.check_file_gid(f) != RET_OK: gid = self.get_gid(f['gid']) if uid == -1 and gid == -1: return RET_OK try: os.chown(f['path'], uid, gid) except: perror("fileprop:", "failed to set %s ownership to %d:%d"%(f['path'], uid, gid)) return RET_ERR pinfo("fileprop:", "%s ownership set to %d:%d"%(f['path'], uid, gid)) return RET_OK def fix_file_notexists(self, f): if not os.path.exists(f['path'].rstrip("/")): if f['path'].endswith("/"): try: os.makedirs(f['path']) pinfo("fileprop:", f['path'], "created") except: perror("fileprop:", "failed to create", f['path']) return RET_ERR return RET_OK else: dirname = os.path.dirname(f['path']) if not os.path.exists(dirname): pinfo("fileprop:", "create", dirname) try: os.makedirs(dirname) except Exception as e: perror("fileprop:", "failed to create", dirname) return RET_ERR pinfo("fileprop:", "touch", f['path']) open(f['path'], 'a').close() elif f['path'].endswith("/") and not os.path.isdir(f['path']): pinfo("fileprop:", "delete file", f['path'].rstrip("/")) try: os.unlink(f['path'].rstrip("/")) except Exception as e: perror("fileprop:", e) return RET_ERR pinfo("fileprop:", "make directory", f['path']) try: os.makedirs(f['path']) except Exception as e: perror("fileprop:", e) return RET_ERR elif not f['path'].endswith("/") and os.path.isdir(f['path']): perror("fileprop:", "cowardly refusing to remove the existing", f['path'], "directory to create a regular file") return RET_ERR if self.check_file_exists(f) == RET_OK: return RET_OK d = os.path.dirname(f['path']) if not os.path.exists(d): os.makedirs(d) try: os.chown(d, f['uid'], f['gid']) except: pass try: with open(f['path'], 'w') as fi: fi.write('') except: return RET_ERR pinfo("fileprop:", f['path'], "created") return RET_OK def check(self): r = 0 for f in self.files: r |= self.check_file(f, verbose=True) return r def fix(self): r 
= 0 for f in self.files: r |= self.fix_file_notexists(f) r |= self.fix_file_mode(f) r |= self.fix_file_owner(f) return r if __name__ == "__main__": main(CompFileProp) opensvc-1.8~20170412/var/compliance/com.opensvc/symlink.py0000755000175000017500000000735113073467726023470 0ustar jkelbertjkelbert#!/usr/bin/env python data = { "default_prefix": "OSVC_COMP_FILE_", "example_value": """ { "symlink": "/tmp/foo", "target": "/tmp/bar" } """, "description": """* Verify symlink's existance. * The collector provides the format with wildcards. * The module replace the wildcards with contextual values. * In the 'fix' the symlink is created (and intermediate dirs if required). * There is no check or fix for target's existance. * There is no check or fix for mode or ownership of either symlink or target. """, "form_definition": """ Desc: | A symfile rule, fed to the 'symlink' compliance object to create a Unix symbolic link. Css: comp48 Outputs: - Dest: compliance variable Class: symlink Type: json Format: dict Inputs: - Id: symlink Label: Symlink path DisplayModeLabel: symlink LabelCss: hd16 Mandatory: Yes Help: The full path of the symbolic link to check or create. Type: string - Id: target Label: Target path DisplayModeLabel: target LabelCss: hd16 Mandatory: Yes Help: The full path of the target file pointed by the symlink. 
Type: string """ } import os import errno import sys import stat import re import pwd import grp sys.path.append(os.path.dirname(__file__)) from comp import * class InitError(Exception): pass class CompSymlink(CompObject): def __init__(self, prefix='OSVC_COMP_SYMLINK_'): CompObject.__init__(self, prefix=prefix, data=data) def init(self): self.sysname, self.nodename, x, x, self.machine = os.uname() self.symlinks = [] for rule in self.get_rules(): try: self.symlinks += self.add_symlink(rule) except InitError: continue except ValueError: perror('symlink: failed to parse variable', rule) def add_symlink(self, v): if 'symlink' not in v: perror('symlink should be in the dict:', d) RET = RET_ERR return [] if 'target' not in v: perror('target should be in the dict:', d) RET = RET_ERR return [] return [v] def fixable(self): return RET_NA def check_symlink_exists(self, f): if not os.path.islink(f['symlink']): return RET_ERR return RET_OK def check_symlink(self, f, verbose=False): if not os.path.islink(f['symlink']): perror("symlink", f['symlink'], "does not exist") return RET_ERR if os.readlink(f['symlink']) != f['target']: perror("symlink", f['symlink'], "does not point to", f['target']) return RET_ERR if verbose: pinfo("symlink", f['symlink'], "->", f['target'], "is ok") return RET_OK def fix_symlink_notexists(self, f): if self.check_symlink_exists(f) == RET_OK: return RET_OK d = os.path.dirname(f['symlink']) if not os.path.exists(d): try: os.makedirs(d) except OSError as e: if e.errno == 20: perror("symlink: can not create dir", d, "to host the symlink", f['symlink'], ": a parent is not a directory") return RET_ERR raise try: os.symlink(f['target'], f['symlink']) except: return RET_ERR pinfo("symlink", f['symlink'], "->", f['target'], "created") return RET_OK def check(self): r = 0 for f in self.symlinks: r |= self.check_symlink(f, verbose=True) return r def fix(self): r = 0 for f in self.symlinks: r |= self.fix_symlink_notexists(f) return r if __name__ == "__main__": 
main(CompSymlink) opensvc-1.8~20170412/osvcenv.cmd0000644000175000017500000000030013073467726016424 0ustar jkelbertjkelbert@echo off set OSVCROOT=C:\Program Files\opensvc set OSVCPYTHONROOT=%OSVCROOT%\python set PYTHONPATH=%OSVCROOT%\lib set OSVCPYTHONEXEC=%OSVCPYTHONROOT%\python.exe call inpath.cmd OSVCPYTHONROOTopensvc-1.8~20170412/.gitignore0000644000175000017500000000037213073467726016255 0ustar jkelbertjkelbert*.pyc *.class *.swp etc/ log/ python/ var/ test/ tmp/ bin/python lib/version.py bin/pkg/release_* bin/pkg/winbuilder/wxs/opensvcfiles.wxs .hg/ .hgignore lib/rcLocalEnv.py *.wixobj *.wixpdb *.msi nohup.out .project .pydevproject bin/init/opensvc.init opensvc-1.8~20170412/cron.cmd0000644000175000017500000000011213073467726015703 0ustar jkelbertjkelbert@echo off call osvcenv.cmd "%OSVCPYTHONEXEC%" "%OSVCROOT%\cron\opensvc" %*opensvc-1.8~20170412/lib/0000755000175000017500000000000013073467726015031 5ustar jkelbertjkelbertopensvc-1.8~20170412/lib/resIpGce.py0000644000175000017500000001504113073467726017105 0ustar jkelbertjkelbertimport resIp import os import rcStatus from rcGlobalEnv import rcEnv import rcExceptions as ex from rcUtilities import getaddr, justcall import json import rcGce rcIfconfig = __import__('rcIfconfig'+rcEnv.sysname) class Ip(resIp.Ip, rcGce.Gce): def __init__(self, rid=None, ipname=None, ipdev=None, eip=None, routename=None, gce_zone=None, **kwargs): resIp.Ip.__init__(self, rid=rid, ipname=ipname, ipdev=ipdev, **kwargs) self.label = "gce ip %s@%s" % (ipname, ipdev) if eip: self.label += ", eip %s" % eip self.eip = eip self.routename = routename self.gce_zone = gce_zone # cache for route data self.gce_route_data = None def start_local_route(self): if self.has_local_route(): self.log.info("ip route %s/32 dev %s is already installed" % (self.addr, self.ipdev)) return self.add_local_route() def stop_local_route(self): if not self.has_local_route(): self.log.info("ip route %s/32 dev %s is already uninstalled" % (self.addr, self.ipdev)) 
return self.del_local_route() def add_local_route(self): cmd = ["ip", "route", "replace", self.addr+"/32", "dev", self.ipdev] self.vcall(cmd) def del_local_route(self): cmd = ["ip", "route", "del", self.addr+"/32", "dev", self.ipdev] self.vcall(cmd) def has_local_route(self): cmd = ["ip", "route", "list", self.addr+"/32", "dev", self.ipdev] out, err, ret = justcall(cmd) if ret != 0: return False if out == "": return False return True def start_gce_route(self): if not self.routename: return if self.has_gce_route(): self.log.info("gce route %s, %s to instance %s is already installed" % (self.routename, self.addr, rcEnv.nodename)) return if self.exist_gce_route(): self.del_gce_route() self.add_gce_route() self.svc.gce_routes_cache[self.routename] = { "destRange": self.addr+"/32", "nextHopInstance": rcEnv.nodename, } def stop_gce_route(self): if not self.routename: return if not self.has_gce_route(): self.log.info("gce route %s, %s to instance %s is already uninstalled" % (self.routename, self.addr, rcEnv.nodename)) return self.del_gce_route() self.get_gce_routes_list(refresh=True) del(self.svc.gce_routes_cache[self.routename]) def add_gce_route(self): cmd = ["gcloud", "compute", "routes", "-q", "create", self.routename, "--destination-range", self.addr+"/32", "--next-hop-instance", rcEnv.nodename, "--next-hop-instance-zone", self.gce_zone] self.vcall(cmd) def del_gce_route(self): cmd = ["gcloud", "compute", "routes", "-q", "delete", self.routename] self.vcall(cmd) def get_gce_route_data(self, refresh=False): data = self.get_gce_routes_list(refresh=refresh) if data is None: return if not self.routename in data: return return data[self.routename] def get_gce_routes_list(self, refresh=False): if not refresh and hasattr(self.svc, "gce_routes_cache"): return self.svc.gce_routes_cache self.svc.gce_routes_cache = self._get_gce_routes_list() return self.svc.gce_routes_cache def _get_gce_routes_list(self): if not self.routename: return routenames = " ".join([r.routename for r 
in self.svc.get_resources("ip") if hasattr(r, "routename")]) self.wait_gce_auth() cmd = ["gcloud", "compute", "routes", "list", "--format", "json", routenames] out, err, ret = justcall(cmd) if ret != 0: raise ex.excError("gcloud route describe returned with error: %s, %s" % (out, err)) try: data = json.loads(out) except: raise ex.excError("unable to parse gce route data: %s" % out) h = {} for route in data: h[route["name"]] = route return h def exist_gce_route(self): if not self.routename: return True data = self.get_gce_route_data() if not data: return False if data: return True return False def has_gce_route(self): if not self.routename: return True data = self.get_gce_route_data() if not data: return False if data.get("destRange") != self.addr+"/32": return False if data.get("nextHopInstance").split("/")[-1] != rcEnv.nodename: return False return True def is_up(self): """Returns True if ip is associated with this node """ self.getaddr() if not self.has_local_route(): return False if not self.has_gce_route(): return False return True def _status(self, verbose=False): self.getaddr() try: local_status = self.has_local_route() if not local_status: self.status_log("local route is not installed") except ex.excError as e: self.status_log(str(e)) local_status = False try: gce_status = self.has_gce_route() if not gce_status: self.status_log("gce route is not installed") except ex.excError as e: self.status_log(str(e)) gce_status = False s = local_status & gce_status if rcEnv.nodename in self.always_on: if s: return rcStatus.STDBY_UP else: return rcStatus.STDBY_DOWN else: if s: return rcStatus.UP else: return rcStatus.DOWN def check_ping(self, count=1, timeout=5): pass def start(self): self.getaddr() self.start_local_route() self.start_gce_route() def stop(self): self.getaddr() self.stop_local_route() # don't unconfigure the gce route: too long. let the start replace it if necessary. 
#def provision(self): #m = __import__("provIpGce") #prov = getattr(m, "ProvisioningIp")(self) #prov.provisioner() opensvc-1.8~20170412/lib/rcUpdatePkgOSF1.py0000644000175000017500000000106513073467726020247 0ustar jkelbertjkelbertfrom __future__ import print_function import os import sys import tarfile from rcGlobalEnv import rcEnv repo_subdir = "tar" def update(fpath): oldpath = os.getcwd() os.chdir("/") tar = tarfile.open(fpath) try: tar.extractall() tar.close() except: try: os.unlink(fpath) except: pass print("failed to unpack", file=sys.stderr) return 1 try: os.unlink(fpath) except: pass cmd = sys.executable + ' ' + rcEnv.postinstall return os.system(cmd) opensvc-1.8~20170412/lib/resContainerVz.py0000644000175000017500000001133413073467726020361 0ustar jkelbertjkelbertimport os import rcStatus import resources as Res from rcUtilities import which, qcall, justcall import resContainer import rcExceptions as ex from rcGlobalEnv import rcEnv class Vz(resContainer.Container): def files_to_sync(self): return [self._cf] def get_cf_value(self, param): value = None try: cf = self.cf() except: return value with open(cf, 'r') as f: for line in f.readlines(): if param not in line: continue if line.strip()[0] == '#': continue l = line.replace('\n', '').split('=') if len(l) < 2: continue if l[0].strip() != param: continue value = ' '.join(l[1:]).strip().rstrip('/') break return value def get_rootfs(self): with open(self.cf(), 'r') as f: for line in f: if 'VE_PRIVATE' in line: return line.strip('\n').split('=')[1].strip('"').replace('$VEID', self.name) self.log.error("could not determine lxc container rootfs") return ex.excError def rcp_from(self, src, dst): rootfs = self.get_rootfs() if len(rootfs) == 0: raise ex.excError() src = rootfs + src cmd = ['cp', src, dst] out, err, ret = justcall(cmd) if ret != 0: raise ex.excError("'%s' execution error:\n%s"%(' '.join(cmd), err)) return out, err, ret def rcp(self, src, dst): rootfs = self.get_rootfs() if len(rootfs) == 0: raise 
ex.excError() dst = rootfs + dst cmd = ['cp', src, dst] out, err, ret = justcall(cmd) if ret != 0: raise ex.excError("'%s' execution error:\n%s"%(' '.join(cmd), err)) return out, err, ret def install_drp_flag(self): rootfs = self.get_rootfs() flag = os.path.join(rootfs, ".drp_flag") self.log.info("install drp flag in container : %s"%flag) with open(flag, 'w') as f: f.write(' ') f.close() def vzctl(self, action, options=[]): cmd = ['vzctl', action, self.name] + options ret, out, err = self.vcall(cmd) if ret != 0: raise ex.excError return out def container_start(self): self.vzctl('start') def container_stop(self): self.vzctl('stop') def container_forcestop(self): raise ex.excError def operational(self): cmd = self.runmethod + ['/sbin/ifconfig', '-a'] ret = qcall(cmd) if ret == 0: return True return False def is_up_on(self, nodename): return self.is_up(nodename) def is_up(self, nodename=None): """ CTID 101 exist mounted running """ cmd = ['vzctl', 'status', self.name] if nodename is not None: cmd = rcEnv.rsh.split() + [nodename] + cmd ret, out, err = self.call(cmd) if ret != 0: return False l = out.split() if len(l) != 5: return False if l[2] != 'exist' or \ l[3] != 'mounted' or \ l[4] != 'running': return False return True def get_container_info(self): return {'vcpus': '0', 'vmem': '0'} def check_manual_boot(self): try: cf = self.cf() except: return True with open(self.cf(), 'r') as f: for line in f: if 'ONBOOT' in line and 'yes' in line: return False return True def check_capabilities(self): if not which('vzctl'): self.log.debug("vzctl is not in PATH") return False return True def cf(self): if not os.path.exists(self._cf): self.log.error("%s does not exist"%self._cf) raise ex.excError return self._cf def __init__(self, rid, name, guestos="Linux", osvc_root_path=None, **kwargs): resContainer.Container.__init__(self, rid=rid, name=name, type="container.vz", guestos=guestos, osvc_root_path=osvc_root_path, **kwargs) self._cf = os.path.join(os.sep, 'etc', 'vz', 'conf', 
name+'.conf') self.runmethod = ['vzctl', 'exec', name] def __str__(self): return "%s name=%s" % (Res.Resource.__str__(self), self.name) opensvc-1.8~20170412/lib/checkMpathPowerpathAIX.py0000777000175000017500000000000013073467726026216 2checkMpathPowerpath.pyustar jkelbertjkelbertopensvc-1.8~20170412/lib/checkFsUsageDarwin.py0000644000175000017500000000267413073467726021114 0ustar jkelbertjkelbertimport checks from rcUtilities import justcall class check(checks.check): chk_type = "fs_u" def find_svc(self, mountpt): for svc in self.svcs: for resource in svc.get_resources('fs'): if resource.mount_point == mountpt: return svc.svcname return '' def do_check(self): cmd = ['df', '-lP'] (out,err,ret) = justcall(cmd) if ret != 0: return self.undef lines = out.split('\n') if len(lines) < 2: return self.undef r = [] for line in lines[1:]: l = line.split() if len(l) != 6: continue # discard bind mounts: we get metric from the source anyway if l[0].startswith('/') and not l[0].startswith('/dev') and not l[0].startswith('//'): continue if l[5].startswith('/Volumes'): continue if l[5].startswith('/run'): continue if l[5].startswith('/sys/'): continue if l[5] == "/dev/shm": continue if "osvc_sync_" in l[0]: # do not report osvc sync snapshots fs usage continue r.append({ 'chk_instance': l[5], 'chk_value': l[4], 'chk_svcname': self.find_svc(l[5]), }) return r opensvc-1.8~20170412/lib/checkRaidSas2.py0000644000175000017500000000726513073467726020023 0ustar jkelbertjkelbertimport checks import os from rcUtilities import justcall, which from rcGlobalEnv import rcEnv class check(checks.check): prefixes = [os.path.join(os.sep, "usr", "local", "admin")] sas2ircu = "sas2ircu" chk_type = "raid" chk_name = "LSI SAS200" def find_sas2ircu(self): if which(self.sas2ircu): return self.sas2ircu for prefix in self.prefixes: sas2ircu = os.path.join(prefix, self.sas2ircu) if os.path.exists(sas2ircu): return sas2ircu return def do_check(self): r = self.do_check_ldpdinfo() return r def 
do_check_ldpdinfo(self): sas2ircu = self.find_sas2ircu() if sas2ircu is None: return self.undef os.chdir(rcEnv.pathtmp) logs = [os.path.join(rcEnv.pathtmp, 'sas2ircu.log')] for log in logs: if not os.path.exists(log): continue os.unlink(log) cmd = [sas2ircu, 'LIST'] out, err, ret = justcall(cmd) if ret != 0: return self.undef idx = [] lines = out.split('\n') for line in lines: if 'SAS20' in line: l = line.split() idx.append(l[0]) r = [] errs = 0 for ix in idx: cmd = [sas2ircu, str(ix), 'DISPLAY'] out, err, ret = justcall(cmd) lines = out.split('\n') ctrl = "ctrl:"+str(ix) slot="" chk_dsk = 0 for line in lines: if line.startswith('IR volume'): chk_dsk = 2 if line.startswith(' Volume Name') and 'Virtual Disk' in line and (chk_dsk == 2): l = line.split() slot = 'LD'+str(l[-1]) if line.startswith(' Status of volume') and (chk_dsk == 2): if 'Okay (OKY)' not in line: r.append({ 'chk_instance': ctrl+','+slot, 'chk_value': '1', 'chk_svcname': '', }) errs += 1 else : r.append({ 'chk_instance': ctrl+','+slot, 'chk_value': '0', 'chk_svcname': '', }) if line.startswith('Device is a Hard disk'): chk_dsk = 1 if line.startswith(' Enclosure #') and (chk_dsk == 1): l = line.split() enc = l[-1] if line.startswith(' Slot #') and (chk_dsk == 1): l = line.split() slot = 'PD'+str(enc)+':'+str(l[-1]) if line.startswith(' State') and (chk_dsk == 1): if 'Optimal (OPT)' not in line: r.append({ 'chk_instance': ctrl+','+slot, 'chk_value': '1', 'chk_svcname': '', }) errs += 1 else : r.append({ 'chk_instance': ctrl+','+slot, 'chk_value': '0', 'chk_svcname': '', }) if line.startswith('Device is a Enclosure services device'): chk_dsk = 3 if line.startswith(' Enclosure #') and (chk_dsk == 3): l = line.split() slot = 'Enc'+str(l[-1]) if line.startswith(' State') and (chk_dsk == 3): if 'Standby (SBY)' not in line: r.append({ 'chk_instance': ctrl+','+slot, 'chk_value': '1', 'chk_svcname': '', }) errs += 1 else : r.append({ 'chk_instance': ctrl+','+slot, 'chk_value': '0', 'chk_svcname': '', }) 
r.append({ 'chk_instance': 'all SAS20*', 'chk_value': str(errs), 'chk_svcname': '', }) return r opensvc-1.8~20170412/lib/rcDcs.py0000644000175000017500000002222313073467726016442 0ustar jkelbertjkelbertfrom rcUtilities import justcall, which from xml.etree.ElementTree import XML, fromstring import rcExceptions as ex import os import ConfigParser import uuid from rcGlobalEnv import rcEnv if rcEnv.pathbin not in os.environ['PATH']: os.environ['PATH'] += ":"+rcEnv.pathbin def dcscmd(cmd, manager, username, password, dcs=None, conn=None): if conn is None: conn = uuid.uuid1().hex if len(cmd) == 0: return _cmd = ['ssh', manager] if dcs is not None: _cmd += ["connect-dcsserver -server %s -username %s -password %s -connection %s ; "%(dcs, username, password, conn)+\ cmd+ " ; disconnect-dcsserver -connection %s"%conn] else: _cmd += [cmd] out, err, ret = justcall(_cmd) if "ErrorId" in err: print(_cmd) print(out) raise ex.excError("dcs command execution error") try: out = out.decode("latin1").encode("utf8") except: pass return out, err, ret class Dcss(object): arrays = [] def __init__(self, objects=[]): self.objects = objects if len(objects) > 0: self.filtering = True else: self.filtering = False cf = rcEnv.authconf if not os.path.exists(cf): return conf = ConfigParser.RawConfigParser() conf.read(cf) m = [] for s in conf.sections(): try: stype = conf.get(s, 'type') except: continue if stype != "datacore": continue try: manager = s dcs = conf.get(s, 'dcs').split() username = conf.get(s, 'username') password = conf.get(s, 'password') m += [(manager, dcs, username, password)] except: print("error parsing section", s) pass del(conf) done = [] for manager, dcs, username, password in m: for name in dcs: if self.filtering and name not in self.objects: continue if name in done: continue self.arrays.append(Dcs(name, manager, username, password)) done.append(name) def __iter__(self): for array in self.arrays: yield(array) def get_dcs(self, domain): for dcs in self.arrays: _domain = 
dcs.get_domain() if _domain == domain: return dcs return None class Dcs(object): def __init__(self, name, manager, username, password, conn=None): self.name = name self.manager = manager self.username = username self.password = password self.conn = conn if conn is None: self.conn = uuid.uuid1().hex self.keys = ['dcsservergroup', 'dcsserver', 'dcspool', 'dcspoolperf', 'dcslogicaldisk', 'dcslogicaldiskperf', 'dcsvirtualdisk', 'dcsphysicaldisk', 'dcsdiskpath', 'dcsport', 'dcspoolmember'] def get_domain(self): if hasattr(self, 'domain'): return self.domain buff = self.get_dcsservergroup() for line in buff.split('\n'): if not line.startswith('Alias'): continue self.domain = line.split(': ')[-1].strip() break if hasattr(self, 'domain'): return self.domain return "unknown" def dcscmd(self, cmd): return dcscmd(cmd, self.manager, self.username, self.password, dcs=self.name, conn=self.conn) def get_dcsservergroup(self): cmd = 'get-dcsservergroup -connection %s'%self.conn print("%s: %s"%(self.name, cmd)) buff = self.dcscmd(cmd)[0] return buff def get_dcsserver(self): cmd = 'get-dcsserver -connection %s'%self.conn print("%s: %s"%(self.name, cmd)) buff = self.dcscmd(cmd)[0] return buff def get_dcspool(self): cmd = 'get-dcspool -connection %s'%self.conn print("%s: %s"%(self.name, cmd)) buff = self.dcscmd(cmd)[0] return buff def get_dcslogicaldisk(self): cmd = 'get-dcslogicaldisk -connection %s'%self.conn print("%s: %s"%(self.name, cmd)) buff = self.dcscmd(cmd)[0] return buff def get_dcsvirtualdisk(self): cmd = 'get-dcsvirtualdisk -connection %s'%self.conn print("%s: %s"%(self.name, cmd)) buff = self.dcscmd(cmd)[0] return buff def get_dcsphysicaldisk(self): cmd = 'get-dcsphysicaldisk -connection %s'%self.conn print("%s: %s"%(self.name, cmd)) buff = self.dcscmd(cmd)[0] return buff def get_dcsdiskpath(self): cmd = 'get-dcsdiskpath -connection %s'%self.conn print("%s: %s"%(self.name, cmd)) buff = self.dcscmd(cmd)[0] return buff def get_dcspoolmember(self): cmd = 'get-dcspoolmember 
-connection %s'%self.conn print("%s: %s"%(self.name, cmd)) buff = self.dcscmd(cmd)[0] return buff def get_dcspoolperf(self): cmd = 'get-dcspool -connection %s | get-dcsperformancecounter -connection %s'%(self.conn, self.conn) print("%s: %s"%(self.name, cmd)) buff = self.dcscmd(cmd)[0] return buff def get_dcslogicaldiskperf(self): cmd = 'get-dcslogicaldisk -connection %s | get-dcsperformancecounter -connection %s'%(self.conn, self.conn) print("%s: %s"%(self.name, cmd)) buff = self.dcscmd(cmd)[0] return buff def get_dcsport(self): cmd = 'get-dcsport -connection %s'%self.conn print("%s: %s"%(self.name, cmd)) buff = self.dcscmd(cmd)[0] return buff def add_vdisk(self, data): if 'disk_name' not in data: raise ex.excError("'disk_name' key is mandatory") if 'size' not in data: raise ex.excError("'size' key is mandatory") if 'paths' not in data: raise ex.excError("'paths' key is mandatory") data['disk_name'] = data['disk_name'] + '.1' l = data['paths'].split(',') paths = [] for path in l: if 'iqn' in path: c, s = path.split('-iqn') s = 'iqn' + s paths.append((c, s)) elif '-' in path: c, s = path.split('-') paths.append((c, s)) if len(paths) == 0: raise ex.excError("no initiator to present to") pools = data['dg_name'].split(',') if len(pools) == 2: _pool1 = pools[0].split(':') _pool2 = pools[1].split(':') if len(_pool1) != 2 or len(_pool2) != 2: raise ex.excError("'dg_name' value is misformatted") d = { 'disk_name': data['disk_name'], 'size': data['size'], 'sds1': _pool1[0], 'sds2': _pool2[0], 'pool1': _pool1[1], 'pool2': _pool2[1], 'conn': self.conn, } cmd = """$v = Add-DcsVirtualDisk -connection %(conn)s -Name "%(disk_name)s" -Size %(size)dGB -EnableRedundancy -FirstServer %(sds1)s -FirstPool "%(pool1)s" -SecondServer %(sds2)s -SecondPool "%(pool2)s" ;""" % d elif len(pools) == 1: _pool1 = pools[0].split(':') if len(_pool1) != 2: raise ex.excError("'dg_name' value is misformatted") d = { 'disk_name': data['disk_name'], 'size': data['size'], 'sds1': _pool1[0], 'pool1': 
_pool1[1], 'conn': self.conn, } cmd = """$v = Add-DcsVirtualDisk -connection %(conn)s -Name "%(disk_name)s" -Size %(size)dGB -Server %(sds1)s -Pool "%(pool1)s" ;""" % d else: raise ex.excError("'dg_name' value is misformatted") for machine in self.get_machines(map(lambda x: x[0], paths)): cmd += " $v | Serve-DcsVirtualDisk -connection %s -Machine %s -EnableRedundancy ;"""%(self.conn, machine) print(cmd) out, err, ret = self.dcscmd(cmd) def get_machines(self, ids): for i, id in enumerate(ids): if 'iqn' in id or ('-' in id and len(id) == 16): # iscsi or already in correct format continue # convert to dcs portname format id = list(id.upper()) for j in (14, 12, 10, 8, 6, 4, 2): id.insert(j, '-') id = ''.join(id) ids[i] = id if not hasattr(self, "buff_dcsport"): self.buff_dcsport = self.get_dcsport() machines = set([]) for line in self.buff_dcsport.split('\n'): if line.startswith('HostId'): hostid = line.split(': ')[-1].strip() elif line.startswith('Id'): id = line.split(': ')[-1].strip() if id in ids: machines.add(hostid) return machines if __name__ == "__main__": o = Dcss() for dcs in o: print(dcs.get_dcsserver()) opensvc-1.8~20170412/lib/resShareNfsLinux.py0000644000175000017500000001066713073467726020660 0ustar jkelbertjkelbertimport os from rcGlobalEnv import rcEnv from rcUtilities import justcall, which import rcStatus import rcExceptions as ex from resources import Resource class Share(Resource): def get_exports(self): self.data = {} cmd = [ 'exportfs', '-v' ] out, err, ret = justcall(cmd) if ret != 0: raise ex.excError(err) out = out.replace('\n ', '').replace('\n\t', '') for line in out.split('\n'): words = line.split() if len(words) != 2: continue path = words[0] e = words[1] if path not in self.data: self.data[path] = {} try: client, opts = self.parse_entry(e) except ex.excError as e: continue if client == '': client = '*' self.data[path][client] = opts return self.data def is_up(self): self.issues = {} self.issues_missing_client = [] self.issues_wrong_opts = 
[] self.issues_none = [] exports = self.get_exports() if self.path not in exports: return False for client in self.opts: if client not in exports[self.path]: self.issues[client] = "%s not exported to client %s"%(self.path, client) self.issues_missing_client.append(client) elif self.opts[client] > exports[self.path][client]: self.issues[client] = "%s is exported to client %s with missing options: current '%s', minimum required '%s'"%(self.path, client, ','.join(exports[self.path][client]), ','.join(self.opts[client])) self.issues_wrong_opts.append(client) else: self.issues_none.append(client) return True def start(self): try: up = self.is_up() except ex.excError as e: self.log.error("skip start because the share is in unknown state") return if up and len(self.issues) == 0: self.log.info("%s is already up" % self.path) return self.can_rollback = True for client, opts in self.opts.items(): if client in self.issues_none: continue if client in self.issues_wrong_opts: cmd = [ 'exportfs', '-u', ':'.join((client, self.path)) ] ret, out, err = self.vcall(cmd) cmd = [ 'exportfs', '-o', ','.join(opts), ':'.join((client, self.path)) ] ret, out, err = self.vcall(cmd) if ret != 0: raise ex.excError def stop(self): try: up = self.is_up() except ex.excError as e: self.log.error("continue with stop even if the share is in unknown state") if not up: self.log.info("%s is already down" % self.path) return 0 for client in self.opts: cmd = [ 'exportfs', '-u', ':'.join((client, self.path)) ] ret, out, err = self.vcall(cmd) if ret != 0: raise ex.excError def _status(self, verbose=False): try: up = self.is_up() except ex.excError as e: self.status_log(str(e)) return rcStatus.WARN if len(self.issues) > 0: self.status_log('\n'.join(self.issues.values())) return rcStatus.WARN if rcEnv.nodename in self.always_on: if up: return rcStatus.STDBY_UP else: return rcStatus.STDBY_DOWN else: if up: return rcStatus.UP else: return rcStatus.DOWN def parse_entry(self, e): if '(' not in e or ')' not in e: 
raise ex.excError("malformed share opts: '%s'. must be in client(opts) client(opts) format"%e) _l = e.split('(') client = _l[0] opts = _l[1].strip(')') return client, set(opts.split(',')) def __init__(self, rid, path, opts, **kwargs): Resource.__init__(self, rid, type="share.nfs", **kwargs) if not which("exportfs"): raise ex.excInitError("exportfs is not installed") self.label = "nfs:"+path self.path = path l = opts.replace('\\', '').split() self.opts = {} for e in l: try: client, opts = self.parse_entry(e) except ex.excError as e: raise ex.excInitError(str(e)) self.opts[client] = opts opensvc-1.8~20170412/lib/resIpAIX.py0000644000175000017500000000161013073467726017025 0ustar jkelbertjkelbertimport resIp as Res import rcExceptions as ex from rcUtilitiesAIX import check_ping from rcUtilities import to_cidr, to_dotted class Ip(Res.Ip): def check_ping(self, count=1, timeout=5): self.log.info("checking %s availability"%self.addr) return check_ping(self.addr, count=count, timeout=timeout) def arp_announce(self): return def startip_cmd(self): if ':' in self.addr: cmd = ['ifconfig', self.ipdev, 'inet6', 'alias', '/'.join([self.addr, to_cidr(self.mask)])] else: cmd = ['ifconfig', self.ipdev, self.addr, 'netmask', to_dotted(self.mask), 'alias'] return self.vcall(cmd) def stopip_cmd(self): if ':' in self.addr: cmd = ['ifconfig', self.ipdev, 'inet6', self.addr, 'delete'] else: cmd = ['ifconfig', self.ipdev, self.addr, 'delete'] return self.vcall(cmd) opensvc-1.8~20170412/lib/resContainerOpenstack.py0000644000175000017500000002573313073467726021721 0ustar jkelbertjkelbertimport rcStatus import resources as Res import time import os import rcExceptions as ex from rcGlobalEnv import rcEnv from rcUtilities import justcall from rcUtilitiesLinux import check_ping import resContainer class CloudVm(resContainer.Container): startup_timeout = 240 shutdown_timeout = 120 save_timeout = 240 def __init__(self, rid, name, guestos=None, cloud_id=None, size="tiny", key_name=None, 
shared_ip_group=None, osvc_root_path=None, **kwargs): resContainer.Container.__init__(self, rid=rid, name=name, type="container.openstack", guestos=guestos, osvc_root_path=osvc_root_path, **kwargs) self.cloud_id = cloud_id self.save_name = name + '.save' self.size_name = size self.key_name = key_name self.shared_ip_group = shared_ip_group self.addr = None def keyfile(self): kf = [os.path.join(rcEnv.pathetc, self.key_name+'.pem'), os.path.join(rcEnv.pathetc, self.key_name+'.pub'), os.path.join(rcEnv.pathvar, self.key_name+'.pem'), os.path.join(rcEnv.pathvar, self.key_name+'.pub')] for k in kf: if os.path.exists(k): return k raise ex.excError("key file for key name '%s' not found"%self.key_name) def rcp_from(self, src, dst): if self.guestos == "Windows": """ Windows has no sshd. """ raise ex.excNotSupported("remote copy not supported on Windows") self.getaddr() if self.addr is None: raise ex.excError('no usable public ip to send files to') timeout = 5 cmd = [ 'scp', '-o', 'StrictHostKeyChecking=no', '-o', 'ConnectTimeout='+str(timeout), '-i', self.keyfile(), self.addr+':'+src, dst] out, err, ret = justcall(cmd) if ret != 0: raise ex.excError("'%s' execution error:\n%s"%(' '.join(cmd), err)) return out, err, ret def rcp(self, src, dst): if self.guestos == "Windows": """ Windows has no sshd. """ raise ex.excNotSupported("remote copy not supported on Windows") self.getaddr() if self.addr is None: raise ex.excError('no usable public ip to send files to') timeout = 5 cmd = [ 'scp', '-o', 'StrictHostKeyChecking=no', '-o', 'ConnectTimeout='+str(timeout), '-i', self.keyfile(), src, self.addr+':'+dst] out, err, ret = justcall(cmd) if ret != 0: raise ex.excError("'%s' execution error:\n%s"%(' '.join(cmd), err)) return out, err, ret def rcmd(self, cmd): if self.guestos == "Windows": """ Windows has no sshd. 
""" raise ex.excNotSupported("remote commands not supported on Windows") self.getaddr() if self.addr is None: raise ex.excError('no usable public ip to send command to') if type(cmd) == str: cmd = cmd.split(" ") timeout = 5 cmd = [ 'ssh', '-o', 'StrictHostKeyChecking=no', '-o', 'ForwardX11=no', '-o', 'BatchMode=yes', '-n', '-o', 'ConnectTimeout='+str(timeout), '-i', self.keyfile(), self.addr] + cmd out, err, ret = justcall(cmd) if ret != 0: raise ex.excError("'%s' execution error:\n%s"%(' '.join(cmd), err)) return out, err, ret def get_size(self): c = self.get_cloud() for size in c.driver.list_sizes(): if size.name == self.size_name: return size raise ex.excError("%s size not found"%self.size_name) def get_cloud(self): if hasattr(self, 'cloud'): return self.cloud c = self.svc.node.cloud_get(self.cloud_id) self.cloud = c return self.cloud def get_node(self): c = self.get_cloud() l = c.list_nodes() for n in l: if n.name == self.name: return n return def get_save_name(self): import datetime now = datetime.datetime.now() save_name = self.save_name + now.strftime(".%Y-%m-%d.%H:%M:%S") return save_name def purge_saves(self): c = self.get_cloud() l = c.driver.list_images() d = {} for image in l: if image.name.startswith(self.save_name): d[image.name] = image if len(d) == 0: raise ex.excError("no save image found") elif len(d) == 1: self.log.info("no previous save image to delete") for k in sorted(d.keys())[:-1]: self.log.info("delete previous save image %s"%d[k].name) c.driver.ex_delete_image(d[k]) def get_last_save(self): return self.get_image(self.save_name) def get_template(self): template = self.svc.config.defaults()['template'] return self.get_image(template) def get_image(self, name): c = self.get_cloud() l = c.driver.list_images() d = {} for image in l: if image.name == name: # exact match return image elif image.name.startswith(name): d[image.name] = image if len(d) == 0: raise ex.excError("image %s not found"%name) for k in sorted(d.keys()): last = d[k] return 
last def has_image(self, name): c = self.get_cloud() l = c.driver.list_images() for image in l: if image.name == name: return True return False def __str__(self): return "%s name=%s" % (Res.Resource.__str__(self), self.name) def getaddr(self): if self.addr is not None: return n = self.get_node() if n is None: raise ex.excError("could not get node details") ips = set(n.public_ips+n.private_ips) if len(ips) == 0: return 0 # find first pinging ip for ip in ips: if check_ping(ip, timeout=1, count=1): self.addr = ip break return 0 def files_to_sync(self): return [] def check_capabilities(self): return True def ping(self): if self.addr is None: return 0 return check_ping(self.addr, timeout=1, count=1) def start(self): if self.is_up(): self.log.info("container %s already started" % self.name) return if rcEnv.nodename in self.svc.drpnodes: self.install_drp_flag() self.container_start() self.can_rollback = True self.wait_for_startup() def container_start(self): n = self.get_node() if n is not None: if n.state == 4: self.log.info("reboot %s"%self.name) self.container_reboot() else: raise ex.excError("abort reboot because vm is in state %d (!=4)"%n.state) else: self.container_restore() def container_reboot(self): c = self.get_cloud() n = self.get_node() try: c.driver.reboot_node(n) except Exception as e: raise ex.excError(str(e)) def container_restore(self): c = self.get_cloud() image = self.get_last_save() size = self.get_size() self.log.info("create instance %s, size %s, image %s, key %s"%(self.name, size.name, image.name, self.key_name)) n = c.driver.create_node(name=self.name, size=size, image=image, ex_keyname=self.key_name, ex_shared_ip_group_id=self.shared_ip_group) self.log.info("wait for container up status") self.wait_for_fn(self.is_up, self.startup_timeout, 5) #n = c.driver.ex_update_node(n, accessIPv4='46.231.128.84') def wait_for_startup(self): pass def stop(self): if self.is_down(): self.log.info("container %s already stopped" % self.name) return 
self.container_stop() try: self.wait_for_shutdown() except ex.excError: self.container_forcestop() self.wait_for_shutdown() def container_stop(self): cmd = "shutdown -h now" self.log.info("remote command: %s"%cmd) self.rcmd(cmd) def container_forcestop(self): c = self.get_cloud() n = self.get_node() self.container_save() c.driver.destroy_node(n) self.purge_saves() def print_obj(self, n): for k in dir(n): if '__' in k: continue print(k, "=", getattr(n, k)) def container_save(self): c = self.get_cloud() n = self.get_node() save_name = self.get_save_name() if self.has_image(save_name): return #self.print_obj(n) if n.state == 9999: self.log.info("a save is already in progress") return self.log.info("save new image %s"%save_name) try: image = c.driver.ex_save_image(n, save_name) except Exception as e: raise ex.excError(str(e)) import time delay = 5 for i in range(self.save_timeout//delay): img = c.driver.ex_get_image(image.id) if img.extra['status'] != 'SAVING': break time.sleep(delay) if img.extra['status'] != 'ACTIVE': raise ex.excError("save failed, image status %s"%img.extra['status']) def is_up(self): n = self.get_node() if n is not None and n.state == 0: return True return False def get_container_info(self): self.info = {'vcpus': '0', 'vmem': '0'} c = self.get_cloud() n = self.get_node() try: size = c.driver.ex_get_size(n.extra['flavorId']) self.info['vmem'] = str(size.ram) except: pass return self.info def check_manual_boot(self): return True def install_drp_flag(self): pass def provision(self): c = self.get_cloud() image = self.get_template() size = self.get_size() self.log.info("create instance %s, size %s, image %s, key %s"%(self.name, size.name, image.name, self.key_name)) c.driver.create_node(name=self.name, size=size, image=image, ex_keyname=self.key_name, ex_shared_ip_group_id=self.shared_ip_group) #self.wait_for_startup() self.wait_for_fn(self.is_up, self.startup_timeout, 5) 
opensvc-1.8~20170412/lib/rcStatsAIX.py0000644000175000017500000000776613073467726017410 0ustar jkelbertjkelbertimport os import datetime from rcUtilities import call, which from rcGlobalEnv import rcEnv today = datetime.datetime.today() yesterday = today - datetime.timedelta(days=1) def sarfile(day): f = os.path.join(os.sep, 'var', 'adm', 'sa', 'sa'+day) if os.path.exists(f): return f return None def twodays(fn): if which('sar') is None: return [] lines = fn(yesterday) lines += fn(today) return lines def stats_cpu(): return twodays(stats_cpu_day) def stats_cpu_day(t): d = t.strftime("%Y-%m-%d") day = t.strftime("%d") f = sarfile(day) if f is None: return [] cmd = ['sar', '-u', '-P', 'ALL', '-f', f] (ret, buff, err) = call(cmd, errlog=False) lines = [] for line in buff.split('\n'): l = line.split() if len(l) != 6: continue if l[1] == '%usr': continue if l[0] == 'Average': continue # SunOS: date %usr %sys %wio %idle # xmlrpc: date cpu usr nice sys iowait steal irq soft guest idle nodename x = ['%s %s'%(d, l[0]), 'all', '0', '0', '0', '0', '0', '0', '0', '0', '0', rcEnv.nodename] x[1] = l[1].replace('-', 'all') x[2] = l[2] x[4] = l[3] x[5] = l[4] x[10] = l[5] lines.append(x) return lines def stats_mem_u(file, collect_date=None): return twodays(stats_mem_u_day) def stats_mem_u_day(t): return [] def stats_proc(file, collect_date=None): return twodays(stats_proc_day) def stats_proc_day(t): d = t.strftime("%Y-%m-%d") day = t.strftime("%d") f = sarfile(day) if f is None: return [] cmd = ['sar', '-q', '-f', f] (ret, buff, err) = call(cmd) lines = [] for line in buff.split('\n'): l = line.split() if len(l) < 3: continue if ':' not in l[0]: continue """ xmlrpc: date runq_sz plist_sz ldavg_1 ldavg_5 ldavg_15 nodename """ x = ['%s %s'%(d, l[0]), l[1], '0', '0', '0', '0', rcEnv.nodename] lines.append(x) return lines def stats_swap(file, collect_date=None): return twodays(stats_swap_day) def stats_swap_day(t): return [] def stats_block(file, collect_date=None): return 
twodays(stats_block_day) def stats_block_day(t): d = t.strftime("%Y-%m-%d") day = t.strftime("%d") f = sarfile(day) if f is None: return [] cmd = ['sar', '-b', '-f', f] (ret, buff, err) = call(cmd) lines = [] for line in buff.split('\n'): l = line.split() if len(l) != 9: continue if ':' not in l[1]: continue """ xmlrpc: date tps rtps wtps rbps wbps nodename """ x = ['%s %s'%(d, l[0]), '0', '0', '0', l[1], l[4], rcEnv.nodename] lines.append(x) return lines def stats_blockdev(file, collect_date=None): return twodays(stats_blockdev_day) def stats_blockdev_day(t): d = t.strftime("%Y-%m-%d") day = t.strftime("%d") f = sarfile(day) if f is None: return [] cmd = ['sar', '-d', '-f', f] (ret, buff, err) = call(cmd, errlog=False) lines = [] for line in buff.split('\n'): l = line.split() if len(l) == 8: date = l[0] if len(l) == 7: l = [date] + l if len(l) != 8: continue if l[1] == 'device': continue if l[0] == 'Average': continue """ xmlrpc: 22:05:01 DEV tps rd_sec/s wr_sec/s avgrq-sz avgqu-sz await svctm %util 00:00:00 device %busy avque r+w/s blks/s avwait avserv """ x = ['%s %s'%(d, l[0]), l[1], l[4], '0', '0', '0', l[3], l[6], l[7], l[2], rcEnv.nodename] lines.append(x) return lines def stats_netdev(file, collect_date=None): return twodays(stats_netdev_day) def stats_netdev_day(t): return [] def stats_netdev_err(file, collect_date=None): return twodays(stats_netdev_err_day) def stats_netdev_err_day(t): return [] opensvc-1.8~20170412/lib/rcUtilitiesOSF1.py0000644000175000017500000000040613073467726020334 0ustar jkelbertjkelbertfrom rcUtilities import call def check_ping(addr, timeout=5, count=1): cmd = ['ping', '-c', repr(count), '-t', repr(timeout), addr] (ret, out, err) = call(cmd) if ret == 0: return True return False opensvc-1.8~20170412/lib/hostidDarwin.py0000644000175000017500000000010213073467726020033 0ustar jkelbertjkelbertfrom uuid import getnode def hostid(): return str(getnode()) 
opensvc-1.8~20170412/lib/rcUtilitiesWindows.py0000644000175000017500000000140413073467726021255 0ustar jkelbertjkelbertimport os import re from rcUtilities import justcall import string from ctypes import windll def check_ping(addr, timeout=5, count=1): ping = 'ping.exe' cmd = [ping, '-n', repr(count), '-w', repr(timeout), addr] out, err, ret = justcall(cmd) if ret == 0: return True return False def get_registry_value(key, subkey, value): import _winreg key = getattr(_winreg, key) handle = _winreg.OpenKey(key, subkey) (value, type) = _winreg.QueryValueEx(handle, value) return value def get_drives(): drives = [] bitmask = windll.kernel32.GetLogicalDrives() for letter in string.uppercase: if bitmask & 1: drives.append(letter) bitmask >>= 1 return drives opensvc-1.8~20170412/lib/rcStatsHP-UX.py0000644000175000017500000001521613073467726017615 0ustar jkelbertjkelbertimport os from rcUtilities import call from rcGlobalEnv import rcEnv import rcStats class StatsProvider(rcStats.StatsProvider): def glancefile(self, day): f = os.path.join(rcEnv.pathvar, 'glance'+day) if os.path.exists(f): return f return None def cpu(self, d, day, start, end): f = self.glancefile(day) if f is None: return [], [] cols = ['date', 'cpu', 'usr', 'nice', 'sys', 'iowait', 'steal', 'irq', 'soft', 'guest', 'idle', 'nodename'] lines = [] with open(f, 'r') as file: for line in file: l = line.split() if len(l) != 24: continue """ hpux: usr nice sys irq wait idle 1 2 3 4 5 6 xmlrpc: date cpu usr nice sys iowait steal irq soft guest idle nodename """ ts = '%s %s'%(d, l[0]) ts = ts.replace('\0','') x = [ts, 'all', l[1], l[2], l[3], l[5], '0', l[4], '0', '0', l[6], self.nodename] lines.append(x) return cols, lines def mem_u(self, d, day, start, end): f = self.glancefile(day) if f is None: return [], [] cols = ['date', 'kbmemfree', 'kbmemused', 'pct_memused', 'kbbuffers', 'kbcached', 'kbcommit', 'pct_commit', 'kbmemsys', 'nodename'] lines = [] with open(f, 'r') as file: for line in file: l = line.split() 
if len(l) != 24: continue """ hpux: phys kbmemfree kbcached kbfilecached kbsys kbuser kbswapused kbswap 7 8 9 10 11 12 13 14 xmlrpc: date kbmemfree kbmemused pct_memused kbbuffers kbcached kbcommit pct_commit kbmemsys nodename """ phys = int(l[7]) free = int(l[8]) swapused = int(l[13]) swap = int(l[14]) used = phys - free commit = used + swapused vm = phys + swap if vm == 0 or phys == 0: continue pct_commit = 100 * commit / vm pct_used = 100 * used / phys ts = '%s %s'%(d, l[0]) ts = ts.replace('\0','') x = [ts, l[8], str(used), str(pct_used), l[9], l[10], str(commit), str(pct_commit), l[11], self.nodename] lines.append(x) return cols, lines def proc(self, d, day, start, end): f = self.glancefile(day) if f is None: return [], [] cols = ['date', 'runq_sz', 'plist_sz', 'ldavg_1', 'ldavg_5', 'ldavg_15', 'nodename'] lines = [] with open(f, 'r') as file: for line in file.readlines(): l = line.split() if len(l) != 24: continue """ hpux: GBL_LOADAVG GBL_LOADAVG5 GBL_LOADAVG15 GBL_CPU_QUEUE TBL_PROC_TABLE_USED 15 16 17 18 19 xmlrpc: date runq_sz plist_sz ldavg_1 ldavg_5 ldavg_15 nodename """ ts = '%s %s'%(d, l[0]) ts = ts.replace('\0','') x = [ts, l[18], l[19], l[15], l[16], l[17], self.nodename] lines.append(x) return cols, lines def swap(self, d, day, start, end): f = self.glancefile(day) if f is None: return [], [] lines = [] cols = ['date', 'kbswpfree', 'kbswpused', 'pct_swpused', 'kbswpcad', 'pct_swpcad', 'nodename'] with open(f, 'r') as file: for line in file.readlines(): l = line.split() if len(l) != 24: continue """ hpux: kbswapused kbswap 13 14 xmlrpc: date kbswpfree kbswpused pct_swpused kbswpcad pct_swpcad nodename """ swapused = int(l[13]) swap = int(l[14]) swapfree = swap - swapused ts = '%s %s'%(d, l[0]) ts = ts.replace('\0','') x = [ts, str(swapfree), l[13], str(100 * swapused / swap), '0', '0', self.nodename] lines.append(x) return cols, lines def block(self, d, day, start, end): f = self.glancefile(day) if f is None: return [], [] cols = ['date', 'tps', 
'rtps', 'wtps', 'rbps', 'wbps', 'nodename'] lines = [] with open(f, 'r') as file: for line in file.readlines(): l = line.split() if len(l) != 24: continue """ hpux: rio wio rkb wkb 20 21 22 23 xmlrpc: date tps rtps wtps rbps wbps nodename """ tps = float(l[20]) + float(l[21]) ts = '%s %s'%(d, l[0]) ts = ts.replace('\0','') x = [ts, str(tps), l[20], l[21], l[22], l[23], self.nodename] lines.append(x) return cols, lines opensvc-1.8~20170412/lib/provFsExt3.py0000644000175000017500000000017613073467726017432 0ustar jkelbertjkelbertimport provFs class ProvisioningFs(provFs.ProvisioningFs): mkfs = ['mkfs.ext3', '-F', '-q'] info = ['tune2fs', '-l'] opensvc-1.8~20170412/lib/resIpFreeBSD.py0000644000175000017500000000163013073467726017620 0ustar jkelbertjkelbertimport resIp as Res import rcExceptions as ex from rcUtilitiesFreeBSD import check_ping from rcUtilities import to_cidr, to_dotted class Ip(Res.Ip): def check_ping(self, count=1, timeout=5): self.log.info("checking %s availability"%self.addr) return check_ping(self.addr, count=count, timeout=timeout) def arp_announce(self): return def startip_cmd(self): if ':' in self.addr: cmd = ['ifconfig', self.ipdev, 'inet6', '/'.join([self.addr, to_cidr(self.mask)]), 'add'] else: cmd = ['ifconfig', self.ipdev, 'inet', self.addr, 'netmask', to_dotted(self.mask), 'add'] return self.vcall(cmd) def stopip_cmd(self): if ':' in self.addr: cmd = ['ifconfig', self.ipdev, 'inet6', self.addr, 'delete'] else: cmd = ['ifconfig', self.ipdev, 'inet', self.addr, 'delete'] return self.vcall(cmd) opensvc-1.8~20170412/lib/tabulate.py0000644000175000017500000006364613073467726017223 0ustar jkelbertjkelbert# -*- coding: utf-8 -*- """Pretty-print tabular data.""" from __future__ import print_function from __future__ import unicode_literals from collections import namedtuple from platform import python_version_tuple import re if python_version_tuple()[0] < "3": from itertools import izip_longest _none_type = type(None) _int_type = int _float_type = 
float _text_type = unicode _binary_type = str else: from itertools import zip_longest as izip_longest from functools import reduce _none_type = type(None) _int_type = int _float_type = float _text_type = str _binary_type = bytes __all__ = ["tabulate"] __version__ = "0.6" def __text_type(s): try: return _text_type(s, errors="ignore") except: return s Line = namedtuple("Line", ["begin", "hline", "sep", "end"]) DataRow = namedtuple("DataRow", ["begin", "sep", "end"]) TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader", "linebetweenrows", "linebelow", "headerrow", "datarow", "padding", "usecolons", "usehtmlattrs", "with_header_hide", "without_header_hide"]) _format_defaults = {"padding": 0, "usecolons": False, "usehtmlattrs": False, "with_header_hide": [], "without_header_hide": []} _table_formats = {"simple": TableFormat(lineabove=None, linebelowheader=Line("", "-", " ", ""), linebetweenrows=None, linebelow=Line("", "-", " ", ""), headerrow=DataRow("", " ", ""), datarow=DataRow("", " ", ""), padding=0, usecolons=False, usehtmlattrs=False, with_header_hide=["linebelow"], without_header_hide=[]), "plain": TableFormat(lineabove=None, linebelowheader=None, linebetweenrows=Line("+", "-", "+", "+"), linebelow=None, headerrow=DataRow(" ", " ", " "), datarow=DataRow("|", "|", "|"), padding=0, usecolons=_format_defaults["usecolons"], usehtmlattrs=_format_defaults["usehtmlattrs"], with_header_hide=_format_defaults["with_header_hide"], without_header_hide=_format_defaults["without_header_hide"]), "grid": TableFormat(lineabove=Line("+", "-", "+", "+"), linebelowheader=Line("+", "=", "+", "+"), linebetweenrows=Line("+", "-", "+", "+"), linebelow=Line("+", "-", "+", "+"), headerrow=DataRow("|", "|", "|"), datarow=DataRow("|", "|", "|"), padding=1, usecolons=False, usehtmlattrs=False, with_header_hide=[], without_header_hide=["linebelowheader"]), "pipe": TableFormat(lineabove=None, linebelowheader=Line("|", "-", "|", "|"), linebetweenrows=None, linebelow=None, 
headerrow=DataRow("|", "|", "|"), datarow=DataRow("|", "|", "|"), padding=1, usecolons=True, usehtmlattrs=False, with_header_hide=[], without_header_hide=[]), "orgtbl": TableFormat(lineabove=None, linebelowheader=Line("|", "-", "+", "|"), linebetweenrows=None, linebelow=None, headerrow=DataRow("|", "|", "|"), datarow=DataRow("|", "|", "|"), padding=1, usecolons=False, usehtmlattrs=False, with_header_hide=[], without_header_hide=["linebelowheader"]), "rst": TableFormat(lineabove=Line("", "=", " ", ""), linebelowheader=Line("", "=", " ", ""), linebetweenrows=None, linebelow=Line("", "=", " ", ""), headerrow=DataRow("", " ", ""), datarow=DataRow("", " ", ""), padding=0, usecolons=False, usehtmlattrs=False, with_header_hide=[], without_header_hide=["linebelowheader"]), "mediawiki": TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"", "", "", "\n|+ \n|-"), linebelowheader=Line("|-", "", "", ""), linebetweenrows=Line("|-", "", "", ""), linebelow=Line("|}", "", "", ""), headerrow=DataRow("!", "!!", ""), datarow=DataRow("|", "||", ""), padding=1, usecolons=False, usehtmlattrs=True, with_header_hide=[], without_header_hide=["linebelowheader"])} _invisible_codes = re.compile("\x1b\[\d*m") # ANSI color codes def simple_separated_format(separator): """Construct a simple TableFormat with columns separated by a separator. 
>>> tsv = simple_separated_format("\t") ; \ tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == u'foo \\t 1\\nspam\\t23' True """ return TableFormat(None, None, None, None, headerrow=None, datarow=DataRow('', '\t', ''), padding=_format_defaults["padding"], usecolons=_format_defaults["usecolons"], usehtmlattrs=_format_defaults["usehtmlattrs"], with_header_hide=_format_defaults["with_header_hide"], without_header_hide=_format_defaults["without_header_hide"]) def _isconvertible(conv, string): try: n = conv(string) return True except ValueError: return False def _isnumber(string): """ >>> _isnumber("123.45") True >>> _isnumber("123") True >>> _isnumber("spam") False """ return _isconvertible(float, string) def _isint(string): """ >>> _isint("123") True >>> _isint("123.45") False """ return type(string) is int or \ (isinstance(string, _binary_type) or isinstance(string, _text_type)) and \ _isconvertible(int, string) def _type(string, has_invisible=True): """The least generic type (type(None), int, float, str, unicode). >>> _type(None) is type(None) True >>> _type("foo") is type("") True >>> _type("1") is type(1) True >>> _type(u'\x1b[31m42\x1b[0m') is type(42) True >>> _type('\x1b[31m42\x1b[0m') is type(42) True """ if has_invisible and \ (isinstance(string, _text_type) or isinstance(string, _binary_type)): string = _strip_invisible(string) if string is None: return _none_type elif _isint(string): return int elif _isnumber(string): return float elif isinstance(string, _binary_type): return _binary_type else: return _text_type def _afterpoint(string): """Symbols after a decimal point, -1 if the string lacks the decimal point. 
>>> _afterpoint("123.45") 2 >>> _afterpoint("1001") -1 >>> _afterpoint("eggs") -1 >>> _afterpoint("123e45") 2 """ if _isnumber(string): if _isint(string): return -1 else: pos = string.rfind(".") pos = string.lower().rfind("e") if pos < 0 else pos if pos >= 0: return len(string) - pos - 1 else: return -1 # no point else: return -1 # not a number def _padleft(width, s, has_invisible=True): """Flush right. >>> _padleft(6, u'\u044f\u0439\u0446\u0430') == u' \u044f\u0439\u0446\u0430' True """ iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width fmt = u"{0:>%ds}" % iwidth return fmt.format(s) def _padright(width, s, has_invisible=True): """Flush left. >>> _padright(6, u'\u044f\u0439\u0446\u0430') == u'\u044f\u0439\u0446\u0430 ' True """ iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width fmt = u"{0:<%ds}" % iwidth return fmt.format(s) def _padboth(width, s, has_invisible=True): """Center string. >>> _padboth(6, u'\u044f\u0439\u0446\u0430') == u' \u044f\u0439\u0446\u0430 ' True """ iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width fmt = u"{0:^%ds}" % iwidth return fmt.format(s) def _strip_invisible(s): "Remove invisible ANSI color codes." return re.sub(_invisible_codes, "", s) def _visible_width(s): """Visible width of a printed string. ANSI color codes are removed. 
>>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world") (5, 5) """ if isinstance(s, _text_type) or isinstance(s, _binary_type): return len(_strip_invisible(s)) else: return len(_text_type(s)) def _align_column(strings, alignment, minwidth=0, has_invisible=True): """[string] -> [padded_string] >>> list(map(str,_align_column(["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"], "decimal"))) [' 12.345 ', '-1234.5 ', ' 1.23 ', ' 1234.5 ', ' 1e+234 ', ' 1.0e234'] """ if alignment == "right": strings = [s.strip() for s in strings] padfn = _padleft elif alignment in "center": strings = [s.strip() for s in strings] padfn = _padboth elif alignment in "decimal": decimals = [_afterpoint(s) for s in strings] maxdecimals = max(decimals) strings = [s + (maxdecimals - decs) * " " for s, decs in zip(strings, decimals)] padfn = _padleft else: strings = [s.strip() for s in strings] padfn = _padright if has_invisible: width_fn = _visible_width else: width_fn = len maxwidth = max(max(map(width_fn, strings)), minwidth) padded_strings = [padfn(maxwidth, s, has_invisible) for s in strings] return padded_strings def _more_generic(type1, type2): types = { _none_type: 0, int: 1, float: 2, _text_type: 4 } invtypes = { 4: _text_type, 2: float, 1: int, 0: _none_type } moregeneric = max(types.get(type1, 4), types.get(type2, 4)) return invtypes[moregeneric] def _column_type(strings, has_invisible=True): """The least generic type all column values are convertible to. 
>>> _column_type(["1", "2"]) is _int_type True >>> _column_type(["1", "2.3"]) is _float_type True >>> _column_type(["1", "2.3", "four"]) is _text_type True >>> _column_type(["four", u'\u043f\u044f\u0442\u044c']) is _text_type True >>> _column_type([None, "brux"]) is _text_type True >>> _column_type([1, 2, None]) is _int_type True """ types = [_type(s, has_invisible) for s in strings ] return reduce(_more_generic, types, int) def _format(val, valtype, floatfmt, missingval=u""): """Format a value accoding to its type. Unicode is supported: >>> hrow = [u'\u0431\u0443\u043a\u0432\u0430', u'\u0446\u0438\u0444\u0440\u0430'] ; \ tbl = [[u'\u0430\u0437', 2], [u'\u0431\u0443\u043a\u0438', 4]] ; \ good_result = u'\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \ tabulate(tbl, headers=hrow) == good_result True """ if val is None: return missingval if valtype in [int, _binary_type, _text_type]: return u"{0}".format(__text_type(val)) elif valtype is float: return format(float(val), floatfmt) else: return u"{0}".format(val) def _align_header(header, alignment, width): if alignment == "left": return _padright(width, header) elif alignment == "center": return _padboth(width, header) else: return _padleft(width, header) def _normalize_tabular_data(tabular_data, headers): """Transform a supported data type to a list of lists, and a list of headers. Supported tabular data types: * list-of-lists or another iterable of iterables * 2D NumPy arrays * dict of iterables (usually used with headers="keys") * pandas.DataFrame (usually used with headers="keys") The first row can be used as headers if headers="firstrow", column indices can be used as headers if headers="keys". """ if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"): # dict-like and pandas.DataFrame? 
if hasattr(tabular_data.values, "__call__"): # likely a conventional dict keys = tabular_data.keys() rows = list(izip_longest(*tabular_data.values())) # columns have to be transposed elif hasattr(tabular_data, "index"): # values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0) keys = tabular_data.keys() vals = tabular_data.values # values matrix doesn't need to be transposed names = tabular_data.index rows = [[v]+list(row) for v,row in zip(names, vals)] else: raise ValueError("tabular data doesn't appear to be a dict or a DataFrame") if headers == "keys": headers = list(map(_text_type,keys)) # headers should be strings else: # it's a usual an iterable of iterables, or a NumPy array rows = list(tabular_data) if headers == "keys" and len(rows) > 0: # keys are column indices headers = list(map(_text_type, range(len(rows[0])))) # take headers from the first row if necessary if headers == "firstrow" and len(rows) > 0: headers = list(map(_text_type, rows[0])) # headers should be strings rows = rows[1:] headers = list(headers) rows = list(map(list,rows)) # pad with empty headers for initial columns if necessary if headers and len(rows) > 0: nhs = len(headers) ncols = len(rows[0]) if nhs < ncols: headers = [u""]*(ncols - nhs) + headers return rows, headers def tabulate(tabular_data, headers=[], tablefmt="simple", floatfmt="g", numalign="decimal", stralign="left", missingval=u""): """Format a fixed width table for pretty printing. >>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]])) --- --------- 1 2.34 -56 8.999 2 10001 --- --------- The first required argument (`tabular_data`) can be a list-of-lists (or another iterable or iterables), a dictionary of iterables, a two-dimensional NumPy array, or a Pandas' dataframe. 
Table headers ------------- To print nice column headers, supply the second argument (`headers`): - `headers` can be an explicit list of column headers - if `headers="firstrow"`, then the first row of data is used - if `headers="keys"`, then dictionary keys or column indices are used Otherwise a headerless table is produced. If the number of headers is less than the number of columns, they are supposed to be names of the last columns. This is consistent with the plain-text format of R and Pandas' dataframes. >>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]], ... headers="firstrow")) sex age ----- ----- ----- Alice F 24 Bob M 19 Column alignment ---------------- `tabulate` tries to detect column types automatically, and aligns the values properly. By default it aligns decimal points of the numbers (or flushes integer numbers to the right), and flushes everything else to the left. Possible column alignments (`numalign`, `stralign`) are: right, center, left, decimal (only for `numalign`). Table formats ------------- `floatfmt` is a format specification used for columns which contain numeric data with a decimal point. `None` values are replaced with a `missingval` string: >>> print(tabulate([["spam", 1, None], ... ["eggs", 42, 3.14], ... ["other", None, 2.7]], missingval="?")) ----- -- ---- spam 1 ? eggs 42 3.14 other ? 2.7 ----- -- ---- Various plain-text table formats (`tablefmt`) are supported: 'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', and 'mediawiki'. "plain" format doesn't use any pseudographics to draw tables, it separates columns with a double space: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "plain")) strings numbers spam 41.9999 eggs 451 >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain")) spam 41.9999 eggs 451 "simple" format is like Pandoc simple_tables: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... 
["strings", "numbers"], "simple")) strings numbers --------- --------- spam 41.9999 eggs 451 >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple")) ---- -------- spam 41.9999 eggs 451 ---- -------- "grid" is similar to tables produced by Emacs table.el package or Pandoc grid_tables: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "grid")) +-----------+-----------+ | strings | numbers | +===========+===========+ | spam | 41.9999 | +-----------+-----------+ | eggs | 451 | +-----------+-----------+ >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid")) +------+----------+ | spam | 41.9999 | +------+----------+ | eggs | 451 | +------+----------+ "pipe" is like tables in PHP Markdown Extra extension or Pandoc pipe_tables: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "pipe")) | strings | numbers | |:----------|----------:| | spam | 41.9999 | | eggs | 451 | >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe")) |:-----|---------:| | spam | 41.9999 | | eggs | 451 | "orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They are slightly different from "pipe" format by not using colons to define column alignment, and using a "+" sign to indicate line intersections: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "orgtbl")) | strings | numbers | |-----------+-----------| | spam | 41.9999 | | eggs | 451 | >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl")) | spam | 41.9999 | | eggs | 451 | "rst" is like a simple table format from reStructuredText; please note that reStructuredText accepts also "grid" tables: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... 
["strings", "numbers"], "rst")) ========= ========= strings numbers ========= ========= spam 41.9999 eggs 451 ========= ========= >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst")) ==== ======== spam 41.9999 eggs 451 ==== ======== "mediawiki" produces a table markup used in Wikipedia and on other MediaWiki-based sites: >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]], ... headers="firstrow", tablefmt="mediawiki")) {| class="wikitable" style="text-align: left;" |+ |- ! strings !! align="right"| numbers |- | spam || align="right"| 41.9999 |- | eggs || align="right"| 451 |} >>> print(tabulate([["eggs", 42], ["spam", 23]], tablefmt="mediawiki", stralign="left")) {| class="wikitable" style="text-align: left;" |+ |- | eggs || align="right"| 42 |- | spam || align="right"| 23 |} """ list_of_lists, headers = _normalize_tabular_data(tabular_data, headers) # optimization: look for ANSI control codes once, # enable smart width functions only if a control code is found plain_text = u'\n'.join(['\t'.join(map(_text_type, headers))] + \ [u'\t'.join(map(_text_type, row)) for row in list_of_lists]) has_invisible = re.search(_invisible_codes, plain_text) if has_invisible: width_fn = _visible_width else: width_fn = len # format rows and columns, convert numeric values to strings cols = list(zip(*list_of_lists)) coltypes = list(map(_column_type, cols)) cols = [[_format(v, ct, floatfmt, missingval) for v in c] for c,ct in zip(cols, coltypes)] # align columns aligns = [numalign if ct in [int,float] else stralign for ct in coltypes] minwidths = [width_fn(h)+2 for h in headers] if headers else [0]*len(cols) cols = [_align_column(c, a, minw, has_invisible) for c, a, minw in zip(cols, aligns, minwidths)] if headers: # align headers and add headers minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, cols)] headers = [_align_header(h, a, minw) for h, a, minw in zip(headers, aligns, minwidths)] rows = list(zip(*cols)) 
else: minwidths = [width_fn(c[0]) for c in cols] rows = list(zip(*cols)) if not isinstance(tablefmt, TableFormat): tablefmt = _table_formats.get(tablefmt, _table_formats["simple"]) return _format_table(tablefmt, headers, rows, minwidths, aligns) def _build_row(cells, padding, begin, sep, end): "Return a string which represents a row of data cells." pad = u" "*padding padded_cells = [pad + cell + pad for cell in cells] return (begin + sep.join(padded_cells) + end).rstrip() def _build_line(colwidths, padding, begin, fill, sep, end): "Return a string which represents a horizontal line." cells = [fill*(w + 2*padding) for w in colwidths] return _build_row(cells, 0, begin, sep, end) def _mediawiki_cell_attrs(row, colaligns): "Prefix every cell in a row with an HTML alignment attribute." alignment = { "left": '', "right": 'align="right"| ', "center": 'align="center"| ', "decimal": 'align="right"| ' } row2 = [alignment[a] + c for c, a in zip(row, colaligns)] return row2 def _line_segment_with_colons(linefmt, align, colwidth): """Return a segment of a horizontal line with optional colons which indicate column's alignment (as in `pipe` output format).""" fill = linefmt.hline w = colwidth if align in ["right", "decimal"]: return (fill[0] * (w - 1)) + ":" elif align == "center": return ":" + (fill[0] * (w - 2)) + ":" elif align == "left": return ":" + (fill[0] * (w - 1)) else: return fill[0] * w def _format_table(fmt, headers, rows, colwidths, colaligns): """Produce a plain-text representation of the table.""" lines = [] hidden = fmt.with_header_hide if headers else fmt.without_header_hide pad = fmt.padding headerrow = fmt.headerrow if fmt.headerrow else fmt.datarow if fmt.usehtmlattrs: headers = _mediawiki_cell_attrs(headers, colaligns) rows = [_mediawiki_cell_attrs(row, colaligns) for row in rows] if fmt.lineabove and "lineabove" not in hidden: lines.append(_build_line(colwidths, pad, *fmt.lineabove)) if headers: lines.append(_build_row(headers, pad, *headerrow)) if 
fmt.linebelowheader and "linebelowheader" not in hidden: begin, fill, sep, end = fmt.linebelowheader if fmt.usecolons: segs = [_line_segment_with_colons(fmt.linebelowheader, a, w + 2*pad) for w,a in zip(colwidths, colaligns)] lines.append(_build_row(segs, 0, begin, sep, end)) else: lines.append(_build_line(colwidths, pad, *fmt.linebelowheader)) if rows and fmt.linebetweenrows and "linebetweenrows" not in hidden: # initial rows with a line below for row in rows[:-1]: lines.append(_build_row(row, pad, *fmt.datarow)) lines.append(_build_line(colwidths, pad, *fmt.linebetweenrows)) # the last row without a line below lines.append(_build_row(rows[-1], pad, *fmt.datarow)) else: for row in rows: lines.append(_build_row(row, pad, *fmt.datarow)) if fmt.linebelow and "linebelow" not in hidden: lines.append(_build_line(colwidths, pad, *fmt.linebelow)) return "\n".join(lines) opensvc-1.8~20170412/lib/hostidFreeBSD.py0000777000175000017500000000000013073467726022711 2hostidLinux.pyustar jkelbertjkelbertopensvc-1.8~20170412/lib/resIpDarwin.py0000644000175000017500000000160513073467726017634 0ustar jkelbertjkelbertimport resIp as Res import rcExceptions as ex from rcUtilitiesFreeBSD import check_ping from rcUtilities import to_cidr class Ip(Res.Ip): def check_ping(self, count=1, timeout=5): self.log.info("checking %s availability"%self.addr) return check_ping(self.addr, count=count, timeout=timeout) def arp_announce(self): return def startip_cmd(self): if ':' in self.addr: cmd = ['ifconfig', self.ipdev, 'inet6', '/'.join([self.addr, to_cidr(self.mask)]), 'add'] else: cmd = ['ifconfig', self.ipdev, 'inet', self.addr, 'netmask', '0xffffffff', 'add'] return self.vcall(cmd) def stopip_cmd(self): if ':' in self.addr: cmd = ['ifconfig', self.ipdev, 'inet6', self.addr, 'delete'] else: cmd = ['ifconfig', self.ipdev, 'inet', self.addr, 'delete'] return self.vcall(cmd) opensvc-1.8~20170412/lib/rcIfconfigSunOS.py0000644000175000017500000001134213073467726020405 0ustar jkelbertjkelbertfrom 
subprocess import * import rcIfconfig class ifconfig(rcIfconfig.ifconfig): def __init__(self, ifconfig=None, mcast=False): self.intf = [] if mcast: self.mcast_data = self.get_mcast() else: self.mcast_data = {} if ifconfig is not None: out = ifconfig else: out = Popen(['/usr/sbin/ifconfig', '-a'], stdin=None, stdout=PIPE,stderr=PIPE,close_fds=True).communicate()[0] self.parse(out) def get_mcast(self): cmd = ['netstat', '-gn'] out = Popen(cmd, stdout=PIPE).communicate()[0] return self.parse_mcast(out) def parse_mcast(self, out): lines = out.split('\n') found = False data = {} for i, line in enumerate(lines): if line.startswith('--'): found = True break if not found: return data if len(lines) == i+1: return data lines = lines[i+1:] for i, line in enumerate(lines): if len(line) == 0: break try: intf, addr, refcnt = line.split() except: continue if intf not in data: data[intf] = [addr] else: data[intf] += [addr] if len(lines) <= i + 1: return data lines = lines[i+1:] for i, line in enumerate(lines): if line.startswith('--'): found = True break if not found: return data if len(lines) == i+1: return data lines = lines[i+1:] for i, line in enumerate(lines): if len(line) == 0: break try: intf, addr, refcnt = line.split() except: continue if intf not in data: data[intf] = [addr] else: data[intf] += [addr] return data def set_hwaddr(self, i): if i is None or i.hwaddr != '' or ':' not in i.name: return i base_ifname, index = i.name.split(':') base_intf = self.interface(base_ifname) if base_intf is not None and len(base_intf.hwaddr) > 0: i.hwaddr = base_intf.hwaddr else: i.hwaddr = self.mac_from_arp(i.ipaddr) return i def mac_from_arp(self, ipaddr): cmd = ['/usr/sbin/arp', ipaddr] p = Popen(cmd, stdout=PIPE, stderr=PIPE) out, err = p.communicate() if p.returncode != 0: return '' for word in out.split(): if ':' not in word: continue return word return '' def parse(self, out): i = None for l in out.split("\n"): if l == '' : break if l[0]!='\t' : i = self.set_hwaddr(i) 
(ifname,ifstatus)=l.split(': ') i=rcIfconfig.interface(ifname) self.intf.append(i) # defaults i.link_encap = '' i.scope = '' i.bcast = '' i.mask = '' i.mtu = '' i.ipaddr = '' i.ip6addr = [] i.ip6mask = [] i.hwaddr = '' i.groupname = '' i.flag_up = False i.flag_broadcast = False i.flag_running = False i.flag_multicast = False i.flag_ipv4 = False i.flag_ipv6 = False i.flag_loopback = False if 'UP' in ifstatus : i.flag_up = True if 'DEPRECATED' in ifstatus : i.flag_deprecated = True if 'BROADCAST' in ifstatus : i.flag_broadcast = True if 'RUNNING' in ifstatus : i.flag_running = True if 'MULTICAST' in ifstatus : i.flag_multicast = True if 'IPv4' in ifstatus : i.flag_ipv4 = True if 'IPv6' in ifstatus : i.flag_ipv6 = True else: n=0 w=l.split() while n < len(w) : [p,v]=w[n:n+2] if p == 'inet' : i.ipaddr=v elif p == 'netmask' : i.mask=v elif p == 'broadcast' : i.bcast=v elif p == 'ether' : i.hwaddr=v elif p == 'groupname' : i.groupname=v elif p == 'inet6' : (a, m) = v.split('/') i.ip6addr += [a] i.ip6mask += [m] n+=2 i = self.set_hwaddr(i) if __name__ == "__main__": ifaces = ifconfig(mcast=True) print(ifaces) opensvc-1.8~20170412/lib/rcFreenas.py0000644000175000017500000007622413073467726017326 0ustar jkelbertjkelbertfrom __future__ import print_function import sys import os import json from optparse import Option import requests from rcConfigParser import RawConfigParser import rcExceptions as ex from rcGlobalEnv import rcEnv, Storage from rcUtilities import convert_size, bdecode from rcOptParser import OptParser try: requests.packages.urllib3.disable_warnings() except: pass VERIFY = False PROG = "nodemgr array" OPT = Storage({ "help": Option( "-h", "--help", default=None, action="store_true", dest="parm_help", help="show this help message and exit"), "array": Option( "-a", "--array", default=None, action="store", dest="array_name", help="The name of the array, as defined in auth.conf"), "name": Option( "--name", default=None, action="store", dest="name", help="The object 
name"), "volume": Option( "--volume", default=None, action="store", dest="volume", help="The volume to create the disk into"), "size": Option( "--size", default="0", action="store", dest="size", help="The disk size, expressed as a size expression like 1g, 100mib, ..."), "target": Option( "--target", action="append", dest="targets", help="A target name to export the disk through. Can be set multiple times."), "blocksize": Option( "--blocksize", default=512, type=int, action="store", dest="blocksize", help="The exported disk blocksize in B"), "secure_tpc": Option( "--secure-tpc", default=True, action="store_false", dest="insecure_tpc", help="Set the insecure_tpc flag to False"), "compression": Option( "--compression", default="on", action="store", dest="compression", choices=["on", "off", "inherit", "lzjb", "lz4", "gzip", "gzip-9", "zle"], help="Toggle compression"), "dedup": Option( "--dedup", default="off", action="store", dest="dedup", choices=["on", "off"], help="Toggle dedup"), "naa": Option( "--naa", default=None, action="store", dest="naa", help="The disk naa identifier"), "initiator": Option( "--initiator", action="append", dest="initiators", help="An initiator iqn. Can be specified multiple times."), "auth_network": Option( "--auth-network", default="ALL", action="store", dest="auth_network", help="Network authorized to access to the iSCSI target. 
ip or cidr addresses or 'ALL' for any ips"), "comment": Option( "--comment", action="store", dest="comment", help="Description for your reference"), "id": Option( "--id", action="store", type=int, dest="id", help="An object id, as reported by a list action"), "alias": Option( "--alias", action="store", dest="alias", help="An object name alias"), "target_id": Option( "--target-id", action="store", type=int, dest="target_id", help="The target object id"), "authgroup_id": Option( "--auth-group-id", action="store", type=int, dest="authgroup_id", help="The auth group object id"), "authtype": Option( "--auth-type", action="store", default="None", dest="authtype", choices=["None", "CHAP", "CHAP Mutual"], help="None, CHAP, CHAP Mutual"), "portal_id": Option( "--portal-id", action="store", type=int, dest="portal_id", help="The portal object id"), "initiatorgroup_id": Option( "--initiatorgroup-id", action="store", type=int, dest="initiatorgroup_id", help="The initiator group object id"), "mappings": Option( "--mappings", action="append", dest="mappings", help="A :,,... mapping used in add map in replacement of --targetgroup and --initiatorgroup. 
Can be specified multiple times."), }) GLOBAL_OPTS = [ OPT.array, ] DEPRECATED_ACTIONS = [] ACTIONS = { "Add actions": { "add_iscsi_file": { "msg": "Add and present a file-backed iscsi disk", "options": [ OPT.name, OPT.volume, OPT.size, OPT.target, OPT.blocksize, OPT.secure_tpc, OPT.mappings, ], }, "add_iscsi_zvol": { "msg": "Add and present a zvol-backed iscsi disk", "options": [ OPT.name, OPT.volume, OPT.size, OPT.target, OPT.blocksize, OPT.secure_tpc, OPT.compression, OPT.dedup, OPT.mappings, ], }, "add_iscsi_initiatorgroup": { "msg": "Declare a group of iscsi initiator iqn, for use in targetgroups which are portal-target-initiator relations", "options": [ OPT.initiator, OPT.comment, OPT.auth_network, ], }, "add_iscsi_target": { "msg": "Declare a iscsi target, for use in targetgroups which are portal-target-initiator relations", "options": [ OPT.name, OPT.alias, ], }, "add_iscsi_targetgroup": { "msg": "Declare a iscsi target group, which is a portal-target-initiator relation", "options": [ OPT.portal_id, OPT.target_id, OPT.initiatorgroup_id, OPT.authgroup_id, OPT.authtype, ], }, }, "Delete actions": { "del_iscsi_file": { "msg": "Delete and unpresent a file-backed iscsi disk", "options": [ OPT.name, OPT.naa, ], }, "del_iscsi_zvol": { "msg": "Delete and unpresent a zvol-backed iscsi disk", "options": [ OPT.name, OPT.naa, ], }, "del_iscsi_initiatorgroup": { "msg": "Delete a group of iscsi initiator iqn, used in targets which are portal-target-initiator relations", "options": [ OPT.id, ], }, "del_iscsi_target": { "msg": "Delete a iscsi target, used in targets which are portal-target-initiator relations", "options": [ OPT.id, ], }, "del_iscsi_targetgroup": { "msg": "Delete a iscsi target group, which is a portal-target-initiator relation", "options": [ OPT.id, ], }, }, "Modify actions": { "resize_zvol": { "msg": "Resize a zvol", "options": [ OPT.name, OPT.naa, OPT.size, ], }, }, "List actions": { "list_mappings": { "msg": "List configured volumes", "options": [ 
OPT.name, OPT.naa, ], }, "list_volume": { "msg": "List configured volumes", }, "list_iscsi_portal": { "msg": "List configured portals", }, "list_iscsi_target": { "msg": "List configured targets", }, "list_iscsi_targetgroup": { "msg": "List configured target groups", }, "list_iscsi_targettoextent": { "msg": "List configured target-to-extent relations", }, "list_iscsi_extent": { "msg": "List configured extents", }, "list_iscsi_initiatorgroup": { "msg": "List configured initiator groups", }, }, } class Freenass(object): arrays = [] def __init__(self, objects=[]): self.objects = objects self.filtering = len(objects) > 0 cf = rcEnv.authconf if not os.path.exists(cf): return conf = RawConfigParser() conf.read(cf) m = [] for s in conf.sections(): try: stype = conf.get(s, 'type') except: continue if stype != "freenas": continue try: name = s api = conf.get(s, 'api') username = conf.get(s, 'username') password = conf.get(s, 'password') m += [(name, api, username, password)] except: print("error parsing section", s) del conf done = [] for name, api, username, password in m: if self.filtering and name not in self.objects: continue if name in done: continue self.arrays.append(Freenas(name, api, username, password)) done.append(name) def __iter__(self): for array in self.arrays: yield(array) def get_freenas(self, name): for array in self.arrays: if array.name == name: return array return None class Freenas(object): def __init__(self, name, api, username, password): self.node = None self.name = name self.api = api self.username = username self.password = password self.auth = (username, password) self.keys = ['version', 'volumes', 'iscsi_targets', 'iscsi_targettoextents', 'iscsi_extents'] def delete(self, uri, data=None): api = self.api+uri+"/" headers = {'Content-Type': 'application/json'} r = requests.delete(api, data=json.dumps(data), auth=self.auth, verify=VERIFY, headers=headers) return r def put(self, uri, data=None): api = self.api+uri+"/" headers = {'Content-Type': 
'application/json'} r = requests.put(api, data=json.dumps(data), auth=self.auth, verify=VERIFY, headers=headers) return bdecode(r.content) def post(self, uri, data=None): api = self.api+uri+"/" headers = {'Content-Type': 'application/json'} r = requests.post(api, data=json.dumps(data), auth=self.auth, verify=VERIFY, headers=headers) return bdecode(r.content) def post2(self, uri, data=None): api = self.api.replace("api/v1.0", "")+uri s = requests.Session() r = s.get(api) csrf_token = r.cookies['csrftoken'] data["csrfmiddlewaretoken"] = csrf_token r = requests.post(api, data=data, auth=self.auth, verify=VERIFY) return bdecode(r.content) def get(self, uri, params=None): r = requests.get(self.api+uri+"/?format=json", params=params, auth=self.auth, verify=VERIFY) return bdecode(r.content) def get_version(self): buff = self.get("/system/version") return buff def get_volume(self, name): buff = self.get("/storage/volume/%s" % name, {"limit": 0}) return buff def get_volume_datasets(self, name): buff = self.get("/storage/volume/%s/datasets" % name, {"limit": 0}) return buff def get_volumes(self): buff = self.get("/storage/volume", {"limit": 0}) return buff def get_iscsi_target_id(self, tgt_id): buff = self.get("/services/iscsi/target/%d" % tgt_id) return buff def get_iscsi_targets(self): buff = self.get("/services/iscsi/target", {"limit": 0}) return buff def get_iscsi_targettoextents(self): buff = self.get("/services/iscsi/targettoextent", {"limit": 0}) return buff def get_iscsi_extents(self): buff = self.get("/services/iscsi/extent", {"limit": 0}) return buff def get_iscsi_portal(self): buff = self.get("/services/iscsi/portal", {"limit": 0}) return buff def get_iscsi_targetgroup(self): buff = self.get("/services/iscsi/targetgroup", {"limit": 0}) return buff def get_iscsi_targetgroup_id(self, tg_id): buff = self.get("/services/iscsi/targetgroup/%d" % tg_id) return buff def get_iscsi_authorizedinitiator(self): buff = self.get("/services/iscsi/authorizedinitiator", {"limit": 
0}) return buff def get_iscsi_authorizedinitiator_id(self, initiator_id): buff = self.get("/services/iscsi/authorizedinitiator/%d" % initiator_id) return buff def get_iscsi_target_ids(self, target_names): buff = self.get_iscsi_targets() data = json.loads(buff) l = [] for target in data: if target["iscsi_target_name"] in target_names: l.append(target["id"]) return l def get_iscsi_extents_data(self): buff = self.get_iscsi_extents() data = json.loads(buff) return data def get_iscsi_extent(self, naa=None, name=None): data = self.get_iscsi_extents_data() if naa and not naa.startswith("0x"): naa = "0x" + naa for extent in data: if name and name == extent["iscsi_target_extent_name"]: return extent if naa and naa == extent["iscsi_target_extent_naa"]: return extent def del_iscsi_extent(self, extent_id): path = "/services/iscsi/extent/%d" % extent_id response = self.delete(path) if response.status_code != 204: raise ex.excError("delete error: %s (%d)" % (path, response.status_code)) def add_iscsi_zvol_extent(self, name=None, size=None, volume=None, insecure_tpc=True, blocksize=512, **kwargs): for key in ["name", "size", "volume"]: if locals()[key] is None: raise ex.excError("'%s' key is mandatory" % key) data = self.add_zvol(name=name, size=size, volume=volume, **kwargs) d = { "iscsi_target_extent_type": "Disk", "iscsi_target_extent_name": name, "iscsi_target_extent_insecure_tpc": insecure_tpc, "iscsi_target_extent_blocksize": blocksize, "iscsi_target_extent_disk": "zvol/%s/%s" % (volume, name), } buff = self.post("/services/iscsi/extent", d) data = json.loads(buff) return data def add_iscsi_file_extent(self, name=None, size=None, volume=None, insecure_tpc=True, blocksize=512, **kwargs): for key in ["name", "size", "volume"]: if locals()[key] is None: raise ex.excError("'%s' key is mandatory" % key) size = convert_size(size, _to="MB") d = { "iscsi_target_extent_type": "File", "iscsi_target_extent_name": name, "iscsi_target_extent_insecure_tpc": insecure_tpc, 
"iscsi_target_extent_blocksize": blocksize, "iscsi_target_extent_filesize": str(size)+"MB", "iscsi_target_extent_path": "/mnt/%s/%s" % (volume, name), } buff = self.post("/services/iscsi/extent", d) data = json.loads(buff) return data def add_iscsi_targets_to_extent(self, extent_id=None, targets=None, **kwargs): for key in ["extent_id", "targets"]: if locals()[key] is None: raise ex.excError("'%s' key is mandatory" % key) target_ids = self.get_iscsi_target_ids(targets) data = [] for target_id in target_ids: data.append(self.add_iscsi_target_to_extent(target_id, extent_id)) return data def add_iscsi_target_to_extent(self, target_id, extent_id): d = { "iscsi_target": target_id, "iscsi_extent": extent_id, } buff = self.post("/services/iscsi/targettoextent", d) data = json.loads(buff) return data def del_zvol(self, name=None, volume=None, **kwargs): for key in ["name", "volume"]: if locals()[key] is None: raise ex.excError("'%s' key is mandatory" % key) path = '/storage/volume/%s/zvols/%s' % (volume, name) response = self.delete(path) if response.status_code != 204: raise ex.excError("delete error: %s (%d)" % (path, response.status_code)) def add_zvol(self, name=None, size=None, volume=None, compression="inherit", dedup="off", **kwargs): for key in ["name", "size", "volume"]: if locals()[key] is None: raise ex.excError("'%s' key is mandatory" % key) size = convert_size(size, _to="MB") d = { "name": name, "volsize": str(size)+"MB", "compression": compression, "dedup": dedup, } buff = self.post('/storage/volume/%s/zvols/' % volume, d) try: return json.loads(buff) except ValueError: raise ex.excError(buff) def get_zvol(self, volume=None, name=None): buff = self.get('/storage/volume/%s/zvols/%s' % (volume, name)) try: return json.loads(buff) except ValueError: raise ex.excError(buff) def list_mappings(self, name=None, naa=None, **kwargs): tte_data = json.loads(self.get_iscsi_targettoextents()) if name is not None or naa is not None: data = self.get_iscsi_extent(name=name, 
naa=naa) if data is None: raise ex.excError("extent not found") extent_id = data["id"] tte_data = [d for d in tte_data if d["iscsi_extent"] == extent_id] extent_data = {} for d in json.loads(self.get_iscsi_extents()): extent_data[d["id"]] = d target_data = {} for d in json.loads(self.get_iscsi_targets()): target_data[d["id"]] = d tg_by_target = {} for d in json.loads(self.get_iscsi_targetgroup()): if d["iscsi_target"] in tg_by_target: tg_by_target[d["iscsi_target"]].append(d) else: tg_by_target[d["iscsi_target"]] = [d] ig_data = {} for d in json.loads(self.get_iscsi_authorizedinitiator()): ig_data[d["id"]] = d mappings = {} for d in tte_data: for tg in tg_by_target[d["iscsi_target"]]: ig_id = tg["iscsi_target_initiatorgroup"] ig = ig_data[ig_id] for hba_id in ig["iscsi_target_initiator_initiators"].split("\n"): tgt_id = target_data[tg["iscsi_target"]]["iscsi_target_name"] mappings[hba_id+":"+tgt_id] = { "disk_id": extent_data[d["iscsi_extent"]]["iscsi_target_extent_naa"].replace("0x", ""), "tgt_id": tgt_id, "hba_id": hba_id, } return mappings def resize_zvol(self, name=None, naa=None, size=None, **kwargs): if size is None: raise ex.excError("'size' key is mandatory") if name is None and naa is None: raise ex.excError("'name' or 'naa' must be specified") data = self.get_iscsi_extent(name=name, naa=naa) if data is None: raise ex.excError("extent not found") volume = self.extent_volume(data) if volume is None: raise ex.excError("volume not found") if size.startswith("+"): incr = convert_size(size.lstrip("+"), _to="MB") zvol_data = self.get_zvol(volume=volume, name=data["iscsi_target_extent_name"]) current_size = convert_size(int(zvol_data["volsize"]), _to="MB") size = str(current_size + incr) + "MB" else: size = str(convert_size(size, _to="MB")) + "MB" d = { "volsize": size, } buff = self.put('/storage/volume/%s/zvols/%s' % (volume, data["iscsi_target_extent_name"]), d) try: return json.loads(buff) except ValueError: raise ex.excError(buff) def 
del_iscsi_initiatorgroup(self, ig_id=None, **kwargs): content = self.get_iscsi_authorizedinitiator_id(ig_id) try: data = json.loads(content) except ValueError: raise ex.excError("initiator group not found") self._del_iscsi_initiatorgroup(ig_id=ig_id, **kwargs) print(json.dumps(data, indent=8)) return data def _del_iscsi_initiatorgroup(self, ig_id=None, **kwargs): if id is None: raise ex.excError("'id' in mandatory") response = self.delete('/services/iscsi/authorizedinitiator/%d' % ig_id) if response.status_code != 204: raise ex.excError(str(response)) def add_iscsi_initiatorgroup(self, **kwargs): data = self._add_iscsi_initiatorgroup(**kwargs) print(json.dumps(data, indent=8)) return data def _add_iscsi_initiatorgroup(self, initiators=None, auth_network="ALL", comment=None, **kwargs): for key in ["initiators"]: if locals()[key] is None: raise ex.excError("'%s' key is mandatory" % key) d = { "iscsi_target_initiator_initiators": ",".join(initiators), "iscsi_target_initiator_auth_network": auth_network, } if comment: d["iscsi_target_initiator_comment"] = comment buff = self.post('/services/iscsi/authorizedinitiator/', d) try: return json.loads(buff) except ValueError: raise ex.excError(buff) # targetgroup def del_iscsi_targetgroup(self, tg_id=None, **kwargs): content = self.get_iscsi_targetgroup_id(tg_id) try: data = json.loads(content) except ValueError: raise ex.excError("target group not found") self._del_iscsi_targetgroup(tg_id=tg_id, **kwargs) print(json.dumps(data, indent=8)) return data def _del_iscsi_targetgroup(self, tg_id=None, **kwargs): if tg_id is None: raise ex.excError("'tg_id' is mandatory") response = self.delete('/services/iscsi/targetgroup/%d' % tg_id) if response.status_code != 204: raise ex.excError(str(response)) def add_iscsi_targetgroup(self, **kwargs): data = self._add_iscsi_targetgroup(**kwargs) print(json.dumps(data, indent=8)) return data def _add_iscsi_targetgroup(self, portal_id=None, initiatorgroup_id=None, target_id=None, 
authtype="None", authgroup_id=None, **kwargs): for key in ["portal_id", "initiatorgroup_id", "target_id"]: if locals()[key] is None: raise ex.excError("'%s' key is mandatory" % key) d = { "iscsi_target": target_id, "iscsi_target_initiatorgroup": initiatorgroup_id, "iscsi_target_portalgroup": portal_id, "iscsi_target_authtype": authtype, "iscsi_target_authgroup": -1, "iscsi_target_initialdigest": "Auto", } if authgroup_id: d["iscsi_target_authgroup"] = authgroup_id print(d) buff = self.post('/services/iscsi/targetgroup/', d) try: return json.loads(buff) except ValueError: raise ex.excError(buff) # target def del_iscsi_target(self, target_id=None, **kwargs): content = self.get_iscsi_target_id(target_id) try: data = json.loads(content) except ValueError: raise ex.excError("target not found") self._del_iscsi_target(target_id=target_id, **kwargs) print(json.dumps(data, indent=8)) return data def _del_iscsi_target(self, target_id=None, **kwargs): if target_id is None: raise ex.excError("'target_id' is mandatory") response = self.delete('/services/iscsi/target/%d' % target_id) if response.status_code != 204: raise ex.excError(str(response)) def add_iscsi_target(self, **kwargs): data = self._add_iscsi_target(**kwargs) print(json.dumps(data, indent=8)) return data def _add_iscsi_target(self, name=None, alias=None, **kwargs): for key in ["name"]: if locals()[key] is None: raise ex.excError("'%s' key is mandatory" % key) d = { "iscsi_target_name": name, } if alias: d["iscsi_target_alias"] = alias buff = self.post('/services/iscsi/target/', d) try: return json.loads(buff) except ValueError: raise ex.excError(buff) def add_iscsi_file(self, name=None, size=None, volume=None, targets=None, mappings=None, insecure_tpc=True, blocksize=512, **kwargs): for key in ["name", "size", "volume"]: if locals()[key] is None: raise ex.excError("'%s' key is mandatory" % key) if targets is None and mappings is None: raise ex.excError("'targets' or 'mappings' must be specified") if mappings is 
not None and targets is None: targets = self.translate_mappings(mappings) data = self.add_iscsi_file_extent(name=name, size=size, volume=volume, **kwargs) if "id" not in data: if "iscsi_target_extent_name" in data: if isinstance(data["iscsi_target_extent_name"], list): raise ex.excError("\n".join(data["iscsi_target_extent_name"])) raise ex.excError(data["iscsi_target_extent_name"]) raise ex.excError(str(data)) self.add_iscsi_targets_to_extent(extent_id=data["id"], targets=targets, **kwargs) disk_id = data["iscsi_target_extent_naa"].replace("0x", "") results = { "driver_data": data, "disk_id": disk_id, "disk_devid": data["id"], "mappings": self.list_mappings(naa=disk_id), } return results def del_iscsi_file(self, name=None, naa=None, **kwargs): if name is None and naa is None: raise ex.excError("'name' or 'naa' must be specified") data = self.get_iscsi_extent(name=name, naa=naa) if data is None: return self.del_iscsi_extent(data["id"]) print(json.dumps(data, indent=8)) def translate_mappings(self, mappings): targets = set() for mapping in mappings: elements = mapping.split(":") targets |= set(elements[-1].split(",")) targets = list(targets) return targets def add_iscsi_zvol(self, name=None, size=None, volume=None, targets=None, mappings=None, insecure_tpc=True, blocksize=512, **kwargs): for key in ["name", "size", "volume"]: if locals()[key] is None: raise ex.excError("'%s' key is mandatory" % key) if targets is None and mappings is None: raise ex.excError("'targets' or 'mappings' must be specified") if mappings is not None and targets is None: targets = self.translate_mappings(mappings) data = self.add_iscsi_zvol_extent(name=name, size=size, volume=volume, **kwargs) if "id" not in data: if "iscsi_target_extent_name" in data: if isinstance(data["iscsi_target_extent_name"], list): raise ex.excError("\n".join(data["iscsi_target_extent_name"])) raise ex.excError(data["iscsi_target_extent_name"]) raise ex.excError(str(data)) 
self.add_iscsi_targets_to_extent(extent_id=data["id"], targets=targets, **kwargs) self.add_diskinfo(data, size, volume) disk_id = data["iscsi_target_extent_naa"].replace("0x", "") results = { "driver_data": data, "disk_id": disk_id, "disk_devid": data["id"], "mappings": self.list_mappings(naa=disk_id), } return results def del_iscsi_zvol(self, name=None, naa=None, **kwargs): if name is None and naa is None: raise ex.excError("'name' or 'naa' must be specified") data = self.get_iscsi_extent(name=name, naa=naa) if data is None: return volume = self.extent_volume(data) self.del_iscsi_extent(data["id"]) self.del_zvol(name=name, volume=volume) self.del_diskinfo(data["iscsi_target_extent_naa"].replace("0x", "")) print(json.dumps(data, indent=8)) def extent_volume(self, data): path = data["iscsi_target_extent_path"].split("/") volume = path[path.index("zvol")+1] return volume def list_volume(self, **kwargs): data = json.loads(self.get_volumes()) print(json.dumps(data, indent=8)) def list_iscsi_target(self, **kwargs): data = json.loads(self.get_iscsi_targets()) print(json.dumps(data, indent=8)) def list_iscsi_targettoextent(self, **kwargs): data = json.loads(self.get_iscsi_targettoextents()) print(json.dumps(data, indent=8)) def list_iscsi_portal(self, **kwargs): data = json.loads(self.get_iscsi_portal()) print(json.dumps(data, indent=8)) def list_iscsi_targetgroup(self, **kwargs): data = json.loads(self.get_iscsi_targetgroup()) print(json.dumps(data, indent=8)) def list_iscsi_extent(self, **kwargs): data = json.loads(self.get_iscsi_extents()) print(json.dumps(data, indent=8)) def list_iscsi_initiatorgroup(self, **kwargs): data = json.loads(self.get_iscsi_authorizedinitiator()) print(json.dumps(data, indent=8)) def del_diskinfo(self, disk_id): if disk_id in (None, ""): return if self.node is None: return try: result = self.node.collector_rest_delete("/disks/%s" % disk_id) except Exception as exc: raise ex.excError(str(exc)) if "error" in result: raise 
def add_diskinfo(self, data, size=None, volume=None):
    """
    Declare a newly created extent to the collector disks table.

    data is the extent driver data returned by the array, size the
    extent size (converted to MB for the collector) and volume the
    backing volume name. No-op when no collector node is attached.

    Raises ex.excError if the collector post fails or the collector
    response reports an error.
    """
    if self.node is None:
        # no collector context: nothing to declare
        return
    try:
        result = self.node.collector_rest_post("/disks", {
            "disk_id": data["iscsi_target_extent_naa"].replace("0x", ""),
            "disk_devid": data["id"],
            "disk_name": data["iscsi_target_extent_name"],
            "disk_size": convert_size(size, _to="MB"),
            "disk_alloc": 0,
            "disk_arrayid": self.name,
            "disk_group": volume,
        })
    except Exception as exc:
        raise ex.excError(str(exc))
    # bugfix: the error indicator is in the collector response, not in
    # the extent data dict (del_diskinfo already checks the response)
    if "error" in result:
        raise ex.excError(result["error"])
    return result
def container_forcestop(self):
    """
    Forcibly take the ldom down:
      ldm stop domain (best effort, ignored on error)
      ldm unbind domain
    """
    # bugfix: the original compared the bound method object
    # (self.state) to 'active', which is always False, so the
    # 'stop' action was never attempted before 'unbind'.
    if self.state() == 'active':
        try:
            self.container_action('stop')
        except ex.excError:
            # best effort: proceed to unbind even if stop failed
            pass
    self.container_action('unbind')
def find_svc(self, mountpt):
    # Return the name of the first service owning an 'fs' resource
    # mounted on mountpt, or '' when no service matches.
    for service in self.svcs:
        matched = [res for res in service.get_resources('fs')
                   if res.mount_point == mountpt]
        if matched:
            return service.svcname
    return ''
def has_it(self):
    """Return True if the fdmn is present, i.e. its directory
    exists under /etc/fdmns."""
    return os.path.exists("/etc/fdmns/" + self.name)
unable to evaluate disklist") return set([]) with open(self.disklist_name(), 'r') as f: buff = f.read() import json try: dl = set(json.loads(buff)) except: self.log.error("corrupted disklist cache file %s"%self.disklist_name()) raise ex.excError return dl def _disklist(self): # return cache if initialized if len(self.disks) > 0 : return self.disks disks = set([]) if not os.path.exists("/etc/fdmns/"+self.name): return disks import glob dl = glob.glob("/etc/fdmns/"+self.name+"/*") dl = map(lambda x: os.readlink(x), dl) self.disks = set(dl) self.log.debug("found disks %s held by pool %s" % (disks, self.name)) return self.disks if __name__ == "__main__": p=Disk(name="dom1") print p._disklist() opensvc-1.8~20170412/lib/svcDict.py0000644000175000017500000051047013073467726017011 0ustar jkelbertjkelbertimport sys import os from rcGlobalEnv import rcEnv from textwrap import TextWrapper from rcNode import node_get_node_env deprecated_keywords = { "DEFAULT.service_type": "env", "disk.lvm.vgname": "name", "disk.pool.poolname": "name", "disk.vg.vgname": "name", "sync.rsync.exclude": "options", "disk.zpool.poolname": "name", } deprecated_sections = { "disk.pool": ["disk", "zpool"], "drbd": ["disk", "drbd"], "loop": ["disk", "loop"], "pool": ["disk", "zpool"], "vdisk": ["disk", "vdisk"], "vmdg": ["disk", "vmdg"], "vg": ["disk", "vg"], } class MissKeyNoDefault(Exception): pass class KeyInvalidValue(Exception): pass class Keyword(object): def __init__(self, section, keyword, rtype=None, order=100, required=False, generic=False, at=False, default=None, default_text=None, validator=None, candidates=None, strict_candidates=True, depends=[], text="", example="foo", provisioning=False): self.section = section self.keyword = keyword if rtype is None or type(rtype) == list: self.rtype = rtype else: self.rtype = [rtype] self.order = order self.generic = generic self.at = at self.required = required self.default = default self.default_text = default_text self.candidates = candidates 
self.strict_candidates = strict_candidates self.depends = depends self.text = text self.example = example self.provisioning = provisioning if self.default_text is None: self.default_text = self.default def __lt__(self, o): return self.order < o.order def deprecated(self): if self.rtype is None: if self.section+"."+self.keyword in deprecated_keywords: return True else: return False for rtype in self.rtype: if self.section+"."+rtype+"."+self.keyword in deprecated_keywords: return True return False def template(self): if self.deprecated(): return '' wrapper = TextWrapper(subsequent_indent="#%15s"%"", width=78) depends = " && ".join(map(lambda d: "%s in %s"%(d[0], d[1]), self.depends)) if depends == "": depends = None if type(self.candidates) in (list, tuple, set): candidates = " | ".join(map(lambda x: str(x), self.candidates)) else: candidates = str(self.candidates) if not self.strict_candidates: candidates += " ..." s = '#\n' s += "# keyword: %s\n"%self.keyword s += "# ----------------------------------------------------------------------------\n" s += "# required: %s\n"%str(self.required) s += "# provisioning: %s\n"%str(self.provisioning) s += "# default: %s\n"%str(self.default_text) s += "# candidates: %s\n"%candidates s += "# depends: %s\n"%depends s += "# scopable: %s\n"%str(self.at) s += '#\n' if self.text: wrapper = TextWrapper(subsequent_indent="#%9s"%"", width=78) s += wrapper.fill("# desc: "+self.text) + "\n" s += '#\n' if self.default_text is not None: val = self.default_text elif self.candidates and len(self.candidates) > 0: val = self.candidates[0] else: val = self.example s += ";" + self.keyword + " = " + str(val) + "\n\n" return s def __str__(self): if self.deprecated(): return '' wrapper = TextWrapper(subsequent_indent="%15s"%"", width=78) depends = "" for d in self.depends: depends += "%s in %s\n"%(d[0], d[1]) if depends == "": depends = None if type(self.candidates) in (list, tuple, set): candidates = " | ".join(map(lambda x: str(x), 
self.candidates)) else: candidates = str(self.candidates) if not self.strict_candidates: candidates += " ..." s = '' s += "------------------------------------------------------------------------------\n" s += "section: %s\n"%self.section s += "keyword: %s\n"%self.keyword s += "------------------------------------------------------------------------------\n" s += " required: %s\n"%str(self.required) s += " provisioning: %s\n"%str(self.provisioning) s += " default: %s\n"%str(self.default) s += " candidates: %s\n"%candidates s += " depends: %s\n"%depends s += " scopable: %s\n"%str(self.at) if self.text: s += wrapper.fill(" help: "+self.text) if self.at: s += "\n\nPrefix the value with '@ ', '@nodes ', '@drpnodes ', '@flex_primary', '@drp_flex_primary' or '@encapnodes '\n" s += "to specify a scope-specific value.\n" s += "You will be prompted for new values until you submit an empty value.\n" s += "\n" return s def form(self, d): if self.deprecated(): return # skip this form if dependencies are not met for d_keyword, d_value in self.depends: if d is None: return d if d_keyword not in d: return d if d[d_keyword] not in d_value: return d # print() the form print(self) # if we got a json seed, use its values as default # else use the Keyword object default if d and self.keyword in d: default = d[self.keyword] elif self.default is not None: default = self.default else: default = None if default is not None: default_prompt = " [%s] "%str(default) else: default_prompt = "" req_satisfied = False while True: try: val = raw_input(self.keyword+default_prompt+"> ") except EOFError: break if len(val) == 0: if req_satisfied: return d if default is None and self.required: print("value required") continue # keyword is optional, leave dictionary untouched return d elif self.at and val[0] == '@': l = val.split() if len(l) < 2: print("invalid value") continue val = ' '.join(l[1:]) d[self.keyword+l[0]] = val req_satisfied = True else: d[self.keyword] = val req_satisfied = True if 
self.at: # loop for more key@ = values print("More '%s' ? to step to the next parameter."%self.keyword) continue else: return d class Section(object): def __init__(self, section): self.section = section self.keywords = [] def __iadd__(self, o): if not isinstance(o, Keyword): return self self.keywords.append(o) return self def __str__(self): s = '' for keyword in sorted(self.keywords): s += str(keyword) return s def template(self): k = self.getkey("type") if k is None: return self._template() if k.candidates is None: return self._template() s = "" if not k.strict_candidates: s += self._template() for t in k.candidates: s += self._template(t) return s def _template(self, rtype=None): section = self.section if self.section in deprecated_sections: return "" if rtype and self.section+"."+rtype in deprecated_sections: return "" dpath = rcEnv.pathdoc fpath = os.path.join(dpath, "template."+section+".conf") if rtype: section += ", type "+rtype fpath = os.path.join(dpath, "template."+self.section+"."+rtype+".conf") s = "#"*78 + "\n" s += "# %-74s #\n" % " " s += "# %-74s #\n" % section s += "# %-74s #\n" % " " s += "#"*78 + "\n\n" if section in ("DEFAULT", "env"): s += "[%s]\n" % self.section else: s += "[%s#0]\n" % self.section if rtype is not None: s += ";type = " + rtype + "\n\n" for keyword in sorted(self.getkeys(rtype)): s += keyword.template() for keyword in sorted(self.getprovkeys(rtype)): s += keyword.template() if rtype is not None: for keyword in sorted(self.getkeys()): if keyword.keyword == "type": continue s += keyword.template() with open(fpath, "w") as f: f.write(s) return s def getkeys(self, rtype=None): if rtype is None: return [k for k in self.keywords if k.rtype is None and not k.provisioning] else: return [k for k in self.keywords if k.rtype and rtype in k.rtype and not k.provisioning] def getprovkeys(self, rtype=None): if rtype is None: return [k for k in self.keywords if k.rtype is None and k.provisioning] else: return [k for k in self.keywords if 
k.rtype and rtype in k.rtype and k.provisioning] def getkey(self, keyword, rtype=None): if '@' in keyword: l = keyword.split('@') if len(l) != 2: return None keyword, node = l if rtype: for k in self.keywords: if k.keyword == keyword and k.rtype and rtype in k.rtype: return k else: for k in self.keywords: if k.keyword == keyword: return k return None class KeywordStore(dict): def __init__(self, provision=False): self.sections = {} self.provision = provision def __iadd__(self, o): if not isinstance(o, Keyword): return self o.top = self if o.section not in self.sections: self.sections[o.section] = Section(o.section) self.sections[o.section] += o return self def __getattr__(self, key): return self.sections[str(key)] def __getitem__(self, key): k = str(key) if k not in self.sections: return Section(k) return self.sections[str(key)] def __str__(self): s = '' for section in self.sections: s += str(self.sections[section]) return s def print_templates(self): for section in sorted(self.sections.keys()): print(self.sections[section].template()) def required_keys(self, section, rtype=None): if section not in self.sections: return [] return [k for k in sorted(self.sections[section].getkeys(rtype)) if k.required is True] def purge_keywords_from_dict(self, d, section): if section == "env": return d if 'type' in d: rtype = d['type'] else: rtype = None delete_keywords = [] for keyword, value in d.items(): key = self.sections[section].getkey(keyword) if key is None and rtype is not None: key = self.sections[section].getkey(keyword, rtype) if key is None: if keyword != "rtype": print("Remove unknown keyword '%s' from section '%s'"%(keyword, section)) delete_keywords.append(keyword) for keyword in delete_keywords: del d[keyword] return d def update(self, rid, d): """ Given a resource dictionary, spot missing required keys and provide a new dictionary to merge populated by default values """ import copy completion = copy.copy(d) # decompose rid into section and rtype if rid in 
('DEFAULT', 'env'): section = rid rtype = None else: if '#' not in rid: return {} l = rid.split('#') if len(l) != 2: return {} section = l[0] if 'type' in d: rtype = d['type'] elif self[section].getkey('type') is not None and \ self[section].getkey('type').default is not None: rtype = self[section].getkey('type').default else: rtype = None # validate command line dictionary for keyword, value in d.items(): if section == "env": break key = self.sections[section].getkey(keyword) if key is None and rtype is not None: key = self.sections[section].getkey(keyword, rtype) if key is None: continue if key.strict_candidates and key.candidates is not None and value not in key.candidates: print("'%s' keyword has invalid value '%s' in section '%s'"%(keyword, str(value), rid)) raise KeyInvalidValue() # add missing required keys if they have a known default value for key in self.required_keys(section, rtype): fkey = ".".join((section, str(rtype), key.keyword)) if fkey in deprecated_keywords: continue if key.keyword in d: continue if key.keyword in map(lambda x: x.split('@')[0], d.keys()): continue if key.default is None: sys.stderr.write("No default value for required key '%s' in section '%s'\n"%(key.keyword, rid)) raise MissKeyNoDefault() print("Implicitely add [%s] %s = %s" % (rid, key.keyword, str(key.default))) completion[key.keyword] = key.default # purge unknown keywords and provisioning keywords completion = self.purge_keywords_from_dict(completion, section) return completion def form_sections(self, sections): wrapper = TextWrapper(subsequent_indent="%18s"%"", width=78) candidates = set(self.sections.keys()) - set(['DEFAULT']) print("------------------------------------------------------------------------------") print("Choose a resource type to add or a resource to edit.") print("Enter 'quit' to finish the creation.") print("------------------------------------------------------------------------------") print(wrapper.fill("resource types: "+', '.join(candidates))) 
def free_resource_index(self, section, sections):
    # Return the lowest integer index not already taken by a
    # "<section>#<index>" resource id among the existing sections.
    used = set()
    for rid in sections:
        parts = rid.split('#')
        if len(parts) != 2:
            continue
        rid_section, rid_index = parts
        if rid_section != section:
            continue
        try:
            used.add(int(rid_index))
        except:
            continue
    index = 0
    while index in used:
        index += 1
    return index
) class KeywordLockTimeout(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="lock_timeout", required=False, order=10, default=60, text="The duration in seconds the agent wait for the action lock acquisition before aborting the action. The svcmgr --waitlock parameter overides this option." ) class KeywordMode(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="mode", required=False, order=10, default="hosted", candidates=["hosted", "sg", "vcs", "rhcs"], text="The mode decides upon disposition OpenSVC takes to bring a service up or down : virtualized services need special actions to prepare and boot the container for example, which is not needed for 'hosted' services." ) class KeywordPkgName(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="pkg_name", at=True, required=False, order=11, depends=[('mode', ["vcs", "sg", "rhcs"])], text="The wrapped cluster package name, as known to the cluster manager in charge." ) class KeywordRollback(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="rollback", at=True, required=False, order=11, default=True, text="If set to False, the default rollback on action error is inhibited, leaving the service in its half-started state." ) class KeywordCompSchedule(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="comp_schedule", at=True, required=False, order=11, default="00:00-06:00@361", text="The service compliance run schedule. See usr/share/doc/template.node.conf for the schedule syntax." ) class KeywordStatusSchedule(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="status_schedule", at=True, required=False, order=11, default="@10", text="The service status evaluation schedule. See usr/share/doc/template.node.conf for the schedule syntax." 
) class KeywordDefaultSyncSchedule(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="sync_schedule", at=True, required=False, order=11, default="04:00-06:00@121", text="The default sync resources schedule. See usr/share/doc/template.node.conf for the schedule syntax." ) class KeywordResinfoSchedule(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="resinfo_schedule", at=True, required=False, order=11, default="@60", text="The service resource info push schedule. See usr/share/doc/template.node.conf for the schedule syntax." ) class KeywordMonitorSchedule(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="monitor_schedule", at=True, required=False, order=11, default="@1", text="The service resource monitor schedule. See usr/share/doc/template.node.conf for the schedule syntax." ) class KeywordPushSchedule(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="push_schedule", at=True, required=False, order=11, default="00:00-06:00@361", text="The service configuration emission to the collector schedule. See usr/share/doc/template.node.conf for the schedule syntax." ) class KeywordFlexPrimary(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="flex_primary", at=True, required=False, order=11, depends=[('cluster_type', ["flex"])], default_text="", text="The node in charge of syncing the other nodes. --cluster actions on the flex_primary are execute on all peer nodes (ie, not drpnodes)." ) class KeywordDrpFlexPrimary(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="drp_flex_primary", at=True, required=False, order=11, depends=[('cluster_type', ["flex"])], default_text="", text="The drpnode in charge of syncing the other drpnodes. --cluster actions on the drp_flex_primary are execute on all drpnodes (ie, not pri nodes)." 
) class KeywordDockerSwarmManagers(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="docker_swarm_managers", order=20, at=True, required=False, text="List of nodes promoted as docker swarm managers.The flex primary node is implicitely a manager. Whitespace separated." ) class KeywordDockerExe(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="docker_exe", at=True, required=False, order=12, text="If you have multiple docker versions installed and want the service to stick to a version whatever the PATH definition, you should set this parameter to the full path to the docker executable.", example="/usr/bin/docker-1.8" ) class KeywordDockerDataDir(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="docker_data_dir", at=True, required=False, order=12, text="If the service has docker-type container resources and docker_daemon_private is set to True, the service handles the startup of a private docker daemon. Its socket is //docker.sock, and its data directory must be specified using this parameter. This organization is necessary to enable service relocalization.", example="/srv/svc1/data/docker" ) class KeywordDockerDaemonPrivate(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="docker_daemon_private", at=True, required=False, default=True, order=11, text="If set to False, this service will use the system's shared docker daemon instance. This is parameter is forced to False on non-Linux systems.", example="True" ) class KeywordDockerDaemonArgs(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="docker_daemon_args", at=True, required=False, order=12, text="If the service has docker-type container resources, the service handles the startup of a private docker daemon. OpenSVC sets the socket and data dir parameters. Admins can set extra parameters using this keyword. 
For example, it can be useful to set the --ip parameter for a docker registry service.", example="--ip 1.2.3.4" ) class KeywordDockerSwarmArgs(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="docker_swarm_args", at=True, required=False, order=12, text="The arguments passed to docker swarm init on the flex primary, and to docker swarm join on the the other nodes. The --token argument must not be specified, as it is handled by the agent. Scoping this parameter permits to set additional parameters on the flex_primary for use with swarm init only, like --autolock.", example="--advertize-addr {ip#0.ipname} --listen-addr {ip#0.ipname}", ) class KeywordSubsetParallel(Keyword): def __init__(self): Keyword.__init__( self, section="subset", keyword="parallel", at=True, candidates=(True, False), default=False, text="If set to true, actions are executed in parallel amongst the subset member resources.", required=False, order=2 ) class KeywordStonithType(Keyword): def __init__(self): Keyword.__init__( self, section="stonith", keyword="type", at=True, candidates=["ilo", "callout"], text="The type of stonith.", required=True, order=1 ) class KeywordStonithTarget(Keyword): def __init__(self): Keyword.__init__( self, section="stonith", keyword="target", at=True, text="The server management console to pass the stonith command to, as defined in the corresponding auth.conf section title.", required=True, order=2 ) class KeywordStonithCalloutCmd(Keyword): def __init__(self): Keyword.__init__( self, section="stonith", rtype="callout", at=True, keyword="cmd", text="The command to execute on target to stonith.", required=True, order=3 ) class KeywordContainerType(Keyword): def __init__(self): Keyword.__init__( self, section="container", keyword="type", at=True, candidates=rcEnv.vt_supported, text="The type of container.", required=True, order=1 ) class KeywordContainerZoneDeleteOnStop(Keyword): def __init__(self): Keyword.__init__( self, 
section="container", rtype="zone", keyword="delete_on_stop", at=True, candidates=(True, False), text="If set to true, the zone configuration is deleted after a resource stop. The agent maintains an export of the configuration for the next start. This export is replicated to the other nodes and drp nodes so they can take over the zone even if it is completely hosted on a shared disk.", default=False, required=False, order=1 ) class KeywordDockerDockerService(Keyword): def __init__(self): Keyword.__init__( self, section="container", keyword="docker_service", at=True, order=9, required=False, rtype="docker", default=False, candidates=(True, False), text="If set to True, run this container as a docker service, which is possible if the cluster_type is set to flex and the docker swarm properly initialized.", example=False ) class KeywordDockerRunImage(Keyword): def __init__(self): Keyword.__init__( self, section="container", keyword="run_image", at=True, order=9, required=False, rtype="docker", text="The docker image pull, and run the container with.", example="83f2a3dd2980" ) class KeywordDockerRunCommand(Keyword): def __init__(self): Keyword.__init__( self, section="container", keyword="run_command", at=True, order=1, required=False, rtype="docker", text="The command to execute in the docker container on run.", example="/opt/tomcat/bin/catalina.sh" ) class KeywordDockerRunArgs(Keyword): def __init__(self): Keyword.__init__( self, section="container", keyword="run_args", at=True, order=2, required=False, rtype="docker", text="Extra arguments to pass to the docker run command, like volume and port mappings.", example="-v /opt/docker.opensvc.com/vol1:/vol1:rw -p 37.59.71.25:8080:8080" ) class KeywordVirtinst(Keyword): def __init__(self): Keyword.__init__( self, section="container", keyword="virtinst", rtype=["kvm", "xen", "ovm"], text="The virt-install command to use to create the container.", required=True, provisioning=True ) class KeywordSnap(Keyword): def 
__init__(self): Keyword.__init__( self, section="container", keyword="snap", rtype=["kvm", "xen", "ovm", "zone", "esx"], text="The target snapshot/clone full path containing the new container disk files.", required=True, provisioning=True ) class KeywordSnapof(Keyword): def __init__(self): Keyword.__init__( self, section="container", keyword="snapof", rtype=["kvm", "xen", "ovm", "zone", "esx"], text="The snapshot origin full path containing the reference container disk files.", required=True, provisioning=True ) class KeywordContainerOrigin(Keyword): def __init__(self): Keyword.__init__( self, section="container", keyword="container_origin", rtype="zone", text="The origin container having the reference container disk files.", required=True, provisioning=True ) class KeywordJailRoot(Keyword): def __init__(self): Keyword.__init__( self, section="container", keyword="jailroot", rtype="jail", text="Sets the root fs directory of the container", required=True, provisioning=False ) class KeywordLxcCf(Keyword): def __init__(self): Keyword.__init__( self, section="container", keyword="cf", rtype="lxc", text="Defines a lxc configuration file in a non-standard location.", required=False, provisioning=True, example="/srv/mycontainer/config" ) class KeywordRootfs(Keyword): def __init__(self): Keyword.__init__( self, section="container", keyword="rootfs", rtype=["lxc", "vz", "zone"], text="Sets the root fs directory of the container", required=True, provisioning=True ) class KeywordTemplate(Keyword): def __init__(self): Keyword.__init__( self, section="container", keyword="template", rtype=["lxc", "vz", "zone"], text="Sets the url of the template unpacked into the container root fs.", required=True, provisioning=True ) class KeywordVmName(Keyword): def __init__(self): Keyword.__init__( self, section="container", keyword="name", at=True, order=2, rtype=rcEnv.vt_supported, text="This need to be set if the virtual machine name is different from the service name." 
) class KeywordContainerRcmd(Keyword): def __init__(self): Keyword.__init__( self, section="container", keyword="rcmd", at=True, order=2, rtype="lxc", example="lxc-attach -e -n osvtavnprov01 -- ", text="An container remote command override the agent default" ) class KeywordOsvcRootPath(Keyword): def __init__(self): Keyword.__init__( self, section="container", keyword="osvc_root_path", at=True, order=2, rtype=rcEnv.vt_supported, example="/opt/opensvc", text="If the OpenSVC agent is installed via package in the container, this parameter must not be set or set to 'None'. Else the value can be set to the fullpath hosting the agent installed from sources." ) class KeywordGuestos(Keyword): def __init__(self): Keyword.__init__( self, section="container", keyword="guestos", at=True, rtype=rcEnv.vt_supported, order=11, candidates=["unix", "windows"], default=None, text="The operating system in the virtual machine." ) class KeywordJailIps(Keyword): def __init__(self): Keyword.__init__( self, section="container", keyword="ips", at=True, rtype="jail", order=11, text="The ipv4 addresses of the jail." ) class KeywordJailIp6s(Keyword): def __init__(self): Keyword.__init__( self, section="container", keyword="ip6s", at=True, rtype="jail", order=11, text="The ipv6 addresses of the jail." ) class KeywordSharedIpGroup(Keyword): def __init__(self): Keyword.__init__( self, section="container", keyword="shared_ip_group", at=True, order=11, rtype=rcEnv.vt_cloud, text="The cloud shared ip group name to allocate a public ip from." ) class KeywordSize(Keyword): def __init__(self): Keyword.__init__( self, section="container", keyword="size", at=True, order=11, rtype=rcEnv.vt_cloud, text="The cloud vm size, as known to the cloud manager. Example: tiny." ) class KeywordKeyName(Keyword): def __init__(self): Keyword.__init__( self, section="container", keyword="key_name", at=True, order=11, rtype=rcEnv.vt_cloud, text="The key name, as known to the cloud manager, to trust in the provisioned vm." 
) class KeywordSrpPrmCores(Keyword): def __init__(self): Keyword.__init__( self, section="container", keyword="prm_cores", order=11, rtype="srp", default=1, provisioning=True, text="The number of core to bind the SRP container to." ) class KeywordSrpIp(Keyword): def __init__(self): Keyword.__init__( self, section="container", keyword="ip", at=True, order=11, rtype="srp", provisioning=True, text="The ip name or addr used to create the SRP container." ) class KeywordSrpRootpath(Keyword): def __init__(self): Keyword.__init__( self, section="container", keyword="rootpath", at=True, order=11, rtype="srp", provisioning=True, text="The path of the SRP container root filesystem." ) class KeywordCloudId(Keyword): def __init__(self): Keyword.__init__( self, section="container", keyword="cloud_id", at=True, order=11, rtype=rcEnv.vt_cloud, text="The cloud id as configured in node.conf. Example: cloud#1." ) class KeywordVmUuid(Keyword): def __init__(self): Keyword.__init__( self, section="container", keyword="uuid", at=True, order=11, rtype="ovm", text="The virtual machine unique identifier used to pass commands on the VM." ) class KeywordAntiAffinity(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="anti_affinity", at=True, order=15, required=False, default=None, text="A whitespace separated list of services this service is not allowed to be started on the same node. The svcmgr --ignore-affinity option can be set to override this policy.", example="svc1 svc2" ) class KeywordPrKey(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="prkey", order=15, at=True, required=False, text="Defines a specific default persistent reservation key for the service. A prkey set in a resource takes priority. If no prkey is specified in the service nor in the DEFAULT section, the prkey in node.conf is used. If node.conf has no prkey set, the hostid is computed and written in node.conf." 
) class KeywordNoPreemptAbort(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="no_preempt_abort", order=15, at=True, required=False, candidates=(True, False), default=False, text="If set to 'true', OpenSVC will preempt scsi reservation with a preempt command instead of a preempt and and abort. Some scsi target implementations do not support this last mode (esx). If set to 'false' or not set, 'no_preempt_abort' can be activated on a per-resource basis." ) class KeywordCluster(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="cluster", order=15, required=False, default=None, text="The symbolic name of the cluster. Used to label shared disks represented to tiers-2 consumers like containers.", example="cluster1" ) class KeywordShowDisabled(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="show_disabled", at=True, order=15, required=False, default=True, candidates=[True, False], text="Specifies if the disabled resources must be included in the print status and json status output." ) class KeywordClusterType(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="cluster_type", at=True, order=15, required=False, default="failover", candidates=["failover", "flex", "autoflex"], text="failover: the service is allowed to be up on one node at a time. allactive: the service must be up on all nodes. flex: the service can be up on n out of m nodes (n <= m), n/m must be in the [flex_min_nodes, flex_max_nodes] range. autoflex: same as flex, but charge the collector to start the service on passive nodes when the average %cpu usage on active nodes > flex_cpu_high_threshold and stop the service on active nodes when the average %cpu usage on active nodes < flex_cpu_low_threshold." 
) class KeywordFlexMinNodes(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="flex_min_nodes", order=16, required=False, default=1, depends=[("cluster_type", ["flex", "autoflex"])], text="Minimum number of active nodes in the cluster. Below this number alerts are raised by the collector, and the collector won't stop any more service instances." ) class KeywordFlexMaxNodes(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="flex_max_nodes", order=16, required=False, default=10, depends=[("cluster_type", ["flex", "autoflex"])], text="Maximum number of active nodes in the cluster. Above this number alerts are raised by the collector, and the collector won't start any more service instances. 0 means unlimited." ) class KeywordFlexCpuMinThreshold(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="flex_cpu_min_threshold", order=16, required=False, default=10, depends=[("cluster_type", ["flex", "autoflex"])], text="Average CPU usage across the active cluster nodes below which the collector raises alerts and decides to stop service instances with autoflex cluster type." ) class KeywordFlexCpuMaxThreshold(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="flex_cpu_max_threshold", order=16, required=False, default=70, depends=[("cluster_type", ["flex", "autoflex"])], text="Average CPU usage across the active cluster nodes above which the collector raises alerts and decides to start new service instances with autoflex cluster type." ) class KeywordServiceType(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="service_type", order=15, required=False, candidates=rcEnv.allowed_svc_envs, text="A non-PRD service can not be brought up on a PRD node, but a PRD service can be startup on a non-PRD node (in a DRP situation). The default value is the node env." 
) class KeywordServiceEnv(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="env", order=15, required=True, default=node_get_node_env(), default_text="", candidates=rcEnv.allowed_svc_envs, text="A non-PRD service can not be brought up on a PRD node, but a PRD service can be startup on a non-PRD node (in a DRP situation). The default value is the node env." ) class KeywordNodes(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="nodes", order=20, at=True, required=True, default=rcEnv.nodename, default_text="", text="List of cluster local nodes able to start the service. Whitespace separated." ) class KeywordAutostartNode(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="autostart_node", order=20, at=True, required=False, default=rcEnv.nodename, default_text="", text="A whitespace-separated list subset of 'nodes'. Defines the nodes where the service will try to start on upon node reboot. On a failover cluster there should only be one autostart node and the start-up will fail if the service is already up on another node though. If not specified, the service will never be started at node boot-time, which is rarely the expected behaviour." ) class KeywordDrpnode(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="drpnode", order=21, at=True, text="The backup node where the service is activated in a DRP situation. This node is also a data synchronization target for 'sync' resources.", example="node1" ) class KeywordDrpnodes(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="drpnodes", order=21, at=True, text="Alternate backup nodes, where the service could be activated in a DRP situation if the 'drpnode' is not available. 
These nodes are also data synchronization targets for 'sync' resources.", example="node1 node2" ) class KeywordEncapnodes(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="encapnodes", order=21, text="The list of containers handled by this service and with an OpenSVC agent installed to handle the encapsulated resources. With this parameter set, parameters can be scoped with the @encapnodes suffix.", example="vm1 vm2" ) class KeywordApp(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="app", order=24, default="DEFAULT", text="Used to identify who is responsible for is service, who is billable and provides a most useful filtering key. Better keep it a short code." ) class KeywordComment(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="comment", order=25, text="Helps users understand the role of the service, which is nice to on-call support people having to operate on a service they are not usually responsible for." ) class KeywordScsireserv(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="scsireserv", at=True, order=25, default=False, candidates=(True, False), text="If set to 'true', OpenSVC will try to acquire a type-5 (write exclusive, registrant only) scsi3 persistent reservation on every path to disks of every disk group attached to this service. Existing reservations are preempted to not block service start-up. If the start-up was not legitimate the data are still protected from being written over from both nodes. If set to 'false' or not set, 'scsireserv' can be activated on a per-resource basis." ) class KeywordBwlimit(KeywordInteger): def __init__(self): KeywordInteger.__init__( self, section="DEFAULT", keyword="bwlimit", order=25, text="Bandwidth limit in KB applied to all rsync transfers. 
Leave empty to enforce no limit.", example="3000" ) class KeywordSyncInterval(KeywordInteger): def __init__(self): KeywordInteger.__init__( self, section="DEFAULT", keyword="sync_interval", order=26, default=121, text="Set the minimum delay between syncs in minutes. If a sync is triggered through crond or manually, it is skipped if last sync occurred less than 'sync_min_delay' ago. The mecanism is enforced by a timestamp created upon each sync completion in /sync/[service]![dst]" ) class KeywordSyncMaxDelay(KeywordInteger): def __init__(self): KeywordInteger.__init__( self, section="DEFAULT", keyword="sync_max_delay", order=27, default=1440, text="Unit is minutes. This sets to delay above which the sync status of the resource is to be considered down. Should be set according to your application service level agreement. The cron job frequency should be set between 'sync_min_delay' and 'sync_max_delay'" ) class KeywordPresnapTrigger(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="presnap_trigger", order=28, text="Define a command to run before creating snapshots. This is most likely what you need to use plug a script to put you data in a coherent state (alter begin backup and the like).", example="/srv/svc1/etc/init.d/pre_snap.sh" ) class KeywordPostsnapTrigger(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="postsnap_trigger", order=29, text="Define a command to run after snapshots are created. 
This is most likely what you need to use plug a script to undo the actions of 'presnap_trigger'.", example="/srv/svc1/etc/init.d/post_snap.sh" ) class KeywordMonitorAction(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="monitor_action", at=True, order=30, default=None, candidates=("reboot", "crash", "freezestop"), text="The action to take when a monitored resource is not up nor standby up, and if the resource restart procedure has failed.", example="reboot" ) class KeywordCreatePg(Keyword): def __init__(self): Keyword.__init__( self, section="DEFAULT", keyword="create_pg", order=30, default=True, candidates=(True, False), text="Use process containers when possible. Containers allow capping memory, swap and cpu usage per service. Lxc containers are naturally containerized, so skip containerization of their startapp." ) class KeywordPgCpus(KeywordInteger): def __init__(self): KeywordInteger.__init__( self, section="DEFAULT", keyword="pg_cpus", order=31, depends=[('create_pg', [True])], text="Allow service process to bind only the specified cpus. Cpus are specified as list or range : 0,1,2 or 0-2", example="0-2" ) class KeywordPgMems(KeywordInteger): def __init__(self): KeywordInteger.__init__( self, section="DEFAULT", keyword="pg_mems", order=31, depends=[('create_pg', [True])], text="Allow service process to bind only the specified memory nodes. Memory nodes are specified as list or range : 0,1,2 or 0-2", example="0-2" ) class KeywordPgCpuShare(KeywordInteger): def __init__(self): KeywordInteger.__init__( self, section="DEFAULT", keyword="pg_cpu_shares", order=31, depends=[('create_pg', [True])], text="Kernel default value is used, which usually is 1024 shares. In a cpu-bound situation, ensure the service does not use more than its share of cpu ressource. 
The actual percentile depends on shares allowed to other services.", example="512" ) class KeywordPgCpuQuota(KeywordInteger): def __init__(self): KeywordInteger.__init__( self, section="DEFAULT", keyword="pg_cpu_quota", order=31, depends=[('create_pg', [True])], text="The percent ratio of one core to allocate to the process group if % is specified, else the absolute value to set in the process group parameter. For example, on Linux cgroups, -1 means unlimited, and a positive absolute value means the number of microseconds to allocate each period. 50%@all means 50% of all cores, and 50%@2 means 50% of two cores.", example="50%@all" ) class KeywordPgMemOomControl(KeywordInteger): def __init__(self): KeywordInteger.__init__( self, section="DEFAULT", keyword="pg_mem_oom_control", order=31, depends=[('create_pg', [True])], text="A flag (0 or 1) that enables or disables the Out of Memory killer for a cgroup. If enabled (0), tasks that attempt to consume more memory than they are allowed are immediately killed by the OOM killer. The OOM killer is enabled by default in every cgroup using the memory subsystem; to disable it, write 1.", example="1" ) class KeywordPgMemLimit(KeywordInteger): def __init__(self): KeywordInteger.__init__( self, section="DEFAULT", keyword="pg_mem_limit", order=31, depends=[('create_pg', [True])], text="Ensures the service does not use more than specified memory (in bytes). The Out-Of-Memory killer get triggered in case of tresspassing.", example="512000000" ) class KeywordPgVmemLimit(KeywordInteger): def __init__(self): KeywordInteger.__init__( self, section="DEFAULT", keyword="pg_vmem_limit", order=31, depends=[('create_pg', [True])], text="Ensures the service does not use more than specified memory+swap (in bytes). The Out-Of-Memory killer get triggered in case of tresspassing. 
The specified value must be greater than pg_mem_limit.", example="1024000000" ) class KeywordPgMemSwappiness(KeywordInteger): def __init__(self): KeywordInteger.__init__( self, section="DEFAULT", keyword="pg_mem_swappiness", order=31, depends=[('create_pg', [True])], text="Set a swappiness value for the process group.", example="40" ) class KeywordPgBlkioWeight(KeywordInteger): def __init__(self): KeywordInteger.__init__( self, section="DEFAULT", keyword="pg_blkio_weight", order=31, depends=[('create_pg', [True])], text="Block IO relative weight. Value: between 10 and 1000. Kernel default: 1000.", example="50" ) class KeywordAppScript(Keyword): def __init__(self): Keyword.__init__( self, section="app", keyword="script", at=True, order=9, required=True, text="Full path to the app launcher script. Or its basename if the file is hosted in the .d path." ) class KeywordAppTimeout(Keyword): def __init__(self): Keyword.__init__( self, section="app", keyword="timeout", order=9, at=True, required=False, text="Wait for seconds max before declaring the app launcher action a failure. If no timeout is specified, the agent waits indefinitely for the app launcher to return. The timeout parameter can be coupled with optional=True to not abort a service start when an app launcher did not return.", example="180" ) class KeywordAppStart(Keyword): def __init__(self): Keyword.__init__( self, section="app", keyword="start", at=True, order=10, required=False, text="Start up sequencing number." ) class KeywordAppStop(Keyword): def __init__(self): Keyword.__init__( self, section="app", keyword="stop", at=True, order=11, required=False, text="Stop sequencing number." ) class KeywordAppCheck(Keyword): def __init__(self): Keyword.__init__( self, section="app", keyword="check", at=True, order=11, required=False, text="Check up sequencing number." 
) class KeywordAppInfo(Keyword): def __init__(self): Keyword.__init__( self, section="app", keyword="info", at=True, order=12, required=False, text="Info up sequencing number." ) class KeywordSyncType(Keyword): def __init__(self): Keyword.__init__( self, section="sync", keyword="type", order=10, required=True, candidates=("rsync", "docker", "dds", "netapp", "symsrdfs", "zfs", "btrfs", "symclone", "symsnap", "hp3par", "hp3parsnap", "evasnap", "ibmdssnap", "dcssnap", "dcsckpt", "necismsnap", "zfssnap", "btrfssnap", "rados", "s3"), default="rsync", text="Point a sync driver to use." ) class KeywordSyncDockerTarget(Keyword): def __init__(self): Keyword.__init__( self, section="sync", keyword="target", rtype="docker", order=11, at=True, required=True, default=None, candidates=["nodes", "drpnodes", "nodes drpnodes"], text="Destination nodes of the sync." ) class KeywordSyncS3Snar(Keyword): def __init__(self): Keyword.__init__( self, section="sync", keyword="snar", rtype="s3", order=10, at=True, required=False, example="/srv/mysvc/var/sync.1.snar", text="The GNU tar snar file full path. The snar file stored the GNU tar metadata needed to do an incremental tarball. If the service fails over shared disks the snar file should be stored there, so the failover node can continue the incremental cycle." ) class KeywordSyncS3Src(Keyword): def __init__(self): Keyword.__init__( self, section="sync", keyword="src", rtype="s3", order=10, at=True, required=True, example="/srv/mysvc/tools /srv/mysvc/apps*", text="Source globs as passed as paths to archive to a tar command." ) class KeywordSyncS3Options(Keyword): def __init__(self): Keyword.__init__( self, section="sync", keyword="options", rtype="s3", order=10, at=True, required=False, example="--exclude *.pyc", text="Options passed to GNU tar for archiving." 
) class KeywordSyncS3Bucket(Keyword): def __init__(self): Keyword.__init__( self, section="sync", keyword="bucket", rtype="s3", order=10, at=True, required=True, example="opensvc-myapp", text="The name of the S3 bucket to upload the backup to." ) class KeywordSyncS3FullSchedule(Keyword): def __init__(self): Keyword.__init__( self, section="sync", keyword="full_schedule", rtype="s3", order=10, at=True, required=True, example="@1441 sun", default="@1441 sun", text="The schedule of full backups. sync_update actions are triggered according to the resource 'schedule' parameter, and do a full backup if the current date matches the 'full_schedule' parameter or an incremental backup otherwise." ) class KeywordSyncZfsSnapRecursive(Keyword): def __init__(self): Keyword.__init__( self, section="sync", keyword="recursive", rtype="zfssnap", order=10, at=True, required=False, example="true", default=True, text="Set to true to snap recursively the datasets." ) class KeywordSyncZfsSnapName(Keyword): def __init__(self): Keyword.__init__( self, section="sync", keyword="name", rtype="zfssnap", order=10, at=True, required=False, example="weekly", text="A name included in the snapshot name to avoid retention conflicts between multiple zfs snapshot resources. A full snapshot name is formatted as ..snap.. Example: data.weekly.snap.2016-03-09.10:09:52" ) class KeywordSyncZfsSnapDataset(Keyword): def __init__(self): Keyword.__init__( self, section="sync", keyword="dataset", rtype="zfssnap", order=10, at=True, required=True, example="svc1fs/data svc1fs/log", text="A whitespace separated list of datasets to snapshot." ) class KeywordSyncZfsSnapKeep(Keyword): def __init__(self): Keyword.__init__( self, section="sync", keyword="keep", rtype="zfssnap", order=10, at=True, required=True, default=3, example="3", text="The maximum number of snapshots to retain." 
) class KeywordSyncBtrfsSnapName(Keyword): def __init__(self): Keyword.__init__( self, section="sync", keyword="name", rtype="btrfssnap", order=10, at=True, required=False, example="weekly", text="A name included in the snapshot name to avoid retention conflicts between multiple btrfs snapshot resources. A full snapshot name is formatted as ..snap.. Example: data.weekly.snap.2016-03-09.10:09:52" ) class KeywordSyncBtrfsSnapSubvol(Keyword): def __init__(self): Keyword.__init__( self, section="sync", keyword="subvol", rtype="btrfssnap", order=10, at=True, required=True, example="svc1fs:data svc1fs:log", text="A whitespace separated list of