clustershell-1.6/0000755000130500135250000000000011741572334013421 5ustar thiellgpocreclustershell-1.6/doc/0000755000130500135250000000000011741572333014165 5ustar thiellgpocreclustershell-1.6/doc/examples/0000755000130500135250000000000011741572334016004 5ustar thiellgpocreclustershell-1.6/doc/examples/check_nodes.py0000755000130500135250000001003511741571247020627 0ustar thiellgpocre#!/usr/bin/python # check_nodes.py: ClusterShell simple example script. # # This script runs a simple command on remote nodes and report node # availability (basic health check) and also min/max boot dates. # It shows an example of use of Task, NodeSet and EventHandler objects. # Feel free to copy and modify it to fit your needs. # # Usage example: ./check_nodes.py -n node[1-99] import optparse from datetime import date, datetime import time from ClusterShell.Event import EventHandler from ClusterShell.NodeSet import NodeSet from ClusterShell.Task import task_self class CheckNodesResult: """Our result class""" def __init__(self): """Initialize result class""" self.nodes_ok = NodeSet() self.nodes_ko = NodeSet() self.min_boot_date = None self.max_boot_date = None def show(self): """Display results""" if self.nodes_ok: print "%s: OK (boot date: min %s, max %s)" % \ (self.nodes_ok, self.min_boot_date, self.max_boot_date) if self.nodes_ko: print "%s: FAILED" % self.nodes_ko class CheckNodesHandler(EventHandler): """Our ClusterShell EventHandler""" def __init__(self, result): """Initialize our event handler with a ref to our result object.""" EventHandler.__init__(self) self.result = result def ev_read(self, worker): """Read event from remote nodes""" node = worker.current_node # this is an example to demonstrate remote result parsing bootime = " ".join(worker.current_msg.strip().split()[2:]) date_boot = None for fmt in ("%Y-%m-%d %H:%M",): # formats with year try: # datetime.strptime() is Python2.5+, use old method instead date_boot = datetime(*(time.strptime(bootime, fmt)[0:6])) except 
ValueError: pass for fmt in ("%b %d %H:%M",): # formats without year try: date_boot = datetime(date.today().year, \ *(time.strptime(bootime, fmt)[1:6])) except ValueError: pass if date_boot: if not self.result.min_boot_date or \ self.result.min_boot_date > date_boot: self.result.min_boot_date = date_boot if not self.result.max_boot_date or \ self.result.max_boot_date < date_boot: self.result.max_boot_date = date_boot self.result.nodes_ok.add(node) else: self.result.nodes_ko.add(node) def ev_timeout(self, worker): """Timeout occurred on some nodes""" self.result.nodes_ko.add(NodeSet.fromlist(worker.iter_keys_timeout())) def ev_close(self, worker): """Worker has finished (command done on all nodes)""" self.result.show() def main(): """ Main script function """ # Initialize option parser parser = optparse.OptionParser() parser.add_option("-d", "--debug", action="store_true", dest="debug", default=False, help="Enable debug mode") parser.add_option("-n", "--nodes", action="store", dest="nodes", default="@all", help="Target nodes (default @all group)") parser.add_option("-f", "--fanout", action="store", dest="fanout", default="128", help="Fanout window size (default 128)", type=int) parser.add_option("-t", "--timeout", action="store", dest="timeout", default="5", help="Timeout in seconds (default 5)", type=float) options, _ = parser.parse_args() # Get current task (associated to main thread) task = task_self() nodes_target = NodeSet(options.nodes) task.set_info("fanout", options.fanout) if options.debug: print "nodeset : %s" % nodes_target task.set_info("debug", True) # Create ClusterShell event handler handler = CheckNodesHandler(CheckNodesResult()) # Schedule remote command and run task (blocking call) task.run("who -b", nodes=nodes_target, handler=handler, \ timeout=options.timeout) if __name__ == '__main__': main() clustershell-1.6/doc/extras/0000755000130500135250000000000011741572333015473 5ustar 
thiellgpocreclustershell-1.6/doc/extras/vim/0000755000130500135250000000000011741572333016266 5ustar thiellgpocreclustershell-1.6/doc/extras/vim/ftdetect/0000755000130500135250000000000011741572334020071 5ustar thiellgpocreclustershell-1.6/doc/extras/vim/ftdetect/clustershell.vim0000644000130500135250000000043011741571247023316 0ustar thiellgpocre" " Installed As: vim/ftdetect/clustershell.vim " au BufNewFile,BufRead *clush.conf setlocal filetype=clushconf au BufNewFile,BufRead *groups.conf setlocal filetype=groupsconf au BufNewFile,BufRead *groups.conf.d/*.conf setlocal filetype=groupsconf clustershell-1.6/doc/extras/vim/syntax/0000755000130500135250000000000011741572334017615 5ustar thiellgpocreclustershell-1.6/doc/extras/vim/syntax/groupsconf.vim0000644000130500135250000000317411741571247022526 0ustar thiellgpocre " Vim syntax file for ClusterShell groups.conf " For version 5.x: Clear all syntax items " For version 6.x: Quit when a syntax file was already loaded if version < 600 syntax clear elseif exists("b:current_syntax") finish endif " shut case off syn case ignore " Main/default syn match groupsDefaultValue "\(:\|=\)\s*\w\+$"ms=s+1 contained syn match groupsColonValue "\(:\|=\).*" contained contains=groupsDefaultValue syn match groupsDefaultKey "^default\(:\|=\).*$" contains=groupsColonValue syn match groupsGroupsDirKey "^groupsdir\(:\|=\)" " Sources syn match groupsVars "\(\$GROUP\|\$NODE\)" contained syn match groupsKeys "^\w\+\(:\|=\)"me=e-1 contained syn match groupsKeyValue "^\(map\|all\|list\|reverse\)\+\(:\|=\).*$" contains=groupsKeys,groupsVars syn match groupsComment "#.*$" syn match groupsComment ";.*$" syn match groupsHeader "\[\w\+\]" syn match groupsMainHeader "\[Main\]" " Define the default highlighting. 
" For version 5.7 and earlier: only when not done already " For version 5.8 and later: only when an item doesn't have highlighting yet if version >= 508 || !exists("did_groupsconf_syntax_inits") if version < 508 let did_groupsconf_syntax_inits = 1 command -nargs=+ HiLink hi link else command -nargs=+ HiLink hi def link endif HiLink groupsHeader Special HiLink groupsComment Comment HiLink groupsMainHeader Constant HiLink groupsDefaultKey Identifier HiLink groupsGroupsDirKey Identifier HiLink groupsDefaultValue Special HiLink groupsKeys Identifier HiLink groupsVars Keyword delcommand HiLink endif let b:current_syntax = "groupsconf" " vim:ts=8 clustershell-1.6/doc/extras/vim/syntax/clushconf.vim0000644000130500135250000000210711741571247022320 0ustar thiellgpocre " Vim syntax file for clush.conf " For version 5.x: Clear all syntax items " For version 6.x: Quit when a syntax file was already loaded if version < 600 syntax clear elseif exists("b:current_syntax") finish endif " shut case off syn case ignore syn match clushComment "#.*$" syn match clushComment ";.*$" syn match clushHeader "\[\w\+\]" syn keyword clushKeys fanout command_timeout connect_timeout color fd_max history_size node_count verbosity ssh_user ssh_path ssh_options " Define the default highlighting. 
" For version 5.7 and earlier: only when not done already " For version 5.8 and later: only when an item doesn't have highlighting yet if version >= 508 || !exists("did_clushconf_syntax_inits") if version < 508 let did_clushconf_syntax_inits = 1 command -nargs=+ HiLink hi link else command -nargs=+ HiLink hi def link endif HiLink clushHeader Special HiLink clushComment Comment HiLink clushLabel Type HiLink clushKeys Identifier delcommand HiLink endif let b:current_syntax = "clushconf" " vim:ts=8 clustershell-1.6/doc/man/0000755000130500135250000000000011741572333014740 5ustar thiellgpocreclustershell-1.6/doc/man/man1/0000755000130500135250000000000011741572334015575 5ustar thiellgpocreclustershell-1.6/doc/man/man1/nodeset.10000644000130500135250000002073611741571247017332 0ustar thiellgpocre.\" Man page generated from reStructeredText. . .TH NODESET 1 "2012-03-31" "1.6" "ClusterShell User Manual" .SH NAME nodeset \- compute advanced nodeset operations . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .INDENT 0.0 .INDENT 3.5 .sp \fCnodeset\fP [COMMAND] [OPTIONS] [nodeset1 [\-ixX] nodeset2|...] .UNINDENT .UNINDENT .SH DESCRIPTION .sp \fCnodeset\fP is an utility command provided with the ClusterShell library which implements some features of ClusterShell\(aqs NodeSet and RangeSet Python classes. It provides easy manipulation of indexed cluster nodes and node groups. 
It is automatically bound to the library node group resolution mechanism. Thus, \fCnodeset\fP is especially useful to enhance cluster aware administration shell scripts. .SH OPTIONS .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .TP .B \-\-version . show program\(aqs version number and exit .TP .B \-h, \-\-help . show this help message and exit .TP .BI \-s \ GROUPSOURCE, \ \-\-groupsource\fB= GROUPSOURCE . optional \fCgroups.conf\fP(5) group source to use .UNINDENT .INDENT 0.0 .TP .B Commands: .INDENT 7.0 .TP .B \-c, \-\-count . show number of nodes in nodeset(s) .TP .B \-e, \-\-expand . expand nodeset(s) to separate nodes (see also \-S \fISEPARATOR\fP) .TP .B \-f, \-\-fold . fold nodeset(s) (or separate nodes) into one nodeset .TP .B \-l, \-\-list . list node groups, list node groups and nodes (\fC\-ll\fP) or list node groups, nodes and node count (\fC\-lll\fP). When no argument is specified at all, this command will list all node group names found in selected group source (see also \-s \fIGROUPSOURCE\fP). If any nodesets are specified as argument, this command will find node groups these nodes belongs to (individually). Optionally for each group, the fraction of these nodes being member of the group may be displayed (with \fC\-ll\fP), and also member count/total group node count (with \fC\-lll\fP). If a single hyphen\-minus (\-) is given as a nodeset, it will be read from standard input. .TP .B \-r, \-\-regroup . fold nodes using node groups (see \-s \fIGROUPSOURCE\fP) .TP .B \-\-groupsources . list all configured group sources (see \fCgroups.conf\fP(5)) .UNINDENT .TP .B Operations: .INDENT 7.0 .TP .BI \-x \ SUB_NODES, \ \-\-exclude\fB= SUB_NODES . exclude specified nodeset .TP .BI \-i \ AND_NODES, \ \-\-intersection\fB= AND_NODES . calculate nodesets intersection .TP .BI \-X \ XOR_NODES, \ \-\-xor\fB= XOR_NODES . calculate symmetric difference between nodesets .UNINDENT .TP .B Options: .INDENT 7.0 .TP .B \-a, \-\-all . 
call external node groups support to display all nodes .TP .BI \-\-autostep\fB= AUTOSTEP . auto step threshold number when folding nodesets, if not specified, auto step is disabled. Example: autostep=4, "node2 node4 node6" folds in node[2,4,6] but autostep=3, "node2 node4 node6" folds in node[2\-6/2] .TP .B \-d, \-\-debug . output more messages for debugging purpose .TP .B \-q, \-\-quiet . be quiet, print essential output only .TP .B \-R, \-\-rangeset . switch to RangeSet instead of NodeSet. Useful when working on numerical cluster ranges, eg. 1,5,18\-31 .TP .B \-G, \-\-groupbase . hide group source prefix (always \fI@groupname\fP) .TP .BI \-S \ SEPARATOR, \ \-\-separator\fB= SEPARATOR . separator string to use when expanding nodesets (default: \(aq \(aq) .TP .BI \-I \ SLICE_RANGESET, \ \-\-slice\fB= SLICE_RANGESET . return sliced off result; examples of SLICE_RANGESET are "0" for simple index selection, or "1\-9/2,16" for complex rangeset selection .TP .BI \-\-split\fB= MAXSPLIT . split result into a number of subsets .TP .B \-\-contiguous . split result into contiguous subsets (ie. for nodeset, subsets will contain nodes with same pattern name and a contiguous range of indexes, like foobar[1\-100]; for rangeset, subsets with consists in contiguous index ranges)""" .UNINDENT .UNINDENT .UNINDENT .UNINDENT .sp For a short explanation of these options, see \fC\-h, \-\-help\fP. .sp If a single hyphen\-minus (\-) is given as a nodeset, it will be read from standard input. .SH EXTENDED PATTERNS .sp The \fCnodeset\fP command benefits from ClusterShell NodeSet basic arithmetic addition. This feature extends recognized string patterns by supporting operators matching all Operations seen previously. String patterns are read from left to right, by proceeding any character operators accordinately. 
.INDENT 0.0 .TP .B Supported character operators .INDENT 7.0 .TP .B \fC,\fP .sp indicates that the \fIunion\fP of both left and right nodeset should be computed before continuing .TP .B \fC!\fP .sp indicates the \fIdifference\fP operation .TP .B \fC&\fP .sp indicates the \fIintersection\fP operation .TP .B \fC^\fP .sp indicates the \fIsymmetric difference\fP (XOR) operation .UNINDENT .sp Care should be taken to escape these characters as needed when the shell does not interpret them literally. .TP .B Examples of use of extended patterns .INDENT 7.0 .TP .B $ nodeset \-f node[0\-7],node[8\-10] .UNINDENT .nf node[0\-10] .fi .sp .INDENT 7.0 .TP .B $ nodeset \-f node[0\-10]!node[8\-10] .UNINDENT .nf node[0\-7] .fi .sp .INDENT 7.0 .TP .B $ nodeset \-f node[0\-10]&node[5\-13] .UNINDENT .nf node[5\-10] .fi .sp .INDENT 7.0 .TP .B $ nodeset \-f node[0\-10]^node[5\-13] .UNINDENT .nf node[0\-4,11\-13] .fi .sp .TP .B Example of advanced usage .INDENT 7.0 .TP .B $ nodeset \-f @gpu^@slurm:bigmem!@chassis[1\-9/2] .UNINDENT .sp This computes a folded nodeset containing nodes found in group @gpu and @slurm:bigmem, but not in both, minus the nodes found in odd chassis groups from 1 to 9. .UNINDENT .SH EXIT STATUS .sp An exit status of zero indicates success of the \fCnodeset\fP command. A non\-zero exit status indicates failure. 
.SH EXAMPLES .INDENT 0.0 .TP .B Getting the node count .INDENT 7.0 .TP .B $ nodeset \-c node[0\-7,32\-159] .UNINDENT .nf 136 .fi .sp .INDENT 7.0 .TP .B $ nodeset \-c node[0\-7,32\-159] node[160\-163] .UNINDENT .nf 140 .fi .sp .INDENT 7.0 .TP .B $ nodeset \-c @login .UNINDENT .nf 4 .fi .sp .TP .B Folding nodesets .INDENT 7.0 .TP .B $ nodeset \-f node[0\-7,32\-159] node[160\-163] .UNINDENT .nf node[0\-7,32\-163] .fi .sp .INDENT 7.0 .TP .B $ echo node3 node6 node1 node2 node7 node5 | nodeset \-f .UNINDENT .nf node[1\-3,5\-7] .fi .sp .TP .B Expanding nodesets .INDENT 7.0 .TP .B $ nodeset \-e node[160\-163] .UNINDENT .nf node160 node161 node162 node163 .fi .sp .TP .B Excluding nodes from nodeset .INDENT 7.0 .TP .B $ nodeset \-f node[32\-159] \-x node33 .UNINDENT .nf node[32,34\-159] .fi .sp .TP .B Computing nodesets intersection .INDENT 7.0 .TP .B $ nodeset \-f node[32\-159] \-i node[0\-7,20\-21,32,156\-159] .UNINDENT .nf node[32,156\-159] .fi .sp .TP .B Computing nodesets symmetric difference (xor) .INDENT 7.0 .TP .B $ nodeset \-f node[33\-159] \-\-xor node[32\-33,156\-159] .UNINDENT .nf node[32,34\-155] .fi .sp .TP .B Splitting nodes into several nodesets (expanding results) .INDENT 7.0 .TP .B $ nodeset \-e \-\-split=3 node[1\-9] .UNINDENT .nf node1 node2 node3 node4 node5 node6 node7 node8 node9 .fi .sp .TP .B Splitting non\-contiguous nodesets (folding results) .INDENT 7.0 .TP .B $ nodeset \-f \-\-contiguous node2 node3 node4 node8 node9 .UNINDENT .nf node[2\-4] node[8\-9] .fi .sp .UNINDENT .SH HISTORY .sp Command syntax has been changed since \fCnodeset\fP command available with ClusterShell v1.1. Operations, like \fI\-\-intersection\fP or \fI\-x\fP, are now specified between nodesets in the command line. 
.INDENT 0.0 .TP .B ClusterShell v1.1: .INDENT 7.0 .TP .B $ nodeset \-f \-x node[3,5\-6,9] node[1\-9] .UNINDENT .nf node[1\-2,4,7\-8] .fi .sp .TP .B ClusterShell v1.2+: .INDENT 7.0 .TP .B $ nodeset \-f node[1\-9] \-x node[3,5\-6,9] .UNINDENT .nf node[1\-2,4,7\-8] .fi .sp .UNINDENT .SH SEE ALSO .sp \fCclush\fP(1), \fCclubak\fP(1), \fCgroups.conf\fP(5). .SH BUG REPORTS .INDENT 0.0 .TP .B Use the following URL to submit a bug report or feedback: . \fI\%https://github.com/cea\-hpc/clustershell/issues\fP .UNINDENT .SH AUTHOR Stephane Thiell, CEA DAM .SH COPYRIGHT CeCILL-C V1 .\" Generated by docutils manpage writer. .\" . clustershell-1.6/doc/man/man1/clush.10000644000130500135250000003060111741571247016777 0ustar thiellgpocre.\" Man page generated from reStructeredText. . .TH CLUSH 1 "2012-04-01" "1.6" "ClusterShell User Manual" .SH NAME clush \- execute shell commands on a cluster . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .sp \fCclush\fP \fC\-a\fP | \fC\-g\fP \fIgroup\fP | \fC\-w\fP \fInodes\fP [OPTIONS] .sp \fCclush\fP \fC\-a\fP | \fC\-g\fP \fIgroup\fP | \fC\-w\fP \fInodes\fP [OPTIONS] \fIcommand\fP .sp \fCclush\fP \fC\-a\fP | \fC\-g\fP \fIgroup\fP | \fC\-w\fP \fInodes\fP [OPTIONS] \-\-copy \fIfile\fP | \fIdir\fP [ \fIfile\fP | \fIdir\fP ...] 
[ \-\-dest \fIpath\fP ] .sp \fCclush\fP \fC\-a\fP | \fC\-g\fP \fIgroup\fP | \fC\-w\fP \fInodes\fP [OPTIONS] \-\-rcopy \fIfile\fP | \fIdir\fP [ \fIfile\fP | \fIdir\fP ...] [ \-\-dest \fIpath\fP ] .SH DESCRIPTION .sp \fCclush\fP is a program for executing commands in parallel on a cluster and for gathering their results. \fCclush\fP executes commands interactively or can be used within shell scripts and other applications. It is a partial front\-end to the ClusterShell library that ensures a light, unified and robust parallel command execution framework. Thus, it allows traditional shell scripts to benefit from some of the library features. \fCclush\fP currently makes use of the Ssh worker of ClusterShell that only requires \fCssh\fP(1) (OpenSSH SSH client). .SH INVOCATION .sp \fCclush\fP can be started non\-interactively to run a shell \fIcommand\fP, or can be invoked as an interactive shell. To start a \fCclush\fP interactive session, invoke the \fCclush\fP command without providing \fIcommand\fP. .INDENT 0.0 .TP .B Non\-interactive mode . When \fCclush\fP is started non\-interactively, the \fIcommand\fP is executed on the specified remote hosts in parallel. If option \fC\-b\fP or \fC\-\-dshbak\fP is specified, \fCclush\fP waits for command completion and then displays gathered output results. .sp The \fC\-w\fP option allows you to specify remote hosts by using ClusterShell NodeSet syntax, including the node groups \fC@group\fP special syntax and the \fCExtended Patterns\fP syntax to benefits from NodeSet basic arithmetics (like \fC@Agroup\e&@Bgroup\fP). See EXTENDED PATTERNS in \fCnodeset\fP(1) and also \fCgroups.conf\fP(5) for more information. .sp Unless option \fC\-\-nostdin\fP is specified, \fCclush\fP detects when its standard input is connected to a terminal (as determined by \fCisatty\fP(3)). If actually connected to a terminal, \fCclush\fP listens to standard input when commands are running, waiting for an \fIEnter\fP key press. 
Doing so will display the status of current nodes. If standard input is not connected to a terminal, and unless option \fC\-\-nostdin\fP is specified, \fCclush\fP binds the standard input of the remote commands to its own standard input, allowing scripting methods like: .INDENT 7.0 .INDENT 3.5 .nf # echo foo | clush \-w node[40\-42] \-b cat \-\-\-\-\-\-\-\-\-\-\-\-\-\-\- node[40\-42] \-\-\-\-\-\-\-\-\-\-\-\-\-\-\- foo .fi .sp .UNINDENT .UNINDENT .sp Please see some other great examples in the EXAMPLES section below. .TP .B Interactive session . If a \fIcommand\fP is not specified, and its standard input is connected to a terminal, \fCclush\fP runs interactively. In this mode, \fCclush\fP uses the GNU \fCreadline\fP library to read command lines. Readline provides commands for searching through the command history for lines containing a specified string. For instance, type Control\-R to search in the history for the next entry matching the search string typed so far. \fCclush\fP also recognizes special single\-character prefixes that allows the user to see and modify the current nodeset (the nodes where the commands are executed). .INDENT 7.0 .TP .B Single\-character interactive commands are: .INDENT 7.0 .TP .B clush> ? . show current nodeset .TP .B clush> = . set current nodeset .TP .B clush> + . add nodes to current nodeset .TP .B clush> \- . remove nodes from current nodeset .TP .B clush> !COMMAND . execute COMMAND on the local system .TP .B clush> = . toggle the output format (gathered or standard mode) .UNINDENT .UNINDENT .sp To leave an interactive session, type \fCquit\fP or Control\-D. .TP .B File copying mode ( \fC\-\-copy\fP ) .sp When \fCclush\fP is started with the \fC\-c\fP or \fC\-\-copy\fP option, it will attempt to copy specified \fIfile\fP and/or \fIdir\fP to the provided target cluster nodes. If the \fC\-\-dest\fP option is specified, it will put the copied files there. 
.TP .B Reverse file copying mode ( \fC\-\-rcopy\fP ) .sp When \fCclush\fP is started with the \fC\-\-rcopy\fP option, it will attempt to retrieve specified \fIfile\fP and/or \fIdir\fP from provided cluster nodes. If the \fC\-\-dest\fP option is specified, it must be a directory path where the files will be stored with their hostname appended. If the destination path is not specified, it will take the first \fIfile\fP or \fIdir\fP basename directory as the local destination. .UNINDENT .SH OPTIONS .INDENT 0.0 .TP .B \-\-version . show \fCclush\fP version number and exit .TP .BI \-s \ GROUPSOURCE, \ \-\-groupsource\fB= GROUPSOURCE . optional \fCgroups.conf\fP(5) group source to use .TP .B \-\-nostdin . do not watch for possible input from stdin .UNINDENT .INDENT 0.0 .TP .B Selecting target nodes: .INDENT 7.0 .TP .BI \-w \ NODES . nodes where to run the command .TP .BI \-x \ NODES . exclude nodes from the node list .TP .B \-a, \-\-all . run command on all nodes .TP .BI \-g \ GROUP, \ \-\-group\fB= GROUP . run command on a group of nodes .TP .BI \-X \ GROUP . exclude nodes from this group .UNINDENT .TP .B Output behaviour: .INDENT 7.0 .TP .B \-q, \-\-quiet . be quiet, print essential output only .TP .B \-v, \-\-verbose . be verbose, print informative messages .TP .B \-d, \-\-debug . output more messages for debugging purpose .TP .B \-G, \-\-groupbase . do not display group source prefix .TP .B \-L . disable header block and order output by nodes; additionally, when used in conjunction with \-b/\-B, it will enable "life gathering" of results by line mode, such as the next line is displayed as soon as possible (eg. when all nodes have sent the line) .TP .B \-N . disable labeling of command line .TP .B \-b, \-\-dshbak . display gathered results in a dshbak\-like way .TP .B \-B . like \-b but including standard error .TP .B \-r, \-\-regroup . fold nodeset using node groups .TP .B \-S . return the largest of command return codes .TP .BI \-\-color\fB= WHENCOLOR . 
whether to use ANSI colors to surround node or nodeset prefix/header with escape sequences to display them in color on the terminal. \fIWHENCOLOR\fP is \fCnever\fP, \fCalways\fP or \fCauto\fP (which use color if standard output/error refer to a terminal). Colors are set to [34m (blue foreground text) for stdout and [31m (red foreground text) for stderr, and cannot be modified. .TP .B \-\-diff . show diff between common outputs (find the best reference output by focusing on largest nodeset and also smaller command return code) .UNINDENT .TP .B File copying: .INDENT 7.0 .TP .B \-c, \-\-copy . copy local file or directory to remote nodes .TP .B \-\-rcopy . copy file or directory from remote nodes .TP .BI \-\-dest\fB= DEST_PATH . destination file or directory on the nodes (optional: use the first source directory path when not specified) .TP .B \-p . preserve modification times and modes .UNINDENT .TP .B Ssh options: .INDENT 7.0 .TP .BI \-f \ FANOUT, \ \-\-fanout\fB= FANOUT . use a specified maximum fanout size (ie. do not execute more than FANOUT commands at the same time, useful to limit resource usage) .TP .BI \-l \ USER, \ \-\-user\fB= USER . execute remote command as user .TP .BI \-o \ OPTIONS, \ \-\-options\fB= OPTIONS . can be used to give ssh options, eg. \fC\-o "\-oPort=2022"\fP .TP .BI \-t \ CONNECT_TIMEOUT, \ \-\-connect_timeout\fB= CONNECT_TIMEOUT . limit time to connect to a node .TP .BI \-u \ COMMAND_TIMEOUT, \ \-\-command_timeout\fB= COMMAND_TIMEOUT . limit time for command to run on the node .UNINDENT .UNINDENT .sp For a short explanation of these options, see \fC\-h, \-\-help\fP. .SH EXIT STATUS .sp By default, an exit status of zero indicates success of the \fCclush\fP command but gives no information about the remote commands exit status. However, when the \fC\-S\fP option is specified, the exit status of \fCclush\fP is the largest value of the remote commands return codes. 
.sp For failed remote commands whose exit status is non\-zero, and unless the combination of options \fC\-qS\fP is specified, \fCclush\fP displays messages similar to: .INDENT 0.0 .TP .B clush: node[40\-42]: exited with exit code 1 .UNINDENT .SH EXAMPLES .SS Basic .INDENT 0.0 .TP .B # clush \-w node[3\-5,62] uname \-r . Run command \fIuname \-r\fP in parallel on nodes: node3, node4, node5 and node62 .UNINDENT .SS Display features .INDENT 0.0 .TP .B # clush \-w node[3\-5,62] \-b uname \-r . Run command \fIuname \-r\fP on nodes[3\-5,62] and display gathered output results (integrated \fCdshbak\fP\-like). .TP .B # clush \-w node[3\-5,62] \-bL uname \-r . Line mode: run command \fIuname \-r\fP on nodes[3\-5,62] and display gathered output results without default header block. .TP .B # ssh node32 find /etc/yum.repos.d \-type f | clush \-w node[40\-42] \-b xargs ls \-l . Search some files on node32 in /etc/yum.repos.d and use clush to list the matching ones on node[40\-42], and use \fC\-b\fP to display gathered results. .TP .B # clush \-w node[3\-5,62] \-\-diff dmidecode \-s bios\-version . Run this Linux command to get BIOS version on nodes[3\-5,62] and show version differences (if any). .UNINDENT .SS All nodes .INDENT 0.0 .TP .B # clush \-a uname \-r . Run command \fIuname \-r\fP on all cluster nodes, see \fCgroups.conf\fP(5) to setup all cluster nodes (\fIall:\fP field). .TP .B # clush \-a \-x node[5,7] uname \-r . Run command \fIuname \-r\fP on all cluster nodes except on nodes node5 and node7. .TP .B # clush \-a \-\-diff cat /some/file . Run command \fIcat /some/file\fP on all cluster nodes and show differences (if any), line by line, between common outputs. .UNINDENT .SS Node groups .INDENT 0.0 .TP .B # clush \-w @oss modprobe lustre . Run command \fImodprobe lustre\fP on nodes from node group named \fIoss\fP, see \fCgroups.conf\fP(5) to setup node groups (\fImap:\fP field). .TP .B # clush \-g oss modprobe lustre . 
Same as previous example but using \fC\-g\fP to avoid \fI@\fP group prefix. .TP .B # clush \-w @mds,@oss modprobe lustre . You may specify several node groups by separating them with commas (please see EXTENDED PATTERNS in \fCnodeset\fP(1) and also \fCgroups.conf\fP(5) for more information). .UNINDENT .SS Copy files .INDENT 0.0 .TP .B # clush \-w node[3\-5,62] \-\-copy /etc/motd . Copy local file \fI/etc/motd\fP to remote nodes node[3\-5,62]. .TP .B # clush \-w node[3\-5,62] \-\-copy /etc/motd \-\-dest /tmp/motd2 . Copy local file \fI/etc/motd\fP to remote nodes node[3\-5,62] at path \fI/tmp/motd2\fP. .TP .B # clush \-w node[3\-5,62] \-c /usr/share/doc/clustershell . Recursively copy local directory \fI/usr/share/doc/clustershell\fP to the same path on remote nodes node[3\-5,62]. .TP .B # clush \-w node[3\-5,62] \-\-rcopy /etc/motd \-\-dest /tmp . Copy \fI/etc/motd\fP from remote nodes node[3\-5,62] to local \fI/tmp\fP directory, each file having their remote hostname appended, eg. \fI/tmp/motd.node3\fP. .UNINDENT .SH FILES .INDENT 0.0 .TP .B \fI/etc/clustershell/clush.conf\fP .sp System\-wide \fCclush\fP configuration file. .TP .B \fI~/.clush.conf\fP .sp This is the per\-user \fCclush\fP configuration file. .TP .B \fI~/.clush_history\fP .sp File in which interactive \fCclush\fP command history is saved. .UNINDENT .SH SEE ALSO .sp \fCclubak\fP(1), \fCnodeset\fP(1), \fCreadline\fP(3), \fCclush.conf\fP(5), \fCgroups.conf\fP(5). .SH BUG REPORTS .INDENT 0.0 .TP .B Use the following URL to submit a bug report or feedback: . \fI\%https://github.com/cea\-hpc/clustershell/issues\fP .UNINDENT .SH AUTHOR Stephane Thiell, CEA DAM .SH COPYRIGHT CeCILL-C V1 .\" Generated by docutils manpage writer. .\" . clustershell-1.6/doc/man/man1/clubak.10000644000130500135250000001013411741571247017121 0ustar thiellgpocre.\" Man page generated from reStructeredText. . 
.TH CLUBAK 1 "2012-03-28" "1.6" "ClusterShell User Manual" .SH NAME clubak \- format output from clush/pdsh-like output and more . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH SYNOPSIS .sp \fCclubak\fP [ OPTIONS ] .SH DESCRIPTION .sp \fCclubak\fP formats text from standard input containing lines of the form "\fInode:output\fP". It is fully backward compatible with \fCdshbak\fP(1) but provides additonal features. For instance, \fCclubak\fP always displays its results sorted by node/nodeset. .sp You do not need to use \fCclubak\fP when using \fCclush\fP(1) as all output formatting features are already included in. It is provided for other usages, like post\-processing results of the form "\fInode:output\fP". .sp Like \fCclush\fP(1), \fCclubak\fP uses the \fIClusterShell.MsgTree\fP module of the ClusterShell library (see \fCpydoc ClusterShell.MsgTree\fP). .SH INVOCATION .sp \fCclubak\fP should be started with connected standard input. .SH OPTIONS .INDENT 0.0 .TP .B \-\-version . show \fCclubak\fP version number and exit .TP .B \-b, \-c . gather nodes with same output (\-c is provided for \fCdshbak\fP(1) compatibility) .TP .B \-d, \-\-debug . output more messages for debugging purpose .TP .B \-L . disable header block and order output by nodes .TP .B \-r, \-\-regroup . fold nodeset using node groups .TP .BI \-s \ GROUPSOURCE, \ \-\-groupsource\fB= GROUPSOURCE . 
optional \fCgroups.conf\fP(5) group source to use .TP .B \-G, \-\-groupbase . do not display group source prefix (always \fI@groupname\fP) .TP .BI \-S \ SEPARATOR, \ \-\-separator\fB= SEPARATOR . node / line content separator string (default: \fI:\fP) .TP .B \-F, \-\-fast . faster but memory hungry mode (preload all messages per node) .TP .B \-T, \-\-tree . message tree trace mode; switch to enable \fCClusterShell.MsgTree\fP trace mode, all keys/nodes being kept for each message element of the tree, thus allowing special output gathering .TP .BI \-\-color\fB= WHENCOLOR . whether to use ANSI colors to surround node or nodeset prefix/header with escape sequences to display them in color on the terminal. \fIWHENCOLOR\fP is \fCnever\fP, \fCalways\fP or \fCauto\fP (which use color if standard output refers to a terminal). Color is set to [34m (blue foreground text) and cannot be modified. .TP .B \-\-diff . show diff between gathered outputs .UNINDENT .SH EXIT STATUS .sp An exit status of zero indicates success of the \fCclubak\fP command. .SH EXAMPLES .INDENT 0.0 .IP 1. 3 . \fCclubak\fP can be used to gather some recorded \fCclush\fP(1) results: .UNINDENT .INDENT 0.0 .TP .B Record \fCclush\fP(1) results in a file: .nf # clush \-w node[1\-7] uname \-r >/tmp/clush_output # clush \-w node[32\-159] uname \-r >>/tmp/clush_output .fi .sp .TP .B Display file gathered results (in line\-mode): .nf # clubak \-bL .SH COPYRIGHT CeCILL-C V1 .\" Generated by docutils manpage writer. .\" . clustershell-1.6/doc/man/man5/0000755000130500135250000000000011741572334015601 5ustar thiellgpocreclustershell-1.6/doc/man/man5/clush.conf.50000644000130500135250000001031011741571247017726 0ustar thiellgpocre.\" Man page generated from reStructeredText. . .TH CLUSH.CONF 5 "2012-02-15" "1.6" "ClusterShell User Manual" .SH NAME clush.conf \- Configuration file for clush . .nr rst2man-indent-level 0 . 
.de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH DESCRIPTION .sp \fBclush\fP(1) obtains configuration options from the following sources in the following order: .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .IP 1. 3 . command\-line options .IP 2. 3 . user configuration file (\fI~/.clush.conf\fP) .IP 3. 3 . system\-wide configuration file (\fI/etc/clustershell/clush.conf\fP) .UNINDENT .UNINDENT .UNINDENT .sp For each parameter, the first obtained value will be used. .sp The configuration file has a format in the style of RFC 822 composed of one main section: .INDENT 0.0 .TP .B Main . Program options definition .UNINDENT .SS [Main] .sp Configuration parameters of the \fBMain\fP section are described below. .INDENT 0.0 .TP .B fanout . Size of the sliding window of ssh connectors. .TP .B connect_timeout . Timeout in seconds to allow a connection to establish. This parameter is passed to ssh. If set to \fI0\fP, no timeout occurs. .TP .B command_timeout . Timeout in seconds to allow a command to complete since the connection has been established. This parameter is passed to ssh. In addition, the ClusterShell library ensures that any commands complete in less than ( connect_timeout + command_timeout ). If set to \fI0\fP, no timeout occurs. .TP .B color . Whether to use ANSI colors to surround node or nodeset prefix/header with escape sequences to display them in color on the terminal. 
Valid arguments are \fBnever\fP, \fBalways\fP or \fBauto\fP (which use color if standard output/error refer to a terminal). Colors are set to [34m (blue foreground text) for stdout and [31m (red foreground text) for stderr, and cannot be modified. .TP .B fd_max . Maximum number of open file descriptors permitted per clush process (soft resource limit for open files). This limit can never exceed the system (hard) limit. The \fIfd_max\fP (soft) and system (hard) limits should be high enough to run \fBclush\fP, although their values depend on your \fIfanout\fP value. .TP .B history_size . Set the maximum number of history entries saved in the GNU readline history list. Negative values imply unlimited history file size. .TP .B node_count . Should \fBclush\fP display additional (node count) information in buffer header? (\fIyes\fP/\fIno\fP) .TP .B verbosity . Set the verbosity level: \fI0\fP (quiet), \fI1\fP (default), \fI2\fP (verbose) or more (debug). .TP .B ssh_user . Set the ssh user to use for remote connection (default is to not specify). .TP .B ssh_path . Set the ssh binary path to use for remote connection (default is \fI/usr/bin/ssh\fP). .TP .B ssh_options . Set additional options to pass to the underlying ssh command. .UNINDENT .SH EXAMPLES .sp Simple configuration file. .SS \fIclush.conf\fP .nf [Main] fanout: 128 connect_timeout: 15 command_timeout: 0 history_size: 100 color: auto fd_max: 10240 node_count: yes .fi .sp .SH FILES .INDENT 0.0 .TP .B \fI~/.clush.conf\fP .sp This is the per\-user configuration file. .TP .B \fI/etc/clustershell/clush.conf\fP .sp System\-wide configuration file. .UNINDENT .SH HISTORY .sp As of ClusterShell version 1.3, the \fBExternal\fP section has been removed from \fIclush.conf\fP. External commands whose outputs were used by \fBclush\fP (\-a, \-g, \-X) are now handled by the library itself and defined in \fBgroups.conf\fP(5). 
.SH SEE ALSO .sp \fBclush\fP(1), \fBnodeset\fP(1), \fBgroups.conf\fP(5) .sp \fI\%http://clustershell.sourceforge.net/\fP .SH AUTHOR Stephane Thiell, CEA DAM .SH COPYRIGHT CeCILL-C V1 .\" Generated by docutils manpage writer. .\" . clustershell-1.6/doc/man/man5/groups.conf.50000644000130500135250000001263711741571247020145 0ustar thiellgpocre.\" Man page generated from reStructeredText. . .TH GROUPS.CONF 5 "2012-03-31" "1.6" "ClusterShell User Manual" .SH NAME groups.conf \- Configuration file for ClusterShell external node groups . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .SH DESCRIPTION .sp The ClusterShell library obtains node groups configuration options from the system\-wide configuration file \fI/etc/clustershell/groups.conf\fP. Additional configuration files are also read from the directories set by the groupsdir option, if present. See the \fCgroupsdir\fP option below for further details. .sp Configuration files have a format in the style of RFC 822 potentially composed of several sections which may be present in any order. There are two types of sections: Main and Group source: .INDENT 0.0 .TP .B Main . Global configuration options. There should be only one Main section. .TP .B \fIGroup_source\fP .sp The \fIGroup_source\fP section(s) define the configuration for each node group source (or namespace). 
This configuration consists in external commands definition (map, all, list and reverse). .UNINDENT .sp Only \fIGroup_source\fP section(s) are allowed in additional configuration files. .SS [Main] OPTIONS .sp Configuration parameters of the \fCMain\fP section are described below. .INDENT 0.0 .TP .B default . Specify the default group source (group namespace) used by the NodeSet parser when the user does not explicitly specify the group source (eg. "@io"). .TP .B groupsdir . Optional list of directories where the ClusterShell library should look for .conf files which define group sources to use. Each file in these directories with the .conf suffix should contain one or more \fIGroup_source\fP sections as documented in [\fIGroup_source\fP] options below. These will be merged with the group sources defined in \fI/etc/clustershell/groups.conf\fP to form the complete set of group sources that ClusterShell will use. Duplicate \fIGroup_source\fP sections are not allowed. Note: .conf files that are not readable by the current user are ignored. .UNINDENT .SS [\fIGroup_source\fP] OPTIONS .sp Configuration parameters of each group source section are described below. .INDENT 0.0 .TP .B map . Specify the external shell command used to resolve a group name into a nodeset, list of nodes or list of nodeset (separated by space characters or by carriage returns). The variable \fI$GROUP\fP is replaced before executing the command. .TP .B all . Optional external shell command that should return a nodeset, list of nodes or list of nodeset of all nodes for this group source. If not specified, the library will try to resolve all nodes by using the \fClist\fP external command in the same group source followed by \fCmap\fP for each group. .TP .B list . Optional external shell command that should return the list of all groups for this group source (separated by space characters or by carriage returns). .TP .B reverse . 
Optional external shell command used to find the group(s) of a single node. The variable $NODE is previously replaced. If this upcall is not specified, the reverse operation is computed in memory by the library from the \fIlist\fP and \fImap\fP external calls. Also, if the number of nodes to reverse is greater than the number of available groups, the \fIreverse\fP external command is avoided automatically. .UNINDENT .sp When the library executes a group source external shell command, the current working directory is previously set to the corresponding groupsdir. This allows the use of relative paths for third party files in the command. .sp Each external command might return a non\-zero return code when the operation is not doable. But if the call return zero, for instance, for a non\-existing group, the user will not receive any error when trying to resolve such unknown group. The desired behaviour is up to the system administrator. .SH RESOURCE USAGE .sp All external command results are cached in memory to avoid multiple calls. .SH EXAMPLES .sp Simple configuration file for local groups and slurm partitions binding. .SS \fIgroups.conf\fP .nf [Main] default: local #groupsdir: /etc/clustershell/groups.conf.d/ [local] map: sed \-n \(aqs/^$GROUP:(.*)/1/p\(aq /etc/clustershell/groups list: sed \-n \(aqs/^\e(\fC[0\-9A\-Za\-z_\-]\fP*\e):.*/\e1/p\(aq /etc/clustershell/groups [slurm] map: sinfo \-h \-o "%N" \-p $GROUP all: sinfo \-h \-o "%N" list: sinfo \-h \-o "%P" reverse: sinfo \-h \-N \-o "%P" \-n $NODE .fi .sp .SH FILES .INDENT 0.0 .TP .B \fI/etc/clustershell/groups.conf\fP .sp System\-wide external node groups configuration file. .TP .B \fI/etc/clustershell/groups.conf.d/\fP .sp Recommended directory for additional configuration files. .UNINDENT .SH SEE ALSO .sp \fCclush\fP(1), \fCclubak\fP(1), \fCnodeset\fP(1) .sp \fI\%http://clustershell.sourceforge.net/\fP .SH AUTHOR Stephane Thiell, CEA DAM .SH COPYRIGHT CeCILL-C V1 .\" Generated by docutils manpage writer. 
.\" . clustershell-1.6/doc/txt/0000755000130500135250000000000011741572334015005 5ustar thiellgpocreclustershell-1.6/doc/txt/clush.conf.txt0000644000130500135250000000650011741571247017613 0ustar thiellgpocre============ clush.conf ============ ------------------------------ Configuration file for `clush` ------------------------------ :Author: Stephane Thiell, CEA DAM :Date: 2012-02-15 :Copyright: CeCILL-C V1 :Version: 1.6 :Manual section: 5 :Manual group: ClusterShell User Manual DESCRIPTION =========== ``clush``\(1) obtains configuration options from the following sources in the following order: 1. command-line options 2. user configuration file (*~/.clush.conf*) 3. system-wide configuration file (*/etc/clustershell/clush.conf*) For each parameter, the first obtained value will be used. The configuration file has a format in the style of RFC 822 composed of one main section: Main Program options definition [Main] ------ Configuration parameters of the ``Main`` section are described below. fanout Size of the sliding window of ssh connectors. connect_timeout Timeout in seconds to allow a connection to establish. This parameter is passed to ssh. If set to *0*, no timeout occurs. command_timeout Timeout in seconds to allow a command to complete since the connection has been established. This parameter is passed to ssh. In addition, the ClusterShell library ensures that any commands complete in less than ( connect_timeout + command_timeout ). If set to *0*, no timeout occurs. color Whether to use ANSI colors to surround node or nodeset prefix/header with escape sequences to display them in color on the terminal. Valid arguments are ``never``, ``always`` or ``auto`` (which use color if standard output/error refer to a terminal). Colors are set to [34m (blue foreground text) for stdout and [31m (red foreground text) for stderr, and cannot be modified. fd_max Maximum number of open file descriptors permitted per clush process (soft resource limit for open files). 
This limit can never exceed the system (hard) limit. The `fd_max` (soft) and system (hard) limits should be high enough to run ``clush``, although their values depend on your `fanout` value. history_size Set the maximum number of history entries saved in the GNU readline history list. Negative values imply unlimited history file size. node_count Should ``clush`` display additional (node count) information in buffer header? (`yes`/`no`) verbosity Set the verbosity level: `0` (quiet), `1` (default), `2` (verbose) or more (debug). ssh_user Set the ssh user to use for remote connection (default is to not specify). ssh_path Set the ssh binary path to use for remote connection (default is `/usr/bin/ssh`). ssh_options Set additional options to pass to the underlying ssh command. EXAMPLES =========== Simple configuration file. *clush.conf* ------------ | [Main] | fanout: 128 | connect_timeout: 15 | command_timeout: 0 | history_size: 100 | color: auto | fd_max: 10240 | node_count: yes | FILES ===== *~/.clush.conf* This is the per-user configuration file. */etc/clustershell/clush.conf* System-wide configuration file. HISTORY ======= As of ClusterShell version 1.3, the ``External`` section has been removed from *clush.conf*. External commands whose outputs were used by ``clush`` (-a, -g, -X) are now handled by the library itself and defined in ``groups.conf``\(5). 
SEE ALSO ======== ``clush``\(1), ``nodeset``\(1), ``groups.conf``\(5) http://clustershell.sourceforge.net/ clustershell-1.6/doc/txt/clubak.txt0000644000130500135250000000635211741571247017017 0ustar thiellgpocre========= clubak ========= -------------------------------------------------- format output from clush/pdsh-like output and more -------------------------------------------------- :Author: Stephane Thiell, CEA DAM :Date: 2012-03-28 :Copyright: CeCILL-C V1 :Version: 1.6 :Manual section: 1 :Manual group: ClusterShell User Manual SYNOPSIS ======== ``clubak`` [ OPTIONS ] DESCRIPTION =========== ``clubak`` formats text from standard input containing lines of the form "`node:output`". It is fully backward compatible with ``dshbak``\(1) but provides additonal features. For instance, ``clubak`` always displays its results sorted by node/nodeset. You do not need to use ``clubak`` when using ``clush``\(1) as all output formatting features are already included in. It is provided for other usages, like post-processing results of the form "`node:output`". Like ``clush``\(1), ``clubak`` uses the `ClusterShell.MsgTree` module of the ClusterShell library (see ``pydoc ClusterShell.MsgTree``). INVOCATION ========== ``clubak`` should be started with connected standard input. 
OPTIONS ======= --version show ``clubak`` version number and exit -b, -c gather nodes with same output (-c is provided for ``dshbak``\(1) compatibility) -d, --debug output more messages for debugging purpose -L disable header block and order output by nodes -r, --regroup fold nodeset using node groups -s GROUPSOURCE, --groupsource=GROUPSOURCE optional ``groups.conf``\(5) group source to use -G, --groupbase do not display group source prefix (always `@groupname`) -S SEPARATOR, --separator=SEPARATOR node / line content separator string (default: `:`) -F, --fast faster but memory hungry mode (preload all messages per node) -T, --tree message tree trace mode; switch to enable ``ClusterShell.MsgTree`` trace mode, all keys/nodes being kept for each message element of the tree, thus allowing special output gathering --color=WHENCOLOR whether to use ANSI colors to surround node or nodeset prefix/header with escape sequences to display them in color on the terminal. *WHENCOLOR* is ``never``, ``always`` or ``auto`` (which use color if standard output refers to a terminal). Color is set to [34m (blue foreground text) and cannot be modified. --diff show diff between gathered outputs EXIT STATUS =========== An exit status of zero indicates success of the ``clubak`` command. EXAMPLES =========== 1. ``clubak`` can be used to gather some recorded ``clush``\(1) results: Record ``clush``\(1) results in a file: | # clush -w node[1-7] uname -r >/tmp/clush_output | # clush -w node[32-159] uname -r >>/tmp/clush_output Display file gathered results (in line-mode): | # clubak -bL :Date: 2012-03-31 :Copyright: CeCILL-C V1 :Version: 1.6 :Manual section: 1 :Manual group: ClusterShell User Manual SYNOPSIS ======== ``nodeset`` [COMMAND] [OPTIONS] [nodeset1 [-ixX] nodeset2|...] DESCRIPTION =========== ``nodeset`` is an utility command provided with the ClusterShell library which implements some features of ClusterShell's NodeSet and RangeSet Python classes. 
It provides easy manipulation of indexed cluster nodes and node groups. It is automatically bound to the library node group resolution mechanism. Thus, ``nodeset`` is especially useful to enhance cluster aware administration shell scripts. OPTIONS ======= --version show program's version number and exit -h, --help show this help message and exit -s GROUPSOURCE, --groupsource=GROUPSOURCE optional ``groups.conf``\(5) group source to use Commands: -c, --count show number of nodes in nodeset(s) -e, --expand expand nodeset(s) to separate nodes (see also -S *SEPARATOR*) -f, --fold fold nodeset(s) (or separate nodes) into one nodeset -l, --list list node groups, list node groups and nodes (``-ll``) or list node groups, nodes and node count (``-lll``). When no argument is specified at all, this command will list all node group names found in selected group source (see also -s *GROUPSOURCE*). If any nodesets are specified as argument, this command will find node groups these nodes belongs to (individually). Optionally for each group, the fraction of these nodes being member of the group may be displayed (with ``-ll``), and also member count/total group node count (with ``-lll``). If a single hyphen-minus (-) is given as a nodeset, it will be read from standard input. -r, --regroup fold nodes using node groups (see -s *GROUPSOURCE*) --groupsources list all configured group sources (see ``groups.conf``\(5)) Operations: -x SUB_NODES, --exclude=SUB_NODES exclude specified nodeset -i AND_NODES, --intersection=AND_NODES calculate nodesets intersection -X XOR_NODES, --xor=XOR_NODES calculate symmetric difference between nodesets Options: -a, --all call external node groups support to display all nodes --autostep=AUTOSTEP auto step threshold number when folding nodesets, if not specified, auto step is disabled. 
Example: autostep=4, "node2 node4 node6" folds in node[2,4,6] but autostep=3, "node2 node4 node6" folds in node[2-6/2] -d, --debug output more messages for debugging purpose -q, --quiet be quiet, print essential output only -R, --rangeset switch to RangeSet instead of NodeSet. Useful when working on numerical cluster ranges, eg. 1,5,18-31 -G, --groupbase hide group source prefix (always `@groupname`) -S SEPARATOR, --separator=SEPARATOR separator string to use when expanding nodesets (default: ' ') -I SLICE_RANGESET, --slice=SLICE_RANGESET return sliced off result; examples of SLICE_RANGESET are "0" for simple index selection, or "1-9/2,16" for complex rangeset selection --split=MAXSPLIT split result into a number of subsets --contiguous split result into contiguous subsets (ie. for nodeset, subsets will contain nodes with same pattern name and a contiguous range of indexes, like foobar[1-100]; for rangeset, subsets with consists in contiguous index ranges)""" For a short explanation of these options, see ``-h, --help``. If a single hyphen-minus (-) is given as a nodeset, it will be read from standard input. EXTENDED PATTERNS ================= The ``nodeset`` command benefits from ClusterShell NodeSet basic arithmetic addition. This feature extends recognized string patterns by supporting operators matching all Operations seen previously. String patterns are read from left to right, by proceeding any character operators accordinately. Supported character operators ``,`` indicates that the *union* of both left and right nodeset should be computed before continuing ``!`` indicates the *difference* operation ``&`` indicates the *intersection* operation ``^`` indicates the *symmetric difference* (XOR) operation Care should be taken to escape these characters as needed when the shell does not interpret them literally. 
Examples of use of extended patterns :$ nodeset -f node[0-7],node[8-10]: | node[0-10] :$ nodeset -f node[0-10]\!node[8-10]: | node[0-7] :$ nodeset -f node[0-10]\&node[5-13]: | node[5-10] :$ nodeset -f node[0-10]^node[5-13]: | node[0-4,11-13] Example of advanced usage :$ nodeset -f @gpu^@slurm\:bigmem!@chassis[1-9/2]: This computes a folded nodeset containing nodes found in group @gpu and @slurm:bigmem, but not in both, minus the nodes found in odd chassis groups from 1 to 9. EXIT STATUS =========== An exit status of zero indicates success of the ``nodeset`` command. A non-zero exit status indicates failure. EXAMPLES =========== Getting the node count :$ nodeset -c node[0-7,32-159]: | 136 :$ nodeset -c node[0-7,32-159] node[160-163]: | 140 :$ nodeset -c @login: | 4 Folding nodesets :$ nodeset -f node[0-7,32-159] node[160-163]: | node[0-7,32-163] :$ echo node3 node6 node1 node2 node7 node5 | nodeset -f: | node[1-3,5-7] Expanding nodesets :$ nodeset -e node[160-163]: | node160 node161 node162 node163 Excluding nodes from nodeset :$ nodeset -f node[32-159] -x node33: | node[32,34-159] Computing nodesets intersection :$ nodeset -f node[32-159] -i node[0-7,20-21,32,156-159]: | node[32,156-159] Computing nodesets symmetric difference (xor) :$ nodeset -f node[33-159] --xor node[32-33,156-159]: | node[32,34-155] Splitting nodes into several nodesets (expanding results) :$ nodeset -e --split=3 node[1-9]: | node1 node2 node3 | node4 node5 node6 | node7 node8 node9 Splitting non-contiguous nodesets (folding results) :$ nodeset -f --contiguous node2 node3 node4 node8 node9: | node[2-4] | node[8-9] HISTORY ======= Command syntax has been changed since ``nodeset`` command available with ClusterShell v1.1. Operations, like *--intersection* or *-x*, are now specified between nodesets in the command line. 
ClusterShell v1.1: :$ nodeset -f -x node[3,5-6,9] node[1-9]: | node[1-2,4,7-8] ClusterShell v1.2+: :$ nodeset -f node[1-9] -x node[3,5-6,9]: | node[1-2,4,7-8] SEE ALSO ======== ``clush``\(1), ``clubak``\(1), ``groups.conf``\(5). BUG REPORTS =========== Use the following URL to submit a bug report or feedback: https://github.com/cea-hpc/clustershell/issues clustershell-1.6/doc/txt/groups.conf.txt0000644000130500135250000001131111741571247020010 0ustar thiellgpocre============= groups.conf ============= -------------------------------------------------------- Configuration file for ClusterShell external node groups -------------------------------------------------------- :Author: Stephane Thiell, CEA DAM :Date: 2012-03-31 :Copyright: CeCILL-C V1 :Version: 1.6 :Manual section: 5 :Manual group: ClusterShell User Manual DESCRIPTION =========== The ClusterShell library obtains node groups configuration options from the system-wide configuration file */etc/clustershell/groups.conf*. Additional configuration files are also read from the directories set by the groupsdir option, if present. See the ``groupsdir`` option below for further details. Configuration files have a format in the style of RFC 822 potentially composed of several sections which may be present in any order. There are two types of sections: Main and Group source: Main Global configuration options. There should be only one Main section. *Group_source* The *Group_source* section(s) define the configuration for each node group source (or namespace). This configuration consists in external commands definition (map, all, list and reverse). Only *Group_source* section(s) are allowed in additional configuration files. [Main] OPTIONS -------------- Configuration parameters of the ``Main`` section are described below. default Specify the default group source (group namespace) used by the NodeSet parser when the user does not explicitly specify the group source (eg. "@io"). 
groupsdir Optional list of directories where the ClusterShell library should look for .conf files which define group sources to use. Each file in these directories with the .conf suffix should contain one or more *Group_source* sections as documented in [*Group_source*] options below. These will be merged with the group sources defined in */etc/clustershell/groups.conf* to form the complete set of group sources that ClusterShell will use. Duplicate *Group_source* sections are not allowed. Note: .conf files that are not readable by the current user are ignored. [*Group_source*] OPTIONS ------------------------ Configuration parameters of each group source section are described below. map Specify the external shell command used to resolve a group name into a nodeset, list of nodes or list of nodeset (separated by space characters or by carriage returns). The variable *$GROUP* is replaced before executing the command. all Optional external shell command that should return a nodeset, list of nodes or list of nodeset of all nodes for this group source. If not specified, the library will try to resolve all nodes by using the ``list`` external command in the same group source followed by ``map`` for each group. list Optional external shell command that should return the list of all groups for this group source (separated by space characters or by carriage returns). reverse Optional external shell command used to find the group(s) of a single node. The variable $NODE is previously replaced. If this upcall is not specified, the reverse operation is computed in memory by the library from the *list* and *map* external calls. Also, if the number of nodes to reverse is greater than the number of available groups, the *reverse* external command is avoided automatically. When the library executes a group source external shell command, the current working directory is previously set to the corresponding groupsdir. 
This allows the use of relative paths for third party files in the command. Each external command might return a non-zero return code when the operation is not doable. But if the call return zero, for instance, for a non-existing group, the user will not receive any error when trying to resolve such unknown group. The desired behaviour is up to the system administrator. RESOURCE USAGE ============== All external command results are cached in memory to avoid multiple calls. EXAMPLES ======== Simple configuration file for local groups and slurm partitions binding. *groups.conf* ------------- | [Main] | default: local | #groupsdir: /etc/clustershell/groups.conf.d/ | | [local] | map: sed -n 's/^$GROUP:\(.*\)/\1/p' /etc/clustershell/groups | list: sed -n \'s/^\\(``[0-9A-Za-z_-]``\*\\):.*/\\1/p' /etc/clustershell/groups | | [slurm] | map: sinfo -h -o "%N" -p $GROUP | all: sinfo -h -o "%N" | list: sinfo -h -o "%P" | reverse: sinfo -h -N -o "%P" -n $NODE FILES ===== */etc/clustershell/groups.conf* System-wide external node groups configuration file. */etc/clustershell/groups.conf.d/* Recommended directory for additional configuration files. SEE ALSO ======== ``clush``\(1), ``clubak``\(1), ``nodeset``\(1) http://clustershell.sourceforge.net/ clustershell-1.6/doc/txt/clush.txt0000644000130500135250000002572711741571247016703 0ustar thiellgpocre========= clush ========= ----------------------------------- execute shell commands on a cluster ----------------------------------- :Author: Stephane Thiell, CEA DAM :Date: 2012-04-01 :Copyright: CeCILL-C V1 :Version: 1.6 :Manual section: 1 :Manual group: ClusterShell User Manual SYNOPSIS ======== ``clush`` ``-a`` | ``-g`` *group* | ``-w`` *nodes* [OPTIONS] ``clush`` ``-a`` | ``-g`` *group* | ``-w`` *nodes* [OPTIONS] *command* ``clush`` ``-a`` | ``-g`` *group* | ``-w`` *nodes* [OPTIONS] --copy *file* | *dir* [ *file* | *dir* ...] 
[ --dest *path* ] ``clush`` ``-a`` | ``-g`` *group* | ``-w`` *nodes* [OPTIONS] --rcopy *file* | *dir* [ *file* | *dir* ...] [ --dest *path* ] DESCRIPTION =========== ``clush`` is a program for executing commands in parallel on a cluster and for gathering their results. ``clush`` executes commands interactively or can be used within shell scripts and other applications. It is a partial front-end to the ClusterShell library that ensures a light, unified and robust parallel command execution framework. Thus, it allows traditional shell scripts to benefit from some of the library features. ``clush`` currently makes use of the Ssh worker of ClusterShell that only requires ``ssh``\(1) (OpenSSH SSH client). INVOCATION ========== ``clush`` can be started non-interactively to run a shell *command*, or can be invoked as an interactive shell. To start a ``clush`` interactive session, invoke the ``clush`` command without providing *command*. Non-interactive mode When ``clush`` is started non-interactively, the *command* is executed on the specified remote hosts in parallel. If option ``-b`` or ``--dshbak`` is specified, ``clush`` waits for command completion and then displays gathered output results. The ``-w`` option allows you to specify remote hosts by using ClusterShell NodeSet syntax, including the node groups ``@group`` special syntax and the ``Extended Patterns`` syntax to benefits from NodeSet basic arithmetics (like ``@Agroup\&@Bgroup``). See EXTENDED PATTERNS in ``nodeset``\(1) and also ``groups.conf``\(5) for more information. Unless option ``--nostdin`` is specified, ``clush`` detects when its standard input is connected to a terminal (as determined by ``isatty``\(3)). If actually connected to a terminal, ``clush`` listens to standard input when commands are running, waiting for an `Enter` key press. Doing so will display the status of current nodes. 
If standard input is not connected to a terminal, and unless option ``--nostdin`` is specified, ``clush`` binds the standard input of the remote commands to its own standard input, allowing scripting methods like: | # echo foo | clush -w node[40-42] -b cat | --------------- | node[40-42] | --------------- | foo Please see some other great examples in the EXAMPLES section below. Interactive session If a *command* is not specified, and its standard input is connected to a terminal, ``clush`` runs interactively. In this mode, ``clush`` uses the GNU ``readline`` library to read command lines. Readline provides commands for searching through the command history for lines containing a specified string. For instance, type Control-R to search in the history for the next entry matching the search string typed so far. ``clush`` also recognizes special single-character prefixes that allows the user to see and modify the current nodeset (the nodes where the commands are executed). Single-character interactive commands are: clush> ? show current nodeset clush> = set current nodeset clush> + add nodes to current nodeset clush> - remove nodes from current nodeset clush> !COMMAND execute COMMAND on the local system clush> = toggle the output format (gathered or standard mode) To leave an interactive session, type ``quit`` or Control-D. File copying mode ( ``--copy`` ) When ``clush`` is started with the ``-c`` or ``--copy`` option, it will attempt to copy specified *file* and/or *dir* to the provided target cluster nodes. If the ``--dest`` option is specified, it will put the copied files there. Reverse file copying mode ( ``--rcopy`` ) When ``clush`` is started with the ``--rcopy`` option, it will attempt to retrieve specified *file* and/or *dir* from provided cluster nodes. If the ``--dest`` option is specified, it must be a directory path where the files will be stored with their hostname appended. 
If the destination path is not specified, it will take the first *file* or *dir* basename directory as the local destination. OPTIONS ======= --version show ``clush`` version number and exit -s GROUPSOURCE, --groupsource=GROUPSOURCE optional ``groups.conf``\(5) group source to use --nostdin do not watch for possible input from stdin Selecting target nodes: -w NODES nodes where to run the command -x NODES exclude nodes from the node list -a, --all run command on all nodes -g GROUP, --group=GROUP run command on a group of nodes -X GROUP exclude nodes from this group Output behaviour: -q, --quiet be quiet, print essential output only -v, --verbose be verbose, print informative messages -d, --debug output more messages for debugging purpose -G, --groupbase do not display group source prefix -L disable header block and order output by nodes; additionally, when used in conjunction with -b/-B, it will enable "life gathering" of results by line mode, such as the next line is displayed as soon as possible (eg. when all nodes have sent the line) -N disable labeling of command line -b, --dshbak display gathered results in a dshbak-like way -B like -b but including standard error -r, --regroup fold nodeset using node groups -S return the largest of command return codes --color=WHENCOLOR whether to use ANSI colors to surround node or nodeset prefix/header with escape sequences to display them in color on the terminal. *WHENCOLOR* is ``never``, ``always`` or ``auto`` (which use color if standard output/error refer to a terminal). Colors are set to [34m (blue foreground text) for stdout and [31m (red foreground text) for stderr, and cannot be modified. 
--diff show diff between common outputs (find the best reference output by focusing on largest nodeset and also smaller command return code) File copying: -c, --copy copy local file or directory to remote nodes --rcopy copy file or directory from remote nodes --dest=DEST_PATH destination file or directory on the nodes (optional: use the first source directory path when not specified) -p preserve modification times and modes Ssh options: -f FANOUT, --fanout=FANOUT use a specified maximum fanout size (ie. do not execute more than FANOUT commands at the same time, useful to limit resource usage) -l USER, --user=USER execute remote command as user -o OPTIONS, --options=OPTIONS can be used to give ssh options, eg. ``-o "-oPort=2022"`` -t CONNECT_TIMEOUT, --connect_timeout=CONNECT_TIMEOUT limit time to connect to a node -u COMMAND_TIMEOUT, --command_timeout=COMMAND_TIMEOUT limit time for command to run on the node For a short explanation of these options, see ``-h, --help``. EXIT STATUS =========== By default, an exit status of zero indicates success of the ``clush`` command but gives no information about the remote commands exit status. However, when the ``-S`` option is specified, the exit status of ``clush`` is the largest value of the remote commands return codes. For failed remote commands whose exit status is non-zero, and unless the combination of options ``-qS`` is specified, ``clush`` displays messages similar to: :clush\: node[40-42]\: exited with exit code 1: EXAMPLES =========== Basic ----- :# clush -w node[3-5,62] uname -r: Run command `uname -r` in parallel on nodes: node3, node4, node5 and node62 Display features ---------------- :# clush -w node[3-5,62] -b uname -r: Run command `uname -r` on nodes[3-5,62] and display gathered output results (integrated ``dshbak``-like). :# clush -w node[3-5,62] -bL uname -r: Line mode: run command `uname -r` on nodes[3-5,62] and display gathered output results without default header block. 
:# ssh node32 find /etc/yum.repos.d -type f | clush -w node[40-42] -b xargs ls -l: Search some files on node32 in /etc/yum.repos.d and use clush to list the matching ones on node[40-42], and use ``-b`` to display gathered results. :# clush -w node[3-5,62] --diff dmidecode -s bios-version: Run this Linux command to get BIOS version on nodes[3-5,62] and show version differences (if any). All nodes --------- :# clush -a uname -r: Run command `uname -r` on all cluster nodes, see ``groups.conf``\(5) to setup all cluster nodes (`all:` field). :# clush -a -x node[5,7] uname -r: Run command `uname -r` on all cluster nodes except on nodes node5 and node7. :# clush -a --diff cat /some/file: Run command `cat /some/file` on all cluster nodes and show differences (if any), line by line, between common outputs. Node groups ----------- :# clush -w @oss modprobe lustre: Run command `modprobe lustre` on nodes from node group named `oss`, see ``groups.conf``\(5) to setup node groups (`map:` field). :# clush -g oss modprobe lustre: Same as previous example but using ``-g`` to avoid `@` group prefix. :# clush -w @mds,@oss modprobe lustre: You may specify several node groups by separating them with commas (please see EXTENDED PATTERNS in ``nodeset``\(1) and also ``groups.conf``\(5) for more information). Copy files ---------- :# clush -w node[3-5,62] --copy /etc/motd: Copy local file `/etc/motd` to remote nodes node[3-5,62]. :# clush -w node[3-5,62] --copy /etc/motd --dest /tmp/motd2: Copy local file `/etc/motd` to remote nodes node[3-5,62] at path `/tmp/motd2`. :# clush -w node[3-5,62] -c /usr/share/doc/clustershell: Recursively copy local directory `/usr/share/doc/clustershell` to the same path on remote nodes node[3-5,62]. :# clush -w node[3-5,62] --rcopy /etc/motd --dest /tmp: Copy `/etc/motd` from remote nodes node[3-5,62] to local `/tmp` directory, each file having their remote hostname appended, eg. `/tmp/motd.node3`. 
FILES ===== */etc/clustershell/clush.conf* System-wide ``clush`` configuration file. *~/.clush.conf* This is the per-user ``clush`` configuration file. *~/.clush_history* File in which interactive ``clush`` command history is saved. SEE ALSO ======== ``clubak``\(1), ``nodeset``\(1), ``readline``\(3), ``clush.conf``\(5), ``groups.conf``\(5). BUG REPORTS =========== Use the following URL to submit a bug report or feedback: https://github.com/cea-hpc/clustershell/issues clustershell-1.6/doc/txt/README0000644000130500135250000000031111741571247015662 0ustar thiellgpocreFiles found in this directory are text files in reStructuredText format (Markup Syntax of Docutils). We use rst1man.py to convert them to roff man pages. See: http://docutils.sourceforge.net/rst.html clustershell-1.6/doc/epydoc/0000755000130500135250000000000011741572334015451 5ustar thiellgpocreclustershell-1.6/doc/epydoc/clustershell_epydoc.conf0000644000130500135250000000204211741571247022374 0ustar thiellgpocre# To generate ClusterShell epydoc documentation, set your current # directory to the package root directory, then use the following # command: # # $ epydoc --config doc/epydoc/clustershell_epydoc.conf # [epydoc] # Epydoc section marker (required by ConfigParser) # Information about the project. name: ClusterShell url: http://clustershell.sourceforge.net # The list of modules to document. modules: lib/ClusterShell, scripts/clubak.py, scripts/clush.py, scripts/nodeset.py #exclude: ClusterShell\.Worker\.Paramiko # The type of the output that should be generated. output: html #output: pdf # Write html output to the following directory target: doc/epydoc/html # Include all automatically generated graphs. These graphs are # generated using Graphviz dot. graph: all dotpath: /usr/bin/dot # The format for showing inheritance objects. # It should be one of: 'grouped', 'listed', 'included'. #inheritance: listed # Whether or not to include syntax highlighted source code in # the output (HTML only). 
sourcecode: yes #docformat: restructuredtext clustershell-1.6/Licence_CeCILL-C_V1-fr.txt0000644000130500135250000005434411741571247017746 0ustar thiellgpocre CONTRAT DE LICENCE DE LOGICIEL LIBRE CeCILL-C Avertissement Ce contrat est une licence de logiciel libre issue d'une concertation entre ses auteurs afin que le respect de deux grands principes préside à sa rédaction: * d'une part, le respect des principes de diffusion des logiciels libres: accès au code source, droits étendus conférés aux utilisateurs, * d'autre part, la désignation d'un droit applicable, le droit français, auquel elle est conforme, tant au regard du droit de la responsabilité civile que du droit de la propriété intellectuelle et de la protection qu'il offre aux auteurs et titulaires des droits patrimoniaux sur un logiciel. Les auteurs de la licence CeCILL-C (pour Ce[a] C[nrs] I[nria] L[ogiciel] L[ibre]) sont: Commissariat à l'Energie Atomique - CEA, établissement public de recherche à caractère scientifique, technique et industriel, dont le siège est situé 25 rue Leblanc, immeuble Le Ponant D, 75015 Paris. Centre National de la Recherche Scientifique - CNRS, établissement public à caractère scientifique et technologique, dont le siège est situé 3 rue Michel-Ange, 75794 Paris cedex 16. Institut National de Recherche en Informatique et en Automatique - INRIA, établissement public à caractère scientifique et technologique, dont le siège est situé Domaine de Voluceau, Rocquencourt, BP 105, 78153 Le Chesnay cedex. Préambule Ce contrat est une licence de logiciel libre dont l'objectif est de conférer aux utilisateurs la liberté de modifier et de réutiliser le logiciel régi par cette licence. L'exercice de cette liberté est assorti d'une obligation de remettre à la disposition de la communauté les modifications apportées au code source du logiciel afin de contribuer à son évolution. 
L'accessibilité au code source et les droits de copie, de modification et de redistribution qui découlent de ce contrat ont pour contrepartie de n'offrir aux utilisateurs qu'une garantie limitée et de ne faire peser sur l'auteur du logiciel, le titulaire des droits patrimoniaux et les concédants successifs qu'une responsabilité restreinte. A cet égard l'attention de l'utilisateur est attirée sur les risques associés au chargement, à l'utilisation, à la modification et/ou au développement et à la reproduction du logiciel par l'utilisateur étant donné sa spécificité de logiciel libre, qui peut le rendre complexe à manipuler et qui le réserve donc à des développeurs ou des professionnels avertis possédant des connaissances informatiques approfondies. Les utilisateurs sont donc invités à charger et tester l'adéquation du logiciel à leurs besoins dans des conditions permettant d'assurer la sécurité de leurs systèmes et/ou de leurs données et, plus généralement, à l'utiliser et l'exploiter dans les mêmes conditions de sécurité. Ce contrat peut être reproduit et diffusé librement, sous réserve de le conserver en l'état, sans ajout ni suppression de clauses. Ce contrat est susceptible de s'appliquer à tout logiciel dont le titulaire des droits patrimoniaux décide de soumettre l'exploitation aux dispositions qu'il contient. Article 1 - DEFINITIONS Dans ce contrat, les termes suivants, lorsqu'ils seront écrits avec une lettre capitale, auront la signification suivante: Contrat: désigne le présent contrat de licence, ses éventuelles versions postérieures et annexes. Logiciel: désigne le logiciel sous sa forme de Code Objet et/ou de Code Source et le cas échéant sa documentation, dans leur état au moment de l'acceptation du Contrat par le Licencié. Logiciel Initial: désigne le Logiciel sous sa forme de Code Source et éventuellement de Code Objet et le cas échéant sa documentation, dans leur état au moment de leur première diffusion sous les termes du Contrat. 
Logiciel Modifié: désigne le Logiciel modifié par au moins une Contribution Intégrée. Code Source: désigne l'ensemble des instructions et des lignes de programme du Logiciel et auquel l'accès est nécessaire en vue de modifier le Logiciel. Code Objet: désigne les fichiers binaires issus de la compilation du Code Source. Titulaire: désigne le ou les détenteurs des droits patrimoniaux d'auteur sur le Logiciel Initial. Licencié: désigne le ou les utilisateurs du Logiciel ayant accepté le Contrat. Contributeur: désigne le Licencié auteur d'au moins une Contribution Intégrée. Concédant: désigne le Titulaire ou toute personne physique ou morale distribuant le Logiciel sous le Contrat. Contribution Intégrée: désigne l'ensemble des modifications, corrections, traductions, adaptations et/ou nouvelles fonctionnalités intégrées dans le Code Source par tout Contributeur. Module Lié: désigne un ensemble de fichiers sources y compris leur documentation qui, sans modification du Code Source, permet de réaliser des fonctionnalités ou services supplémentaires à ceux fournis par le Logiciel. Logiciel Dérivé: désigne toute combinaison du Logiciel, modifié ou non, et d'un Module Lié. Parties: désigne collectivement le Licencié et le Concédant. Ces termes s'entendent au singulier comme au pluriel. Article 2 - OBJET Le Contrat a pour objet la concession par le Concédant au Licencié d'une licence non exclusive, cessible et mondiale du Logiciel telle que définie ci-après à l'article 5 pour toute la durée de protection des droits portant sur ce Logiciel. Article 3 - ACCEPTATION 3.1 L'acceptation par le Licencié des termes du Contrat est réputée acquise du fait du premier des faits suivants: * (i) le chargement du Logiciel par tout moyen notamment par téléchargement à partir d'un serveur distant ou par chargement à partir d'un support physique; * (ii) le premier exercice par le Licencié de l'un quelconque des droits concédés par le Contrat. 
3.2 Un exemplaire du Contrat, contenant notamment un avertissement relatif aux spécificités du Logiciel, à la restriction de garantie et à la limitation à un usage par des utilisateurs expérimentés a été mis à disposition du Licencié préalablement à son acceptation telle que définie à l'article 3.1 ci dessus et le Licencié reconnaît en avoir pris connaissance. Article 4 - ENTREE EN VIGUEUR ET DUREE 4.1 ENTREE EN VIGUEUR Le Contrat entre en vigueur à la date de son acceptation par le Licencié telle que définie en 3.1. 4.2 DUREE Le Contrat produira ses effets pendant toute la durée légale de protection des droits patrimoniaux portant sur le Logiciel. Article 5 - ETENDUE DES DROITS CONCEDES Le Concédant concède au Licencié, qui accepte, les droits suivants sur le Logiciel pour toutes destinations et pour la durée du Contrat dans les conditions ci-après détaillées. Par ailleurs, si le Concédant détient ou venait à détenir un ou plusieurs brevets d'invention protégeant tout ou partie des fonctionnalités du Logiciel ou de ses composants, il s'engage à ne pas opposer les éventuels droits conférés par ces brevets aux Licenciés successifs qui utiliseraient, exploiteraient ou modifieraient le Logiciel. En cas de cession de ces brevets, le Concédant s'engage à faire reprendre les obligations du présent alinéa aux cessionnaires. 5.1 DROIT D'UTILISATION Le Licencié est autorisé à utiliser le Logiciel, sans restriction quant aux domaines d'application, étant ci-après précisé que cela comporte: 1. la reproduction permanente ou provisoire du Logiciel en tout ou partie par tout moyen et sous toute forme. 2. le chargement, l'affichage, l'exécution, ou le stockage du Logiciel sur tout support. 3. 
la possibilité d'en observer, d'en étudier, ou d'en tester le fonctionnement afin de déterminer les idées et principes qui sont à la base de n'importe quel élément de ce Logiciel; et ceci, lorsque le Licencié effectue toute opération de chargement, d'affichage, d'exécution, de transmission ou de stockage du Logiciel qu'il est en droit d'effectuer en vertu du Contrat. 5.2 DROIT DE MODIFICATION Le droit de modification comporte le droit de traduire, d'adapter, d'arranger ou d'apporter toute autre modification au Logiciel et le droit de reproduire le logiciel en résultant. Il comprend en particulier le droit de créer un Logiciel Dérivé. Le Licencié est autorisé à apporter toute modification au Logiciel sous réserve de mentionner, de façon explicite, son nom en tant qu'auteur de cette modification et la date de création de celle-ci. 5.3 DROIT DE DISTRIBUTION Le droit de distribution comporte notamment le droit de diffuser, de transmettre et de communiquer le Logiciel au public sur tout support et par tout moyen ainsi que le droit de mettre sur le marché à titre onéreux ou gratuit, un ou des exemplaires du Logiciel par tout procédé. Le Licencié est autorisé à distribuer des copies du Logiciel, modifié ou non, à des tiers dans les conditions ci-après détaillées. 5.3.1 DISTRIBUTION DU LOGICIEL SANS MODIFICATION Le Licencié est autorisé à distribuer des copies conformes du Logiciel, sous forme de Code Source ou de Code Objet, à condition que cette distribution respecte les dispositions du Contrat dans leur totalité et soit accompagnée: 1. d'un exemplaire du Contrat, 2. 
d'un avertissement relatif à la restriction de garantie et de responsabilité du Concédant telle que prévue aux articles 8 et 9, et que, dans le cas où seul le Code Objet du Logiciel est redistribué, le Licencié permette un accès effectif au Code Source complet du Logiciel pendant au moins toute la durée de sa distribution du Logiciel, étant entendu que le coût additionnel d'acquisition du Code Source ne devra pas excéder le simple coût de transfert des données. 5.3.2 DISTRIBUTION DU LOGICIEL MODIFIE Lorsque le Licencié apporte une Contribution Intégrée au Logiciel, les conditions de distribution du Logiciel Modifié en résultant sont alors soumises à l'intégralité des dispositions du Contrat. Le Licencié est autorisé à distribuer le Logiciel Modifié sous forme de code source ou de code objet, à condition que cette distribution respecte les dispositions du Contrat dans leur totalité et soit accompagnée: 1. d'un exemplaire du Contrat, 2. d'un avertissement relatif à la restriction de garantie et de responsabilité du Concédant telle que prévue aux articles 8 et 9, et que, dans le cas où seul le code objet du Logiciel Modifié est redistribué, le Licencié permette un accès effectif à son code source complet pendant au moins toute la durée de sa distribution du Logiciel Modifié, étant entendu que le coût additionnel d'acquisition du code source ne devra pas excéder le simple coût de transfert des données. 5.3.3 DISTRIBUTION DU LOGICIEL DERIVE Lorsque le Licencié crée un Logiciel Dérivé, ce Logiciel Dérivé peut être distribué sous un contrat de licence autre que le présent Contrat à condition de respecter les obligations de mention des droits sur le Logiciel telles que définies à l'article 6.4. Dans le cas où la création du Logiciel Dérivé a nécessité une modification du Code Source le licencié s'engage à ce que: 1. le Logiciel Modifié correspondant à cette modification soit régi par le présent Contrat, 2. 
les Contributions Intégrées dont le Logiciel Modifié résulte soient clairement identifiées et documentées, 3. le Licencié permette un accès effectif au code source du Logiciel Modifié, pendant au moins toute la durée de la distribution du Logiciel Dérivé, de telle sorte que ces modifications puissent être reprises dans une version ultérieure du Logiciel, étant entendu que le coût additionnel d'acquisition du code source du Logiciel Modifié ne devra pas excéder le simple coût du transfert des données. 5.3.4 COMPATIBILITE AVEC LA LICENCE CeCILL Lorsqu'un Logiciel Modifié contient une Contribution Intégrée soumise au contrat de licence CeCILL, ou lorsqu'un Logiciel Dérivé contient un Module Lié soumis au contrat de licence CeCILL, les stipulations prévues au troisième item de l'article 6.4 sont facultatives. Article 6 - PROPRIETE INTELLECTUELLE 6.1 SUR LE LOGICIEL INITIAL Le Titulaire est détenteur des droits patrimoniaux sur le Logiciel Initial. Toute utilisation du Logiciel Initial est soumise au respect des conditions dans lesquelles le Titulaire a choisi de diffuser son oeuvre et nul autre n'a la faculté de modifier les conditions de diffusion de ce Logiciel Initial. Le Titulaire s'engage à ce que le Logiciel Initial reste au moins régi par le Contrat et ce, pour la durée visée à l'article 4.2. 6.2 SUR LES CONTRIBUTIONS INTEGREES Le Licencié qui a développé une Contribution Intégrée est titulaire sur celle-ci des droits de propriété intellectuelle dans les conditions définies par la législation applicable. 6.3 SUR LES MODULES LIES Le Licencié qui a développé un Module Lié est titulaire sur celui-ci des droits de propriété intellectuelle dans les conditions définies par la législation applicable et reste libre du choix du contrat régissant sa diffusion dans les conditions définies à l'article 5.3.3. 6.4 MENTIONS DES DROITS Le Licencié s'engage expressément: 1. 
à ne pas supprimer ou modifier de quelque manière que ce soit les mentions de propriété intellectuelle apposées sur le Logiciel; 2. à reproduire à l'identique lesdites mentions de propriété intellectuelle sur les copies du Logiciel modifié ou non; 3. à faire en sorte que l'utilisation du Logiciel, ses mentions de propriété intellectuelle et le fait qu'il est régi par le Contrat soient indiqués dans un texte facilement accessible notamment depuis l'interface de tout Logiciel Dérivé. Le Licencié s'engage à ne pas porter atteinte, directement ou indirectement, aux droits de propriété intellectuelle du Titulaire et/ou des Contributeurs sur le Logiciel et à prendre, le cas échéant, à l'égard de son personnel toutes les mesures nécessaires pour assurer le respect des dits droits de propriété intellectuelle du Titulaire et/ou des Contributeurs. Article 7 - SERVICES ASSOCIES 7.1 Le Contrat n'oblige en aucun cas le Concédant à la réalisation de prestations d'assistance technique ou de maintenance du Logiciel. Cependant le Concédant reste libre de proposer ce type de services. Les termes et conditions d'une telle assistance technique et/ou d'une telle maintenance seront alors déterminés dans un acte séparé. Ces actes de maintenance et/ou assistance technique n'engageront que la seule responsabilité du Concédant qui les propose. 7.2 De même, tout Concédant est libre de proposer, sous sa seule responsabilité, à ses licenciés une garantie, qui n'engagera que lui, lors de la redistribution du Logiciel et/ou du Logiciel Modifié et ce, dans les conditions qu'il souhaite. Cette garantie et les modalités financières de son application feront l'objet d'un acte séparé entre le Concédant et le Licencié. Article 8 - RESPONSABILITE 8.1 Sous réserve des dispositions de l'article 8.2, le Licencié a la faculté, sous réserve de prouver la faute du Concédant concerné, de solliciter la réparation du préjudice direct qu'il subirait du fait du Logiciel et dont il apportera la preuve. 
8.2 La responsabilité du Concédant est limitée aux engagements pris en application du Contrat et ne saurait être engagée en raison notamment: (i) des dommages dus à l'inexécution, totale ou partielle, de ses obligations par le Licencié, (ii) des dommages directs ou indirects découlant de l'utilisation ou des performances du Logiciel subis par le Licencié et (iii) plus généralement d'un quelconque dommage indirect. En particulier, les Parties conviennent expressément que tout préjudice financier ou commercial (par exemple perte de données, perte de bénéfices, perte d'exploitation, perte de clientèle ou de commandes, manque à gagner, trouble commercial quelconque) ou toute action dirigée contre le Licencié par un tiers, constitue un dommage indirect et n'ouvre pas droit à réparation par le Concédant. Article 9 - GARANTIE 9.1 Le Licencié reconnaît que l'état actuel des connaissances scientifiques et techniques au moment de la mise en circulation du Logiciel ne permet pas d'en tester et d'en vérifier toutes les utilisations ni de détecter l'existence d'éventuels défauts. L'attention du Licencié a été attirée sur ce point sur les risques associés au chargement, à l'utilisation, la modification et/ou au développement et à la reproduction du Logiciel qui sont réservés à des utilisateurs avertis. Il relève de la responsabilité du Licencié de contrôler, par tous moyens, l'adéquation du produit à ses besoins, son bon fonctionnement et de s'assurer qu'il ne causera pas de dommages aux personnes et aux biens. 9.2 Le Concédant déclare de bonne foi être en droit de concéder l'ensemble des droits attachés au Logiciel (comprenant notamment les droits visés à l'article 5). 9.3 Le Licencié reconnaît que le Logiciel est fourni "en l'état" par le Concédant sans autre garantie, expresse ou tacite, que celle prévue à l'article 9.2 et notamment sans aucune garantie sur sa valeur commerciale, son caractère sécurisé, innovant ou pertinent. 
En particulier, le Concédant ne garantit pas que le Logiciel est exempt d'erreur, qu'il fonctionnera sans interruption, qu'il sera compatible avec l'équipement du Licencié et sa configuration logicielle ni qu'il remplira les besoins du Licencié. 9.4 Le Concédant ne garantit pas, de manière expresse ou tacite, que le Logiciel ne porte pas atteinte à un quelconque droit de propriété intellectuelle d'un tiers portant sur un brevet, un logiciel ou sur tout autre droit de propriété. Ainsi, le Concédant exclut toute garantie au profit du Licencié contre les actions en contrefaçon qui pourraient être diligentées au titre de l'utilisation, de la modification, et de la redistribution du Logiciel. Néanmoins, si de telles actions sont exercées contre le Licencié, le Concédant lui apportera son aide technique et juridique pour sa défense. Cette aide technique et juridique est déterminée au cas par cas entre le Concédant concerné et le Licencié dans le cadre d'un protocole d'accord. Le Concédant dégage toute responsabilité quant à l'utilisation de la dénomination du Logiciel par le Licencié. Aucune garantie n'est apportée quant à l'existence de droits antérieurs sur le nom du Logiciel et sur l'existence d'une marque. Article 10 - RESILIATION 10.1 En cas de manquement par le Licencié aux obligations mises à sa charge par le Contrat, le Concédant pourra résilier de plein droit le Contrat trente (30) jours après notification adressée au Licencié et restée sans effet. 10.2 Le Licencié dont le Contrat est résilié n'est plus autorisé à utiliser, modifier ou distribuer le Logiciel. Cependant, toutes les licences qu'il aura concédées antérieurement à la résiliation du Contrat resteront valides sous réserve qu'elles aient été effectuées en conformité avec le Contrat. 
Article 11 - DISPOSITIONS DIVERSES 11.1 CAUSE EXTERIEURE Aucune des Parties ne sera responsable d'un retard ou d'une défaillance d'exécution du Contrat qui serait dû à un cas de force majeure, un cas fortuit ou une cause extérieure, telle que, notamment, le mauvais fonctionnement ou les interruptions du réseau électrique ou de télécommunication, la paralysie du réseau liée à une attaque informatique, l'intervention des autorités gouvernementales, les catastrophes naturelles, les dégâts des eaux, les tremblements de terre, le feu, les explosions, les grèves et les conflits sociaux, l'état de guerre... 11.2 Le fait, par l'une ou l'autre des Parties, d'omettre en une ou plusieurs occasions de se prévaloir d'une ou plusieurs dispositions du Contrat, ne pourra en aucun cas impliquer renonciation par la Partie intéressée à s'en prévaloir ultérieurement. 11.3 Le Contrat annule et remplace toute convention antérieure, écrite ou orale, entre les Parties sur le même objet et constitue l'accord entier entre les Parties sur cet objet. Aucune addition ou modification aux termes du Contrat n'aura d'effet à l'égard des Parties à moins d'être faite par écrit et signée par leurs représentants dûment habilités. 11.4 Dans l'hypothèse où une ou plusieurs des dispositions du Contrat s'avèrerait contraire à une loi ou à un texte applicable, existants ou futurs, cette loi ou ce texte prévaudrait, et les Parties feraient les amendements nécessaires pour se conformer à cette loi ou à ce texte. Toutes les autres dispositions resteront en vigueur. De même, la nullité, pour quelque raison que ce soit, d'une des dispositions du Contrat ne saurait entraîner la nullité de l'ensemble du Contrat. 11.5 LANGUE Le Contrat est rédigé en langue française et en langue anglaise, ces deux versions faisant également foi. Article 12 - NOUVELLES VERSIONS DU CONTRAT 12.1 Toute personne est autorisée à copier et distribuer des copies de ce Contrat. 
12.2 Afin d'en préserver la cohérence, le texte du Contrat est protégé et ne peut être modifié que par les auteurs de la licence, lesquels se réservent le droit de publier périodiquement des mises à jour ou de nouvelles versions du Contrat, qui posséderont chacune un numéro distinct. Ces versions ultérieures seront susceptibles de prendre en compte de nouvelles problématiques rencontrées par les logiciels libres. 12.3 Tout Logiciel diffusé sous une version donnée du Contrat ne pourra faire l'objet d'une diffusion ultérieure que sous la même version du Contrat ou une version postérieure. Article 13 - LOI APPLICABLE ET COMPETENCE TERRITORIALE 13.1 Le Contrat est régi par la loi française. Les Parties conviennent de tenter de régler à l'amiable les différends ou litiges qui viendraient à se produire par suite ou à l'occasion du Contrat. 13.2 A défaut d'accord amiable dans un délai de deux (2) mois à compter de leur survenance et sauf situation relevant d'une procédure d'urgence, les différends ou litiges seront portés par la Partie la plus diligente devant les Tribunaux compétents de Paris. Version 1.0 du 2006-09-05. clustershell-1.6/Licence_CeCILL-C_V1-en.txt0000644000130500135250000005255111741571247017737 0ustar thiellgpocre CeCILL-C FREE SOFTWARE LICENSE AGREEMENT Notice This Agreement is a Free Software license agreement that is the result of discussions between its authors in order to ensure compliance with the two main principles guiding its drafting: * firstly, compliance with the principles governing the distribution of Free Software: access to source code, broad rights granted to users, * secondly, the election of a governing law, French law, with which it is conformant, both as regards the law of torts and intellectual property law, and the protection that it offers to both authors and holders of the economic rights over software. 
The authors of the CeCILL-C (for Ce[a] C[nrs] I[nria] L[ogiciel] L[ibre]) license are: Commissariat à l'Energie Atomique - CEA, a public scientific, technical and industrial research establishment, having its principal place of business at 25 rue Leblanc, immeuble Le Ponant D, 75015 Paris, France. Centre National de la Recherche Scientifique - CNRS, a public scientific and technological establishment, having its principal place of business at 3 rue Michel-Ange, 75794 Paris cedex 16, France. Institut National de Recherche en Informatique et en Automatique - INRIA, a public scientific and technological establishment, having its principal place of business at Domaine de Voluceau, Rocquencourt, BP 105, 78153 Le Chesnay cedex, France. Preamble The purpose of this Free Software license agreement is to grant users the right to modify and re-use the software governed by this license. The exercising of this right is conditional upon the obligation to make available to the community the modifications made to the source code of the software so as to contribute to its evolution. In consideration of access to the source code and the rights to copy, modify and redistribute granted by the license, users are provided only with a limited warranty and the software's author, the holder of the economic rights, and the successive licensors only have limited liability. In this respect, the risks associated with loading, using, modifying and/or developing or reproducing the software by the user are brought to the user's attention, given its Free Software status, which may make it complicated to use, with the result that its use is reserved for developers and experienced professionals having in-depth computer knowledge. Users are therefore encouraged to load and test the suitability of the software as regards their requirements in conditions enabling the security of their systems and/or data to be ensured and, more generally, to use and operate it in the same conditions of security. 
This Agreement may be freely reproduced and published, provided it is not altered, and that no provisions are either added or removed herefrom. This Agreement may apply to any or all software for which the holder of the economic rights decides to submit the use thereof to its provisions. Article 1 - DEFINITIONS For the purpose of this Agreement, when the following expressions commence with a capital letter, they shall have the following meaning: Agreement: means this license agreement, and its possible subsequent versions and annexes. Software: means the software in its Object Code and/or Source Code form and, where applicable, its documentation, "as is" when the Licensee accepts the Agreement. Initial Software: means the Software in its Source Code and possibly its Object Code form and, where applicable, its documentation, "as is" when it is first distributed under the terms and conditions of the Agreement. Modified Software: means the Software modified by at least one Integrated Contribution. Source Code: means all the Software's instructions and program lines to which access is required so as to modify the Software. Object Code: means the binary files originating from the compilation of the Source Code. Holder: means the holder(s) of the economic rights over the Initial Software. Licensee: means the Software user(s) having accepted the Agreement. Contributor: means a Licensee having made at least one Integrated Contribution. Licensor: means the Holder, or any other individual or legal entity, who distributes the Software under the Agreement. Integrated Contribution: means any or all modifications, corrections, translations, adaptations and/or new functions integrated into the Source Code by any or all Contributors. Related Module: means a set of sources files including their documentation that, without modification to the Source Code, enables supplementary functions or services in addition to those offered by the Software. 
Derivative Software: means any combination of the Software, modified or not, and of a Related Module. Parties: mean both the Licensee and the Licensor. These expressions may be used both in singular and plural form. Article 2 - PURPOSE The purpose of the Agreement is the grant by the Licensor to the Licensee of a non-exclusive, transferable and worldwide license for the Software as set forth in Article 5 hereinafter for the whole term of the protection granted by the rights over said Software. Article 3 - ACCEPTANCE 3.1 The Licensee shall be deemed as having accepted the terms and conditions of this Agreement upon the occurrence of the first of the following events: * (i) loading the Software by any or all means, notably, by downloading from a remote server, or by loading from a physical medium; * (ii) the first time the Licensee exercises any of the rights granted hereunder. 3.2 One copy of the Agreement, containing a notice relating to the characteristics of the Software, to the limited warranty, and to the fact that its use is restricted to experienced users has been provided to the Licensee prior to its acceptance as set forth in Article 3.1 hereinabove, and the Licensee hereby acknowledges that it has read and understood it. Article 4 - EFFECTIVE DATE AND TERM 4.1 EFFECTIVE DATE The Agreement shall become effective on the date when it is accepted by the Licensee as set forth in Article 3.1. 4.2 TERM The Agreement shall remain in force for the entire legal term of protection of the economic rights over the Software. Article 5 - SCOPE OF RIGHTS GRANTED The Licensor hereby grants to the Licensee, who accepts, the following rights over the Software for any or all use, and for the term of the Agreement, on the basis of the terms and conditions set forth hereinafter. 
Besides, if the Licensor owns or comes to own one or more patents protecting all or part of the functions of the Software or of its components, the Licensor undertakes not to enforce the rights granted by these patents against successive Licensees using, exploiting or modifying the Software. If these patents are transferred, the Licensor undertakes to have the transferees subscribe to the obligations set forth in this paragraph. 5.1 RIGHT OF USE The Licensee is authorized to use the Software, without any limitation as to its fields of application, with it being hereinafter specified that this comprises: 1. permanent or temporary reproduction of all or part of the Software by any or all means and in any or all form. 2. loading, displaying, running, or storing the Software on any or all medium. 3. entitlement to observe, study or test its operation so as to determine the ideas and principles behind any or all constituent elements of said Software. This shall apply when the Licensee carries out any or all loading, displaying, running, transmission or storage operation as regards the Software, that it is entitled to carry out hereunder. 5.2 RIGHT OF MODIFICATION The right of modification includes the right to translate, adapt, arrange, or make any or all modifications to the Software, and the right to reproduce the resulting software. It includes, in particular, the right to create a Derivative Software. The Licensee is authorized to make any or all modification to the Software provided that it includes an explicit notice that it is the author of said modification and indicates the date of the creation thereof. 5.3 RIGHT OF DISTRIBUTION In particular, the right of distribution includes the right to publish, transmit and communicate the Software to the general public on any or all medium, and by any or all means, and the right to market, either in consideration of a fee, or free of charge, one or more copies of the Software by any means. 
The Licensee is further authorized to distribute copies of the modified or unmodified Software to third parties according to the terms and conditions set forth hereinafter. 5.3.1 DISTRIBUTION OF SOFTWARE WITHOUT MODIFICATION The Licensee is authorized to distribute true copies of the Software in Source Code or Object Code form, provided that said distribution complies with all the provisions of the Agreement and is accompanied by: 1. a copy of the Agreement, 2. a notice relating to the limitation of both the Licensor's warranty and liability as set forth in Articles 8 and 9, and that, in the event that only the Object Code of the Software is redistributed, the Licensee allows effective access to the full Source Code of the Software at a minimum during the entire period of its distribution of the Software, it being understood that the additional cost of acquiring the Source Code shall not exceed the cost of transferring the data. 5.3.2 DISTRIBUTION OF MODIFIED SOFTWARE When the Licensee makes an Integrated Contribution to the Software, the terms and conditions for the distribution of the resulting Modified Software become subject to all the provisions of this Agreement. The Licensee is authorized to distribute the Modified Software, in source code or object code form, provided that said distribution complies with all the provisions of the Agreement and is accompanied by: 1. a copy of the Agreement, 2. a notice relating to the limitation of both the Licensor's warranty and liability as set forth in Articles 8 and 9, and that, in the event that only the object code of the Modified Software is redistributed, the Licensee allows effective access to the full source code of the Modified Software at a minimum during the entire period of its distribution of the Modified Software, it being understood that the additional cost of acquiring the source code shall not exceed the cost of transferring the data. 
5.3.3 DISTRIBUTION OF DERIVATIVE SOFTWARE When the Licensee creates Derivative Software, this Derivative Software may be distributed under a license agreement other than this Agreement, subject to compliance with the requirement to include a notice concerning the rights over the Software as defined in Article 6.4. In the event the creation of the Derivative Software required modification of the Source Code, the Licensee undertakes that: 1. the resulting Modified Software will be governed by this Agreement, 2. the Integrated Contributions in the resulting Modified Software will be clearly identified and documented, 3. the Licensee will allow effective access to the source code of the Modified Software, at a minimum during the entire period of distribution of the Derivative Software, such that such modifications may be carried over in a subsequent version of the Software; it being understood that the additional cost of purchasing the source code of the Modified Software shall not exceed the cost of transferring the data. 5.3.4 COMPATIBILITY WITH THE CeCILL LICENSE When a Modified Software contains an Integrated Contribution subject to the CeCILL license agreement, or when a Derivative Software contains a Related Module subject to the CeCILL license agreement, the provisions set forth in the third item of Article 6.4 are optional. Article 6 - INTELLECTUAL PROPERTY 6.1 OVER THE INITIAL SOFTWARE The Holder owns the economic rights over the Initial Software. Any or all use of the Initial Software is subject to compliance with the terms and conditions under which the Holder has elected to distribute its work and no one shall be entitled to modify the terms and conditions for the distribution of said Initial Software. The Holder undertakes that the Initial Software will remain ruled at least by this Agreement, for the duration set forth in Article 4.2. 
6.2 OVER THE INTEGRATED CONTRIBUTIONS The Licensee who develops an Integrated Contribution is the owner of the intellectual property rights over this Contribution as defined by applicable law. 6.3 OVER THE RELATED MODULES The Licensee who develops a Related Module is the owner of the intellectual property rights over this Related Module as defined by applicable law and is free to choose the type of agreement that shall govern its distribution under the conditions defined in Article 5.3.3. 6.4 NOTICE OF RIGHTS The Licensee expressly undertakes: 1. not to remove, or modify, in any manner, the intellectual property notices attached to the Software; 2. to reproduce said notices, in an identical manner, in the copies of the Software modified or not; 3. to ensure that use of the Software, its intellectual property notices and the fact that it is governed by the Agreement is indicated in a text that is easily accessible, specifically from the interface of any Derivative Software. The Licensee undertakes not to directly or indirectly infringe the intellectual property rights of the Holder and/or Contributors on the Software and to take, where applicable, vis-à-vis its staff, any and all measures required to ensure respect of said intellectual property rights of the Holder and/or Contributors. Article 7 - RELATED SERVICES 7.1 Under no circumstances shall the Agreement oblige the Licensor to provide technical assistance or maintenance services for the Software. However, the Licensor is entitled to offer this type of services. The terms and conditions of such technical assistance, and/or such maintenance, shall be set forth in a separate instrument. Only the Licensor offering said maintenance and/or technical assistance services shall incur liability therefor. 
7.2 Similarly, any Licensor is entitled to offer to its licensees, under its sole responsibility, a warranty, that shall only be binding upon itself, for the redistribution of the Software and/or the Modified Software, under terms and conditions that it is free to decide. Said warranty, and the financial terms and conditions of its application, shall be subject of a separate instrument executed between the Licensor and the Licensee. Article 8 - LIABILITY 8.1 Subject to the provisions of Article 8.2, the Licensee shall be entitled to claim compensation for any direct loss it may have suffered from the Software as a result of a fault on the part of the relevant Licensor, subject to providing evidence thereof. 8.2 The Licensor's liability is limited to the commitments made under this Agreement and shall not be incurred as a result of in particular: (i) loss due to the Licensee's total or partial failure to fulfill its obligations, (ii) direct or consequential loss that is suffered by the Licensee due to the use or performance of the Software, and (iii) more generally, any consequential loss. In particular the Parties expressly agree that any or all pecuniary or business loss (i.e. loss of data, loss of profits, operating loss, loss of customers or orders, opportunity cost, any disturbance to business activities) or any or all legal proceedings instituted against the Licensee by a third party, shall constitute consequential loss and shall not provide entitlement to any or all compensation from the Licensor. Article 9 - WARRANTY 9.1 The Licensee acknowledges that the scientific and technical state-of-the-art when the Software was distributed did not enable all possible uses to be tested and verified, nor for the presence of possible defects to be detected. In this respect, the Licensee's attention has been drawn to the risks associated with loading, using, modifying and/or developing and reproducing the Software which are reserved for experienced users. 
The Licensee shall be responsible for verifying, by any or all means, the suitability of the product for its requirements, its good working order, and for ensuring that it shall not cause damage to either persons or properties. 9.2 The Licensor hereby represents, in good faith, that it is entitled to grant all the rights over the Software (including in particular the rights set forth in Article 5). 9.3 The Licensee acknowledges that the Software is supplied "as is" by the Licensor without any other express or tacit warranty, other than that provided for in Article 9.2 and, in particular, without any warranty as to its commercial value, its secured, safe, innovative or relevant nature. Specifically, the Licensor does not warrant that the Software is free from any error, that it will operate without interruption, that it will be compatible with the Licensee's own equipment and software configuration, nor that it will meet the Licensee's requirements. 9.4 The Licensor does not either expressly or tacitly warrant that the Software does not infringe any third party intellectual property right relating to a patent, software or any other property right. Therefore, the Licensor disclaims any and all liability towards the Licensee arising out of any or all proceedings for infringement that may be instituted in respect of the use, modification and redistribution of the Software. Nevertheless, should such proceedings be instituted against the Licensee, the Licensor shall provide it with technical and legal assistance for its defense. Such technical and legal assistance shall be decided on a case-by-case basis between the relevant Licensor and the Licensee pursuant to a memorandum of understanding. The Licensor disclaims any and all liability as regards the Licensee's use of the name of the Software. No warranty is given as regards the existence of prior rights over the name of the Software or as regards the existence of a trademark. 
Article 10 - TERMINATION 10.1 In the event of a breach by the Licensee of its obligations hereunder, the Licensor may automatically terminate this Agreement thirty (30) days after notice has been sent to the Licensee and has remained ineffective. 10.2 A Licensee whose Agreement is terminated shall no longer be authorized to use, modify or distribute the Software. However, any licenses that it may have granted prior to termination of the Agreement shall remain valid subject to their having been granted in compliance with the terms and conditions hereof. Article 11 - MISCELLANEOUS 11.1 EXCUSABLE EVENTS Neither Party shall be liable for any or all delay, or failure to perform the Agreement, that may be attributable to an event of force majeure, an act of God or an outside cause, such as defective functioning or interruptions of the electricity or telecommunications networks, network paralysis following a virus attack, intervention by government authorities, natural disasters, water damage, earthquakes, fire, explosions, strikes and labor unrest, war, etc. 11.2 Any failure by either Party, on one or more occasions, to invoke one or more of the provisions hereof, shall under no circumstances be interpreted as being a waiver by the interested Party of its right to invoke said provision(s) subsequently. 11.3 The Agreement cancels and replaces any or all previous agreements, whether written or oral, between the Parties and having the same purpose, and constitutes the entirety of the agreement between said Parties concerning said purpose. No supplement or modification to the terms and conditions hereof shall be effective as between the Parties unless it is made in writing and signed by their duly authorized representatives. 
11.4 In the event that one or more of the provisions hereof were to conflict with a current or future applicable act or legislative text, said act or legislative text shall prevail, and the Parties shall make the necessary amendments so as to comply with said act or legislative text. All other provisions shall remain effective. Similarly, invalidity of a provision of the Agreement, for any reason whatsoever, shall not cause the Agreement as a whole to be invalid. 11.5 LANGUAGE The Agreement is drafted in both French and English and both versions are deemed authentic. Article 12 - NEW VERSIONS OF THE AGREEMENT 12.1 Any person is authorized to duplicate and distribute copies of this Agreement. 12.2 So as to ensure coherence, the wording of this Agreement is protected and may only be modified by the authors of the License, who reserve the right to periodically publish updates or new versions of the Agreement, each with a separate number. These subsequent versions may address new issues encountered by Free Software. 12.3 Any Software distributed under a given version of the Agreement may only be subsequently distributed under the same version of the Agreement or a subsequent version. Article 13 - GOVERNING LAW AND JURISDICTION 13.1 The Agreement is governed by French law. The Parties agree to endeavor to seek an amicable solution to any disagreements or disputes that may arise during the performance of the Agreement. 13.2 Failing an amicable solution within two (2) months as from their occurrence, and unless emergency proceedings are necessary, the disagreements or disputes shall be referred to the Paris Courts having jurisdiction, by the more diligent Party. Version 1.0 dated 2006-09-05. clustershell-1.6/ChangeLog0000644000130500135250000011547211741571247015207 0ustar thiellgpocre2012-04-08 S. Thiell * Version 1.6 released. * doc/guide: Add ClusterShell User and Programming Guide LaTeX source to repository. 2012-04-07 S. 
Thiell * doc/examples/check_nodes.py: Add simple example of event-driven script. 2012-03-31 S. Thiell * CLI/Nodeset.py: Allow -a and common nodeset operations when using -l to list belonging groups (a new 1.6 feature, see ticket #162). 2012-03-29 S. Thiell * Worker/Worker.py: added documentation for worker.current_[node,msg,errmsg,rc] variables (ticket #160). * Task.py: timeout parameters better explained (ticket #157). 2012-03-28 S. Thiell * CLI/OptionParser.py: Add --diff option to enable diff display between gathered outputs. Enabled in clush and clubak (ticket #176). * CLI/Display.py: Add _print_diff() and flush() methods. * Task.py: Initialize MsgTree instances in constructor according to default values in order to allow no-op calls to buffer getters before resume() (ticket #186). 2012-03-26 S. Thiell * CLI/Clush.py: Fix clush --[r]copy behavior when no source directory is specified (ticket #172). * CLI/Clush.py: Fix interactive mode gather/standard toggle error, when using special character '=' (ticket #171). * CLI/Clubak.py: Add -v/-q verbosity options (ticket #174). 2012-03-24 S. Thiell * CLI/Clubak.py: Add --interpret-keys=never,always,auto option to clubak to allow a more generic usage of clubak, ie. even in cases where keys are not nodeset compliant (ticket #180). 2012-03-21 S. Thiell * conf/groups.conf: Fix group cross reference issue (ticket #183), we now use sed commands instead of awk ones in this default groups.conf file. 2012-03-18 S. Thiell * conf/groups.conf: Fix default source regexp for mawk (ticket #178). * Packaging: Add groups.conf.d directory and sample files. 2012-03-17 S. Thiell * CLI/Nodeset.py: Add support for -l[ll] to list belonging groups (CLI interface to NodeSet.groups()) (ticket #162). * NodeSet: Add groups() public method to list groups nodeset belongs to. 2012-03-15 S. Thiell * NodeUtils.py: Add groupsdir option (ticket #179). 2012-03-14 S. Thiell * CLI/Nodeset.py: Add --contiguous splitting option (ticket #173). 
* NodeSet.py: Add contiguous() iterator. * RangeSet.py: Add contiguous() iterator. * RangeSet.py: Allow slice object in fromone() constructor. 2012-02-26 S. Thiell * Gateway.py: Improved logging facility, configurable through CLUSTERSHELL_GW_LOG_DIR and CLUSTERSHELL_GW_LOG_LEVEL environment variables from the root node. * Communication.py: Messages are now transfered in xml payload instead of 'output' attribute for improved handling of multi-lines messages in StdOutMessage and StdErrMessage. 2012-02-24 S. Thiell * Worker/EngineClient.py: Fix gateway write performance issue, as seen on a very large cluster with a no-grooming test case and lots of small messages sent, by calling os.write() as soon as possible (might safely fail if not ready as we are in non-blocking mode). * NodeSet.py: Internal parsing optimization by adding a "should copy RangeSet object?" flag to NodeSetBase constructor in order to save useless but slightly costly RangeSet.copy() calls. * NodeSet.py: Small rangeset parsing optimization on single node string parsing code. 2012-02-19 S. Thiell * NodeSet.py: Add NodeSet.nsiter(), a fast iterator on nodes as NodeSet objects to avoid object-to-string-to-object conversion in some cases when using __iter__() -- like in PropagationTreeRouter.dispatch(). 2012-02-15 S. Thiell * Clush.py: Add --topology hidden option to enable V2 tree propagation technology preview. 2012-02-01 S. Thiell * RangeSet.py: Fix RangeSet.__setstate__() for proper object unpickling from older RangeSet versions. Add unpickling tests. 2012-01-28 S. Thiell * RangeSet.py: Discard AVL-tree based implementation, as we noticed that built-in set is much faster. New implementation is based on built-in set, and slightly changes padding and __iter__() behaviors. Padding value is now accessible and settable at any time via a public variable "padding". Auto-detection of padding is still available, but it is used as a convenience for printing range sets. 
Moreover, all set-like operations are now only based on integers, ignoring RangeSet's padding value. __iter__() has been changed in order to iterate over sorted inner set integers, instead of string items. A new method striter() is available to iterate over string padding-enabled items. These changes allow us to offer a full set-like API for RangeSet (new methods like isdisjoint(), pop(), etc. are available according to your Python version). Also, a new constructor that take any iterable of integers is available. Finally, this implementation should much more faster than all previous ones, especially for large range sets (ten thousand and more) with lots of holes. 2012-01-10 S. Thiell * RangeSet.py: Move RangeSet class from NodeSet.py to this new module dedicated to scalable management of cluster range sets (tens of thousands of disjoint ranges). Change internal algorithm used to manage ranges from a list to an AVL-tree based on bintrees project's avltree implementation. Got rid of expand/fold() methods that don't scale, all sets-like methods have been rewritten using AVL-tree. 2012-01-04 S. Thiell * Task.py: Change behavior of shell()'s tree=None (auto) parameter: added Task default parameter "auto_tree" defaulting to False and checked by shell() when tree=None. This means that even with a valid topology configuration file, the user has to explicitly enable tree mode for now. This is for next 1.6 release and should be changed to True in version 2.0. 2011-11-28 S. Thiell * Task.py: Fix 'tree' option of shell(), which can be either True (force enable tree mode), False (disable tree mode) and None (automatic). 2011-11-24 S. Thiell * CLI/Clush.py: Enable tree mode by default with grooming option. * Worker/Tree.py: Integrate WorkerTree within ClusterShell Engine framework, it will be used instead of PropagationTree. * Engine/Engine.py: Inhibit any engine client changes when client is not registered. * Topology.py: Change DEFAULT section to Main section in topology.conf. 
Cosmetic changes. 2011-06-09 S. Thiell * Version 1.5.1 released. * NodeSet.py: Added workaround to allow pickling/unpickling of RangeSet objects for Python 2.4 (ticket #156). 2011-06-08 S. Thiell * Version 1.5 released (Sedona release). 2011-06-07 S. Thiell * MsgTree.py: Improved MsgTree API to lighten updates of keys associated to tree elements (ticket #131). * CLI/Clubak.py: Updated for new MsgTree API and added a -F/--fast switch to enable preloading of whole messages to speed up processing, but with an increase of memory consumption (ticket #131). 2011-05-31 S. Thiell * NodeSet.py: Optimized NodeSet.fromlist() method by adding updaten() method which is quite O(num_patterns). 2011-05-29 S. Thiell * NodeSet.py: Fixed missing autostep check in _fold() which could lead to autostep not being taken into account (ticket #150). * Worker/Ssh.py: Fix scp user option in Scp class (ticket #152). * Engine/*.py: Internal engine design change: do not retry engine eventloop on any EngineClient registration changes, so process more events by chunk (should be faster) and add a loop iteration counter to work around internally re-used FDs (finalize ticket #153). 2011-05-26 S. Thiell * Worker/EngineClient.py: Enable fastsubprocess module, and use file descriptors instead of file objects everywhere (ticket #153). * Worker/fastsubprocess.py: Faster, relaxed version of Python 2.6 subprocess.py with non blocking fd support. 2011-05-15 S. Thiell * Engine/Engine.py: Improved start_all() fanout algorithm by adding a separate pending clients list. * Created 1.5 branch. 2011-03-19 S. Thiell * Version 1.4.3 released. * CLI/Nodeset.py: Make stdin '-' keyword work when used for -i/x/X operations (ticket #148). * CLI/Clush.py: Fixed issue when using clush -bL (missing argument) due to latest 1.4.2 changes. Added tests/ClushScriptTest.py to detect that in the future (ticket #147). 2011-02-15 S. Thiell * Version 1.4.2 released. 2011-03-12 S. 
Thiell * NodeSet.py: Fixed issues with objects copying, so got rid of copy module and added optimized RangeSet.copy() and NodeSet.copy() methods (ticket #146). 2011-03-09 S. Thiell * CLI/Clush.py: Added running progress indicator for --[r]copy commands. 2011-03-08 S. Thiell * CLI/Clush.py: Improved -v switch (closes ticket #100: print live node output plus noderange-grouped output at the end). * CLI/Clubak.py: Add -T,--tree message tree mode option (ticket #144). * MsgTree.py: Class initialization variant (trace mode) to keep track of old keys/nodes for each message (part of #144). 2011-03-06 S. Thiell * CLI/Clush.py: Implement clush -L (not -bL) to order output by nodename, like clubak -L (ticket #141). * CLI/Nodeset.py: Added -I/--slice command option to select node(s) by index(es) or RangeSet-style slice (ticket #140). * CLI/Nodeset.py: Remove pending limitation when using -[ixX] operations with nodesets specified by -a (all) or through stdin. * NodeSet.py: Add RangeSet.slices() method. 2011-03-05 S. Thiell * NodeSet.py: Internal changes to use slice type to represent ranges in RangeSet. Changed RangeSet.add_range() 'stop' argument semantic, it is now conforming to range()'s one. * NodeSet.py: Fix issue with in-place operators returning None. Added tests. 2011-02-27 S. Thiell * NodeSet.py: Fix issue when using negative index or negative slice indices for RangeSet and NodeSet. 2011-02-24 S. Thiell * CLI/Nodeset.py: Add -ll and -lll extended options to list corresponding group nodes, and also group node count (ticket #143). 2011-02-13 S. Thiell * Version 1.4.1 released. 2011-02-08 S. Thiell * CLI/Config.py: Add fd_max integer parameter to set the max number of open files (soft limit) permitted per clush process. This will fix an issue on systems where hard limit is not reasonable. 2011-02-07 S. Thiell * CLI/OptionParser.py: Add clush -E hidden option to enforce a specific I/O events engine (should not be needed, but can be useful for testing). 
Improve engine selection error handling. 2011-02-06 S. Thiell * Engine/Select.py: New select()-based engine (from H. Doreau, ticket #8). * CLI/{Clush,Display}.py: Do not display exit code with clush when -qS is specified (ticket #117). * CLI/Clush.py: Allow clush to run without argument when stdin is not a tty, by disabling ssh pseudo-tty allocation. You can now type `echo uname | clush -w ` (ticket #134). * Worker/Ssh.py: Fix issue when more than one ssh options are specified with -o or in clush.conf (ticket #138). 2011-02-05 S. Thiell * CLI/Clush.py: Fix issue when executing local command with clush -b in interactive mode (eg. !uname). * Worker/Worker.py: Define new current_node, current_msg, current_errmsg and current_rc Worker variables, updated at each event (last_read(), last_node() and last_retcode() will be deprecated from version 2.0). * Worker/*.py: Performance: removed _invoke() indirections when generating events + local variables optimization. * Task.py: Performance: replaced _TaskMsgTree metaclass by direct calls to MsgTree methods + local variables optimization. * Worker/Ssh.py: Local variables optimization. * CLI/Clush.py: Do not disable internal messages gathering when using -bL for proper display after Ctrl-C interruption (#133). 2011-01-26 S. Thiell * tests/config: test config-template directory created. 2011-01-17 S. Thiell * Communication.py: New module from 2.0 dev branch (author: H. Doreau). * Gateway.py: New module from 2.0 dev branch (author: H. Doreau). * Propagation.py: New module from 2.0 dev branch (author: H. Doreau). * Topology.py: New module from 2.0 dev branch (author: H. Doreau). 2011-01-15 S. Thiell * Version 1.4 released. * NodeSet.py: Add docstring for NodeSet string arithmetics (, ! & ^), which is also called extended string pattern (trac ticket #127). 2010-12-14 S. Thiell * Version 1.4 beta 1 released. 
* CLI/Display.py: In buffer header (for -b/-B without -L), print node count in brackets if > 1 and enabled by configuration (trac ticket #130). * CLI/Config.py: Add boolean node_count param (part of trac ticket #130). 2010-12-08 S. Thiell * CLI/Nodeset.py: Support nodeset --split option (trac ticket #91). * CLI/OptionParser.py: Add --split option (part of #91). * NodeSet.py: Avoid overflow by returning truncated results when there are not enough elements in the set for RangeSet.split(n) and NodeSet.split(n). 2010-12-02 S. Thiell * NodeSet.py: Much improved algorithm for RangeSet.add_range(). 2010-11-30 S. Thiell * Worker/{Popen,Pdsh,Ssh}.py: Tell system to release associated resources with the child process on abort. 2010-11-30 S. Thiell * Worker/Popen.py: Fix stderr pipe leak (trac ticket #121). * Worker/Ssh.py: Fix stderr pipe leak (trac ticket #121). * Worker/Pdsh.py: Fix stderr pipe leak (trac ticket #121). * tests/TaskRLimitsTest.py: New test. 2010-11-28 S. Thiell * NodeSet.py: Optimized NodeSet.__getitem__() (trac ticket #18). 2010-11-25 S. Thiell * NodeSet.py: Slice-optimized version of RangeSet.__getitem__(). 2010-11-03 S. Thiell * CLI/Clush.py: Added --rcopy support (trac ticket #55). * Task.py: Added rcopy() method (part of trac ticket #55). * Worker/Pdsh.py: Support for reverse file copy (part of trac ticket #55). * Worker/Ssh.py: Support for reverse file copy (part of trac ticket #55). 2010-11-02 S. Thiell * Worker/Ssh.py: Fix missing ev_start trigger when using task.copy() (trac ticket #125). 2010-11-01 S. Thiell * CLI/OptionParser.py: Make -c/--copy an option that can take several source arguments. * CLI/Clush.py: Improve signal handling (trac ticket #65). 2010-10-25 S. Thiell * CLI/Clush.py: Add launched-in-background checks before enabling user interaction (fix trac ticket #114). 2010-10-20 S. Thiell * Task.py: Docstring improvements (trac tickets #120, #122). 2010-10-20 A. Degremont * NodeSet.py: Optimize NodeSetBase iteration. 2010-10-17 S. 
Thiell * Engine/Factory.py: Re-enable EPoll engine (closes trac ticket #56). * Engine/EPoll.py: Cleanup and minor fix in the way event masks are modified. * CLI/Clush.py: Changed the way of reading stdin, which is now based on blocking reads using a specified thread and thread-safe messaging with acknowledgement using a task port (part of trac ticket #56). 2010-10-11 S. Thiell * Worker/Worker.py: Add Worker.abort() base method and ensure proper implementation in all workers (trac ticket #63). 2010-10-10 S. Thiell * Worker/Worker.py: WorkerBadArgumentError exception is now deprecated, use ValueError instead. Also added exception message in each worker (trac ticket #116). 2010-10-01 A. Degremont * Task.py: Add Task.run() new method (trac ticket #119). 2010-09-28 S. Thiell * CLI/OptionParser.py: Do not allow option value starting with '-' in some cases. 2010-09-26 S. Thiell * CLI: Package created. 2010-09-03 S. Thiell * Worker/Ssh.py: Fix issue with clush -l USER by separating underlying ssh "-l USER" in two shell arguments (trac ticket #113). 2010-08-31 S. Thiell * scripts/clush.py: Live per-line gathering (-bL mode) improvements. * Task.py: Fixed Task.timer() when called from another thread - it used to return None (trac ticket #112). 2010-08-29 S. Thiell * Task.py: Add docstring for timer's autoclose feature (trac ticket #109). * Worker/Worker.py: Attribute 'last_errmsg' not properly initialized (trac ticket #107). * setup.py: Switch to setuptools. * clustershell.spec.in: Fix issue on el5 with if condition when defining python_sitelib. 2010-08-26 S. Thiell * Packaging automation engineering and improved specfile. * License files converted to UTF-8. 2010-07-27 S. Thiell * Version 1.3 released. 2010-07-21 S. Thiell * Version 1.3 RC 2 released. * NodeSet.py: Like in some previous version, support None as argument for most methods (trac ticket #106). 2010-07-16 S. Thiell * scripts/clush.py: Fix uncaught exceptions introduced in 1.3 RC 1 (trac ticket #105). 
2010-07-12 S. Thiell * Version 1.3 RC 1 released. * Task.py: Raise proper KeyError exception in Task.key_retcode(key) when key is not found in any finished workers (trac ticket #102). 2010-07-06 S. Thiell * Task.py: Added documentation for reserved set_default() and set_info() keys (trac ticket #101). * scripts/clubak.py: Merge latest code display changes made on clush to clubak, including "--color={never,always,auto}" (trac ticket #89). Updated documentation accordingly. 2010-06-29 H. Doreau * Worker/Pdsh.py: removed obsolete _read() and _readerr() methods that overrode EngineClient methods without raising an EOFException when read() reads nothing (trac ticket #97). 2010-06-28 S. Thiell * scripts/clush.py: Centralized handling of exceptions raised from Main and separate Task thread because some exceptions handled only in Main thread were not caught (fix btw trac ticket #93). 2010-06-17 S. Thiell * Version 1.3 beta 6 released. 2010-06-16 S. Thiell * scripts/clush.py: Check for trailing args when using -c/--copy (trac ticket #88). * NodeSet.py, NodeUtils.py: Add a way to retrieve all nodes when "all" external call is missing but "map" and "list" calls are specified (trac ticket #90). * Task.py: Add handling of stderr during task.copy(). * Worker/Ssh.py: Add handling of stderr (when needed) during scp. * scripts/clush.py: Fix display issue with clush --copy when some nodes are not reachable. * Version 1.3 beta 5 released. 2010-06-15 S. Thiell * scripts/clush.py: Add --color={never,always,auto} command line option and color: {never,always,auto} config option (trac ticket #68), defaulting to `never'. Also did some code refactoring/lightening (created a Display class). Updated clush and clush.conf man pages. 2010-06-09 S. Thiell * scripts/clush.py: Automatically increase open files soft limit (trac ticket #61). Handle "Too many open files" exception. * Task.py: Add excepthook and default_excepthook methods to handle uncaught exception in a Task thread. 
Make it compliant with sys.excepthook also. 2010-06-08 S. Thiell * Version 1.3 beta 4 released. * doc/extras/vim/syntax/groupsconf.vim: Improved vim syntax file for groups.conf (trac ticket #85): now $GROUP and $NODE are keywords. * scripts/clush.py: Do not wait for the end of all commands when using -bL switches when possible (trac ticket #69). * MsgTree.py: Added remove(match) method to remove entry from the tree. * Task.py: Added flush_buffers() and flush_errors() methods. * Worker/Worker.py: Added flush_buffers() and flush_errors() methods. 2010-05-26 S. Thiell * Version 1.3 beta 3 released. * scripts/clush.py: Fixed issue (-g/-X group not working as expected) found in release 1.3 beta2. 2010-05-25 S. Thiell * Version 1.3 beta 2 released. * scripts/clush.py: Added -G, --groupbase to strip group source prefix when using -r. * scripts/clubak.py: Added -G, --groupbase to strip group source prefix when using -r. * scripts/nodeset.py: Changed -N, --noprefix to -G, --groupbase to avoid conflict with clush -N. * scripts/clush.py: Fixed missing support for group source (-s GROUPSOURCE) when using -a or -g GROUP. * scripts/nodeset.py: Added --all, -a support (also works with -s GROUPSOURCE). Almost-silently removed -a for --autostep, I hope nobody's using it. :) * Updated man pages of clush, clubak and nodeset to match latest options changes (trac #58). * scripts/clubak.py: Added regroup support to clubak (trac ticket #78). Added -S to specify user-settable separator string (trac ticket #62). 2010-05-24 S. Thiell * tests/NodeSetGroupTest.py: Some cleanup in tests (use setUp, tearDown) and create temporary groups test files. * tests/NodeSetRegroupTest.py: Removed (tests moved to NodeSetGroupTest.py). * scripts/nodeset.py: Add -N option to avoid display of group source prefix (trac ticket #79). * NodeSet.py: Add noprefix boolean option to regroup() to avoid building nodegroups with group source prefixes. Added test. 
* scripts/clush.py: Fix unhandled GroupResolverSourceError exception (part of trac ticket #74). * scripts/nodeset.py: Renamed -n NAMESPACE option to -s GROUPSOURCE (or --groupsource=GROUPSOURCE). Fixed trac ticket #76 so that -f, -e or -c take -s into account. Improved error handling (trac ticket #74). Added --groupsources command to list configured group sources (trac #77). 2010-05-20 S. Thiell * tests/NodeSetRegroupTest.py: added tests for nodeset.regroup(). 2010-05-19 S. Thiell * doc/extras/vim/ftdetect/clustershell.vim: renamed clush.vim to clustershell.vim. * doc/extras/vim/syntax/clushconf.vim: renamed clush.vim to clushconf.vim and cleaned up old external groups keywords. * doc/extras/vim/syntax/groupsconf.vim: added vim syntax file for groups.conf (trac ticket #73). 2010-04-08 S. Thiell * NodeSet.py: Added __getstate__() and __setstate__() methods to support pickling of NodeSet objects. * scripts/clush.py: Add option flag -n NAMESPACE to specify groups.conf(5) namespace to use for regrouping displayed nodeset. * scripts/clush.py: Add -r (--regroup) option to display default groups in nodeset when possible. 2010-04-07 S. Thiell * scripts/clush.py: Modified script to support new external "all nodes" upcall and node groups. * scripts/nodeset.py: Added command flags -l (list groups), -r (used to regroup nodes in groups), and also added option flag -n to specify desired namespace. * NodeSet.py: Added node group support with the help of the new NodeUtils module (trac ticket #41). Improved parser to support basic node/nodegroups arithmetics (trac ticket #44). * NodeUtils.py: New module that provides binding support to external node group sources (trac ticket #43). 2010-03-05 S. Thiell * Worker/*.py: Do not forget to keep last line and generate an ev_read event when it does not contain EOL (trac ticket #66). Added tests. 2010-02-26 S. Thiell * Version 1.2 RC 1 released. 2010-02-25 S. 
Thiell * Important code cleaning (use absolute imports, remove some unused imports, remove duplicate code, etc. thanks to pylint). 2010-02-22 S. Thiell * scripts/nodeset.py: Change command syntax: operations are now specified inline between nodesets (trac ticket #45). Update doc and tests. * scripts/clubak.py: Fix TypeError exception raised on unexpected input and accept 'node:message' line pattern (trac ticket #59). * scripts/clush.py: Add -B flag (trac ticket #60) to gather with stderr. * NodeSet.py: NodeSet constructor now raises a NodeSetParseError exception when unsupported type is used as input (trac ticket #53). 2010-02-21 S. Thiell * Task.py: Fix a deadlock when a task is resumed two times from another thread (raise AlreadyRunningError instead). Added test. * Worker/Worker.py: Improve usage error handling for some methods (trac ticket #28), raising WorkerError when needed. Add library misusage tests. 2010-02-18 S. Thiell * scripts/clush.py: Disable MsgTree buffering when not performing any gathering of results (when -b is not used). * Task.py: Allow disabling of MsgTree buffering (trac ticket #3) via 'stdout_msgtree" and 'stderr_msgtree' Task default keywords, useful if we don't want MsgTree internal buffering for fully event-based scripts (eg. clush without -b). When disabled, any Task method accessing MsgTree data like iter_buffers() will raise a new exception (TaskMsgTreeError). 2010-02-17 S. Thiell * Version 1.2 beta 5 released. 2010-02-16 S. Thiell * NodeSet.py: Fix mixed-type comparisons, where, like standard set(), are allowed, instead of raising TypeError. 2010-02-15 S. Thiell * Version 1.2 beta 4 released. * MsgTree.py: Added MsgTreeElem.splitlines() method as alias of lines(). 2010-02-14 S. Thiell * Updated doc/man pages for latest clush changes and added clubak tool. * Worker/Ssh.py: Fix Ssh worker issue where sometimes stderr buffer could not be read completely (trac ticket #50). 2010-02-13 S. 
Thiell * scripts/clush.py: Comply with clubak by adding -L option that allows switching to alternative line mode display (when using -b). Also, sort buffers by nodes or nodeset length like clubak (fix trac ticket #54). 2010-02-11 S. Thiell * Version 1.2 beta 3 released. * scripts/clush.py: For clush --copy, when --dest is not specified, set the destination path to the source dirname path and not the source full path. * scripts/clush.py: Added option --nostdin to prevent reading from standard input (fix trac ticket #49). * Engine/Factory.py: Disable Engine.EPoll automatic selection as an issue has been found with clush when stdin is a plain file (trac ticket #56). * Worker/Worker.py: Added missing WorkerSimple.last_error() method. Fixed worker bad argument error exception. * Worker/Ssh.py: Added command, source and dest public instance variable. * Worker/Pdsh.py: Added command, source and dest public instance variable. * scripts/clush.py: Due to set_info() behaviour modifications in multi-thread mode, change some set_info() for set_default() to modify task specific dictionary synchronously. Also remove splitlines() where MsgTreeElem are returned instead of whole buffer after latest MsgTree improvements. * scripts/clubak.py: Added clubak utility (trac ticket #47). It provides dshbak backward-compatibility, but always tries to sort buffers by nodes or nodeset. It also provides additional -L option to switch to alternative line mode display. 2010-02-09 S. Thiell * Worker.py: Updated Task/MsgTree dependencies. Added iter_node_errors() method. Added match_keys optional parameter to iter_node_buffers() and iter_node_errors(). Added WorkerSimple.error() method (read stderr). Added tests. * Task.py: Updated MsgTree dependencies. Factorized most tree data's access methods. * MsgTree.py: Merged Msg and _MsgTreeElem in one class MsgTreeElem. All message objects returned are now instance of MsgTreeElem. Some algorithm improvements. 
Renamed main MsgTree access methods: messages(), items() and walk(). Added more docstring. * NodeSet.py: Modified NodeSet.__iter__() and __str__() so that nodes are now always sorted by name/pattern (eg. acluster2, bcluster1). 2010-02-07 S. Thiell * MsgTree.py: Rewrite of MsgTree module with a better API (part of trac ticket #47). Adapted library classes. Added specific tests. 2010-02-02 S. Thiell * Task.py: Add Task.key_error() and its alias node_error() methods for easy retrieving of error buffers for a specified key/node. * scripts/clush.py: Fix stdout/stderr separation issue (introduced in 1.2b2) thanks to the new Task.set_default() method. * Task.py: As set_info() is now dispatched through the task special port, and applied only on task.resume() when called from another thread, add two new methods default() and set_default() to synchronously manage another task specific dictionary, useful for default configuration parameters. 2010-02-01 S. Thiell * Version 1.2 beta 2 released. 2010-02-01 A. Degremont * NodeSet.py: Added __getslice__() and split() method to RangeSet. Added split() to NodeSet (trac ticket #18). 2010-02-01 S. Thiell * NodeSet.py: Added equality comparisons for RangeSet and NodeSet. Fixed a bug in NodeSet.issuperset(). * mkrpm.sh: Improve RPM build process and allow SRPM package to be easily rebuilt (trac ticket #51). 2010-01-31 S. Thiell * scripts/clush.py: Fix broken pipe issue (trac ticket #34). * scripts/clush.py: Fix unhandled NodeSet parse error (trac ticket #36). * scripts/clush.py: Display uncompleted nodes on keyboard interrupt. 2010-01-29 S. Thiell * scripts/clush.py: Return some error code when -S -u TIMEOUT is used and some command timeout occurred (trac ticket #48). * scripts/clush.py: Display output messages on KeyboardInterrupt (trac ticket #22). * tests/TaskThreadJoinTest.py: Added test cases for task.join(). * tests/TaskThreadSuspendTest.py: Added test cases for task.suspend(). 
* tests/TaskPortTest.py: Added test cases for task.port(). * Task.py: Improved features in multithreaded environments thanks to new port feature: abort(), suspend(), resume(), schedule(), etc. are now thread-safe (trac ticket #21). * Worker/EngineClient.py: Added port feature, a way to communicate between different tasks. 2009-12-09 A. Degremont * scripts/clush.py: Add -X flag to exclude node groups. Node flags -w/-x/-g/-X can now be specified multiple times. 2009-12-17 S. Thiell * Engine/Factory.py: Add engine automatic selection mechanism (trac ticket #10). * Task.py: Add task_terminate() function for convenience. 2009-12-15 S. Thiell * scripts/clush.py: Fix clush -q/--quiet issue again! 2009-12-09 A. Degremont * scripts/nodeset.py: Protect --separator from code injection and gracefully handle an incorrect separator. 2009-12-09 S. Thiell * Version 1.2 beta 1 released. * scripts/clush.py and library: Add -p option when using --copy to preserve file modification times and modes. * scripts/clush.py: Fix clush -q/--quiet issue. * scripts/nodeset.py: Add separator option to nodeset --expand with -S (trac ticket #39). * Worker/Pdsh.py: Added copy support for directory (automatic detection). Added non-reg tests. 2009-12-08 S. Thiell * scripts/clush.py: Added source presence check on copy. 2009-12-07 S. Thiell * Worker/Ssh.py: Added copy support for directory (automatic detection). * Worker/Ssh.py: Fix Scp Popen4->subprocess.popen issue (simple quote escape not needed). 2009-11-10 S. Thiell * Version 1.2 beta 0 released. Updated doc and man pages for 1.2. 2009-11-09 S. Thiell * Engine/EPoll.py: Add stdout/stderr support (still experimental). * Worker/Pdsh.py: Fix stdout/stderr support. * Backport recent 1.1-branch improvements: tests code coverage, also resulting in some fixes (see 1.1 2009-10-28). 2009-11-09 S. Thiell * scripts/clush.py: Added stdout/stderr support in clush script. 2009-11-04 S. 
Thiell * Added optional separate stdout/stderr handling (with 1.1 Task API compat). Added some tests for that. * Create a MsgTree class in MsgTree.py and remove this code from Task.py. * First changes to use setUp() in test case objects. 2009-08-02 S. Thiell * clush.py: (1) remove /step in displayed nodeset when using -b (to allow copy/paste to other tools like ipmipower that doesn't support N-M/STEP ranges), (2) when command timeout is specified (-u), show nodes (on stderr) that didn't have time to fully complete, (3) flush stdio buffers before exiting. [merged from branch 1.1] 2009-07-29 S. Thiell * tests/NodeSetScriptTest.py: added unit test for scripts/nodeset.py * NodeSet.py: fixed a problem with intersection_update() when used with two simple nodes (no rangeset). * scripts/nodeset.py: merge -i and -X options issue fix from 1.1 branch (#29) 2009-07-28 S. Thiell * scripts/clush.py: remove DeprecationWarning ignore filter (the library is now natively Python 2.6/Fedora 11 ready) * Change all sets to use built in set type available since Python 2.4 (the sets module is deprecated). * Engine/EPoll.py: added epoll based Engine (Python 2.6+ needed) * Engine/Poll.py: added _register_specific() and _unregister_specific() methods to match modified Engine base class. * Engine/Engine.py: added calls to derived class's _register_specific() and _unregister_specific() instead of only _modify_specific() 2009-07-23 S. Thiell * Replaced popen2.Popen4 (deprecated) by subprocess.Popen (Python 2.4+), renaming Worker.Popen2 to Worker.Popen. * clush.py: (backport for 1.1 branch) fix another command_timeout (-u) issue, now the command_timeout value is passed as the timeout value at worker level. * Version 1.1 branched. 2009-07-22 S. Thiell * Version 1.1 RC 2 released. * clush.py: change -u timeout behavior, if set it's now the timeout value passed to task.shell() (and not connect_timeout + command_timeout). * clush.py: add -o options to pass custom ssh options (#24). 
* Worker/Ssh.py: simple quote escape fix (trac ticket #25). * Worker/Popen2.py: simple quote escape fix (trac ticket #25) * clush.py: fix options issue when using -f, -u or -t. 2009-07-13 S. Thiell * Version 1.1 RC 1 released. * Changed license to CeCILL-C (http://www.cecill.info). * clush.py (ttyloop): (feature) added '=' special command in interactive mode to toggle output format mode (standard/gathered). * Engine/Engine.py (register): (bug) register writer fd to the engine even when set_writer_eof() has previously been called. * Worker/EngineClient.py (_handle_write): (bug) don't close writer when some data remains in write buffer, even if self._weof is True. 2009-07-10 S. Thiell * clush.py (ttyloop): added a workaround to replace raw_input() which is not interruptible in Python 2.3 (issue #706406). 2009-07-09 S. Thiell * NodeSet.py (__contains__): fixed issue that could appear when padding was used, eg. "node113" in "node[030,113]" didn't work. 2009-07-08 S. Thiell * Version 1.1 beta 6 released. * clush.py: major improvements (added write support, better interactive mode with readline, launch task in separate thread to let the main thread handle blocking tty input, added Enter key press support during run, added node groups support (-a and -g) using external commands defined in clush.conf, added --copy toggle to clush to copy files to the cluster nodes, added -q option, added progress indicator when clush is called with gather option -b) * Added man pages for clush and nodeset commands. * doc/extras/vim (clush.vim): added vim syntax files for clush.conf * Engine.py: (feature) added write support to workers * Worker: (api) created a base class WorkerSimple 2009-04-17 S. Thiell * Version 1.1 beta 5 released (LUG'09 live update). * Worker/Worker.py: (bug) update last_node so that user can call worker.last_node() in an ev_timeout handler callback. 2009-04-17 A. Degremont * clush.py: (feature) make use of optparse.OptionParser 2009-04-15 S. Thiell * Version 1.1 beta 4 released. 
2009-04-14 S. Thiell * Engine/Engine.py (EngineBaseTimer): (bug) fixed issue in timers when invalidated two times. 2009-04-06 S. Thiell * Version 1.1 beta 3 released. * Engine/Engine.py (_EngineTimerQ): (bug) fixed issue in timer invalidation. 2009-04-03 S. Thiell * Engine/Engine.py (EngineTimer): (api) added is_valid() method to check if a timer is still valid. * Task.py: (api) added optional `match_keys' parameter in Task and Worker iter_buffers() and iter_retcodes() methods. 2009-03-26 S. Thiell * Version 1.1 beta 2 released. 2009-03-23 S. Thiell * Worker/Worker.py: (api) added Worker.did_timeout() method to check if a worker has timed out (useful for Popen2 workers, other use DistantWorker.num_timeout()). 2009-02-21 S. Thiell * Version 1.1 beta 1 released. 2009-02-20 S. Thiell * NodeSet.py (NodeSet): (api) added clear() method. (RangeSet): likewise. * NodeSet.py (NodeSet): added workaround to allow NodeSet to be properly pickled (+inf floating number pickle bug with Python 2.4) * NodeSet.py (RangeSet): (bug) don't keep a reference on internal RangeSet when creating a NodeSet from another one. 2009-02-16 S. Thiell * Version 1.1 beta 0 released. * Worker/Ssh.py: (feature) new worker, based on OpenSSH, with fanout support (thus removing ClusterShell mandatory pdsh dependency). * Engine/Engine.py: (feature, api) added timer and repeater support. * 1.0->1.1 internal design changes. Copyright CEA/DAM/DIF (2009, 2010, 2011) Copying and distribution of this file, with or without modification, are permitted provided the copyright notice and this notice are preserved. 
clustershell-1.6/conf/0000755000130500135250000000000011741572333014345 5ustar thiellgpocreclustershell-1.6/conf/groups.conf.d/0000755000130500135250000000000011741572334017033 5ustar thiellgpocreclustershell-1.6/conf/groups.conf.d/slurm.conf.example0000644000130500135250000000045411741571247022503 0ustar thiellgpocre# Additional ClusterShell group source config file # # Please see `man 5 groups.conf` for further details. # # SLURM partition bindings # [slurm] map: sinfo -h -o "%N" -p $GROUP all: sinfo -h -o "%N" list: sinfo -h -o "%P" reverse: sinfo -h -N -o "%P" -n $NODE # TODO: add slurm jobs bindings here clustershell-1.6/conf/groups.conf.d/genders.conf.example0000644000130500135250000000032211741571247022762 0ustar thiellgpocre# Additional ClusterShell group source config file # # Please see `man 5 groups.conf` for further details. # # LLNL genders bindings # [genders] map: nodeattr -n $GROUP all: nodeattr -n ALL list: nodeattr -l clustershell-1.6/conf/groups.conf.d/README0000644000130500135250000000035711741571247017722 0ustar thiellgpocregroups.conf.d/README Default directory for additional node group sources configuration files. ClusterShell scans the directory set by the groupsdir variable, defined in /etc/clustershell/groups.conf, loading all files of the form *.conf. clustershell-1.6/conf/groups.conf0000644000130500135250000000134211741571247016536 0ustar thiellgpocre# ClusterShell node groups primary configuration file # # Please see `man 5 groups.conf` for further details. # # NOTE: This is a simple group configuration example file, not a # default config file. Please edit it to fit your own needs. 
# [Main] # Default group source default: local # Uncomment the next line to enable additional group source config files: #groupsdir: /etc/clustershell/groups.conf.d # EXAMPLE: flat file "group: nodeset" based group source [local] map: sed -n 's/^$GROUP:\(.*\)/\1/p' /etc/clustershell/groups all: sed -n 's/^all:\(.*\)/\1/p' /etc/clustershell/groups list: sed -n 's/^\([0-9A-Za-z_-]*\):.*/\1/p' /etc/clustershell/groups # Other examples of group sources are now available in groups.conf.d/ clustershell-1.6/conf/clush.conf0000644000130500135250000000042611741571247016337 0ustar thiellgpocre# Configuration file for clush # # Please see man clush.conf(5) # [Main] fanout: 64 connect_timeout: 15 command_timeout: 0 color: auto fd_max: 16384 history_size: 100 node_count: yes verbosity: 1 #ssh_user: root #ssh_path: /usr/bin/ssh #ssh_options: -oStrictHostKeyChecking=no clustershell-1.6/conf/groups0000644000130500135250000000021411741571247015607 0ustar thiellgpocreadm: example0 oss: example4 example5 mds: example6 io: example[4-6] compute: example[32-159] gpu: example[156-159] all: example[4-6,32-159] clustershell-1.6/scripts/0000755000130500135250000000000011741572333015107 5ustar thiellgpocreclustershell-1.6/scripts/clush.py0000755000130500135250000000020711741571247016604 0ustar thiellgpocre#!/usr/bin/env python """ clush command-line tool """ from ClusterShell.CLI.Clush import main if __name__ == '__main__': main() clustershell-1.6/scripts/nodeset.py0000755000130500135250000000021311741571247017124 0ustar thiellgpocre#!/usr/bin/env python """ nodeset command-line tool """ from ClusterShell.CLI.Nodeset import main if __name__ == '__main__': main() clustershell-1.6/scripts/clubak.py0000755000130500135250000000021111741571247016722 0ustar thiellgpocre#!/usr/bin/env python """ clubak command-line tool """ from ClusterShell.CLI.Clubak import main if __name__ == '__main__': main() clustershell-1.6/tests/0000755000130500135250000000000011741572334014563 5ustar 
thiellgpocreclustershell-1.6/tests/TaskDistantPdshTest.py0000644000130500135250000004572111741571247021060 0ustar thiellgpocre#!/usr/bin/env python # ClusterShell (distant, pdsh worker) test suite # Written by S. Thiell 2009-02-13 """Unit test for ClusterShell Task (distant, pdsh worker)""" import copy import shutil import sys import tempfile import unittest sys.path.insert(0, '../lib') from ClusterShell.Event import EventHandler from ClusterShell.NodeSet import NodeSet from ClusterShell.Task import * from ClusterShell.Worker.Worker import WorkerBadArgumentError from ClusterShell.Worker.Pdsh import WorkerPdsh from ClusterShell.Worker.EngineClient import * import socket # TEventHandlerChecker 'received event' flags EV_START=0x01 EV_READ=0x02 EV_WRITTEN=0x04 EV_HUP=0x08 EV_TIMEOUT=0x10 EV_CLOSE=0x20 class TaskDistantTest(unittest.TestCase): def setUp(self): self._task = task_self() self.assert_(self._task != None) def testWorkerPdshGetCommand(self): """test worker.command with WorkerPdsh""" worker1 = WorkerPdsh("localhost", command="/bin/echo foo bar fuu", handler=None, timeout=5) self.assert_(worker1 != None) self._task.schedule(worker1) worker2 = WorkerPdsh("localhost", command="/bin/echo blah blah foo", handler=None, timeout=5) self.assert_(worker2 != None) self._task.schedule(worker2) # run task self._task.resume() # test output self.assertEqual(worker1.node_buffer("localhost"), "foo bar fuu") self.assertEqual(worker1.command, "/bin/echo foo bar fuu") self.assertEqual(worker2.node_buffer("localhost"), "blah blah foo") self.assertEqual(worker2.command, "/bin/echo blah blah foo") def testLocalhostExplicitPdshCopy(self): """test simple localhost copy with explicit pdsh worker""" dest = "/tmp/cs-test_testLocalhostExplicitPdshCopy" worker = WorkerPdsh("localhost", source="/etc/hosts", dest=dest, handler=None, timeout=10) self._task.schedule(worker) self._task.resume() self.assertEqual(worker.source, "/etc/hosts") self.assertEqual(worker.dest, dest) def 
testLocalhostExplicitPdshCopyDir(self): """test simple localhost copy dir with explicit pdsh worker""" dtmp_src = tempfile.mkdtemp("_cs-test_src") # pdcp worker doesn't create custom destination directory dtmp_dst = tempfile.mkdtemp( \ "_cs-test_testLocalhostExplicitPdshCopyDir") try: os.mkdir(os.path.join(dtmp_src, "lev1_a")) os.mkdir(os.path.join(dtmp_src, "lev1_b")) os.mkdir(os.path.join(dtmp_src, "lev1_a", "lev2")) worker = WorkerPdsh("localhost", source=dtmp_src, dest=dtmp_dst, handler=None, timeout=10) self._task.schedule(worker) self._task.resume() self.assert_(os.path.exists(os.path.join(dtmp_dst, \ os.path.basename(dtmp_src), "lev1_a", "lev2"))) finally: shutil.rmtree(dtmp_dst, ignore_errors=True) shutil.rmtree(dtmp_src, ignore_errors=True) def testLocalhostExplicitPdshCopyDirPreserve(self): """test simple localhost preserve copy dir with explicit pdsh worker""" dtmp_src = tempfile.mkdtemp("_cs-test_src") # pdcp worker doesn't create custom destination directory dtmp_dst = tempfile.mkdtemp( \ "_cs-test_testLocalhostExplicitPdshCopyDirPreserve") try: os.mkdir(os.path.join(dtmp_src, "lev1_a")) os.mkdir(os.path.join(dtmp_src, "lev1_b")) os.mkdir(os.path.join(dtmp_src, "lev1_a", "lev2")) worker = WorkerPdsh("localhost", source=dtmp_src, dest=dtmp_dst, handler=None, timeout=10, preserve=True) self._task.schedule(worker) self._task.resume() self.assert_(os.path.exists(os.path.join(dtmp_dst, \ os.path.basename(dtmp_src), "lev1_a", "lev2"))) finally: shutil.rmtree(dtmp_dst, ignore_errors=True) shutil.rmtree(dtmp_src, ignore_errors=True) def testExplicitPdshWorker(self): """test simple localhost command with explicit pdsh worker""" # init worker worker = WorkerPdsh("localhost", command="echo alright", handler=None, timeout=5) self.assert_(worker != None) self._task.schedule(worker) # run task self._task.resume() # test output self.assertEqual(worker.node_buffer("localhost"), "alright") def testExplicitPdshWorkerStdErr(self): """test simple localhost command with 
explicit pdsh worker (stderr)""" # init worker worker = WorkerPdsh("localhost", command="echo alright 1>&2", handler=None, stderr=True, timeout=5) self.assert_(worker != None) self._task.schedule(worker) # run task self._task.resume() # test output self.assertEqual(worker.node_error_buffer("localhost"), "alright") # Re-test with stderr=False worker = WorkerPdsh("localhost", command="echo alright 1>&2", handler=None, stderr=False, timeout=5) self.assert_(worker != None) self._task.schedule(worker) # run task self._task.resume() # test output self.assertEqual(worker.node_error_buffer("localhost"), None) def testPdshWorkerWriteNotSupported(self): """test that write is reported as not supported with pdsh""" # init worker worker = WorkerPdsh("localhost", command="uname -r", handler=None, timeout=5) self.assertRaises(EngineClientNotSupportedError, worker.write, "toto") class TEventHandlerChecker(EventHandler): """simple event trigger validator""" def __init__(self, test): self.test = test self.flags = 0 self.read_count = 0 self.written_count = 0 def ev_start(self, worker): self.test.assertEqual(self.flags, 0) self.flags |= EV_START def ev_read(self, worker): self.test.assertEqual(self.flags, EV_START) self.flags |= EV_READ self.last_node, self.last_read = worker.last_read() def ev_written(self, worker): self.test.assert_(self.flags & EV_START) self.flags |= EV_WRITTEN def ev_hup(self, worker): self.test.assert_(self.flags & EV_START) self.flags |= EV_HUP self.last_rc = worker.last_retcode() def ev_timeout(self, worker): self.test.assert_(self.flags & EV_START) self.flags |= EV_TIMEOUT self.last_node = worker.last_node() def ev_close(self, worker): self.test.assert_(self.flags & EV_START) self.test.assert_(self.flags & EV_CLOSE == 0) self.flags |= EV_CLOSE def testExplicitWorkerPdshShellEvents(self): """test triggered events with explicit pdsh worker""" # init worker test_eh = self.__class__.TEventHandlerChecker(self) worker = WorkerPdsh("localhost", command="hostname", 
                            handler=test_eh, timeout=None)
        self.assert_(worker != None)
        self._task.schedule(worker)
        # run task
        self._task.resume()
        # test events received: start, read, hup, close
        self.assertEqual(test_eh.flags, EV_START | EV_READ | EV_HUP | EV_CLOSE)

    def testExplicitWorkerPdshShellEventsWithTimeout(self):
        """test triggered events (with timeout) with explicit pdsh worker"""
        # init worker
        test_eh = self.__class__.TEventHandlerChecker(self)
        worker = WorkerPdsh("localhost", command="echo alright && sleep 10",
                            handler=test_eh, timeout=2)
        self.assert_(worker != None)
        self._task.schedule(worker)
        # run task
        self._task.resume()
        # test events received: start, read, timeout, close
        # (EV_HUP must NOT be set: command was killed by the timeout)
        self.assertEqual(test_eh.flags,
                         EV_START | EV_READ | EV_TIMEOUT | EV_CLOSE)
        self.assertEqual(worker.node_buffer("localhost"), "alright")

    def testShellPdshEventsNoReadNoTimeout(self):
        """test triggered events (no read, no timeout) with explicit pdsh worker"""
        # init worker
        test_eh = self.__class__.TEventHandlerChecker(self)
        worker = WorkerPdsh("localhost", command="sleep 2",
                            handler=test_eh, timeout=None)
        self.assert_(worker != None)
        self._task.schedule(worker)
        # run task
        self._task.resume()
        # test events received: start, close (no output was produced)
        self.assertEqual(test_eh.flags, EV_START | EV_HUP | EV_CLOSE)
        self.assertEqual(worker.node_buffer("localhost"), None)

    def testWorkerPdshBuffers(self):
        """test buffers at pdsh worker level"""
        task = task_self()
        self.assert_(task != None)
        worker = WorkerPdsh("localhost",
                            command="printf 'foo\nbar\nxxx\n'",
                            handler=None, timeout=None)
        task.schedule(worker)
        task.resume()
        # cnt counts down: exactly one buffer is expected from each iterator
        cnt = 2
        for buf, nodes in worker.iter_buffers():
            cnt -= 1
            if buf == "foo\nbar\nxxx\n":
                self.assertEqual(len(nodes), 1)
                self.assertEqual(str(nodes), "localhost")
        self.assertEqual(cnt, 1)
        for buf, nodes in worker.iter_buffers("localhost"):
            cnt -= 1
            if buf == "foo\nbar\nxxx\n":
                self.assertEqual(len(nodes), 1)
                self.assertEqual(str(nodes), "localhost")
        self.assertEqual(cnt, 0)

    def testWorkerPdshNodeBuffers(self):
        """test iter_node_buffers on distant pdsh workers"""
        task = task_self()
        self.assert_(task != None)
        worker = WorkerPdsh("localhost",
                            command="/usr/bin/printf 'foo\nbar\nxxx\n'",
                            handler=None, timeout=None)
        task.schedule(worker)
        task.resume()
        cnt = 1
        for node, buf in worker.iter_node_buffers():
            cnt -= 1
            if buf == "foo\nbar\nxxx\n":
                self.assertEqual(node, "localhost")
        self.assertEqual(cnt, 0)

    def testWorkerPdshNodeErrors(self):
        """test iter_node_errors on distant pdsh workers"""
        task = task_self()
        self.assert_(task != None)
        # stderr=True keeps stderr separated so iter_node_errors() sees it
        worker = WorkerPdsh("localhost",
                            command="/usr/bin/printf 'foo\nbar\nxxx\n' 1>&2",
                            handler=None, timeout=None, stderr=True)
        task.schedule(worker)
        task.resume()
        cnt = 1
        for node, buf in worker.iter_node_errors():
            cnt -= 1
            if buf == "foo\nbar\nxxx\n":
                self.assertEqual(node, "localhost")
        self.assertEqual(cnt, 0)

    def testWorkerPdshRetcodes(self):
        """test retcodes on distant pdsh workers"""
        task = task_self()
        self.assert_(task != None)
        worker = WorkerPdsh("localhost", command="/bin/sh -c 'exit 3'",
                            handler=None, timeout=None)
        task.schedule(worker)
        task.resume()
        cnt = 2
        for rc, keys in worker.iter_retcodes():
            cnt -= 1
            self.assertEqual(rc, 3)
            self.assertEqual(len(keys), 1)
            self.assert_(keys[0] == "localhost")
        self.assertEqual(cnt, 1)
        for rc, keys in worker.iter_retcodes("localhost"):
            cnt -= 1
            self.assertEqual(rc, 3)
            self.assertEqual(len(keys), 1)
            self.assert_(keys[0] == "localhost")
        self.assertEqual(cnt, 0)
        # test node_retcode
        self.assertEqual(worker.node_retcode("localhost"), 3)   # 1.2.91+
        self.assertEqual(worker.node_rc("localhost"), 3)
        # test node_retcode failure
        self.assertRaises(KeyError, worker.node_retcode, "dummy")
        # test max retcode API
        self.assertEqual(task.max_retcode(), 3)

    def testWorkerNodeRetcodes(self):
        """test iter_node_retcodes on distant pdsh workers"""
        task = task_self()
        self.assert_(task != None)
        worker = WorkerPdsh("localhost", command="/bin/sh -c 'exit 3'",
                            handler=None, timeout=None)
        task.schedule(worker)
        task.resume()
        cnt = 1
        for node, rc in worker.iter_node_retcodes():
            cnt -= 1
            self.assertEqual(rc, 3)
            self.assertEqual(node, "localhost")
        self.assertEqual(cnt, 0)

    def testEscapePdsh(self):
        """test distant worker (pdsh) cmd with escaped variable"""
        # the backslash-escaped $CSTEST must reach the remote shell verbatim
        worker = WorkerPdsh("localhost",
                            command="export CSTEST=foobar; /bin/echo \$CSTEST | sed 's/\ foo/bar/'",
                            handler=None, timeout=None)
        self.assert_(worker != None)
        #task.set_info("debug", True)
        self._task.schedule(worker)
        # execute
        self._task.resume()
        # read result
        self.assertEqual(worker.node_buffer("localhost"), "$CSTEST")

    def testEscapePdsh2(self):
        """test distant worker (pdsh) cmd with non-escaped variable"""
        worker = WorkerPdsh("localhost",
                            command="export CSTEST=foobar; /bin/echo $CSTEST | sed 's/\ foo/bar/'",
                            handler=None, timeout=None)
        self._task.schedule(worker)
        # execute
        self._task.resume()
        # read result
        self.assertEqual(worker.node_buffer("localhost"), "foobar")

    def testShellPdshStderrWithHandler(self):
        """test reading stderr of distant pdsh worker on event handler"""
        class StdErrHandler(EventHandler):
            def ev_error(self, worker):
                assert worker.last_error() == "something wrong"
        worker = WorkerPdsh("localhost", command="echo something wrong 1>&2",
                            handler=StdErrHandler(), timeout=None)
        self._task.schedule(worker)
        self._task.resume()
        for buf, nodes in worker.iter_errors():
            self.assertEqual(buf, "something wrong")
        for buf, nodes in worker.iter_errors('localhost'):
            self.assertEqual(buf, "something wrong")

    def testCommandTimeoutOption(self):
        """test pdsh shell with command_timeout set"""
        command_timeout_orig = self._task.info("command_timeout")
        self._task.set_info("command_timeout", 1)
        worker = WorkerPdsh("localhost", command="sleep 10",
                            handler=None, timeout=None)
        self._task.schedule(worker)
        self.assert_(worker != None)
        self._task.resume()
        # restore original command_timeout (0)
        # NOTE(review): asserts the task-wide default is 0 -- verify against
        # Task defaults if this ever fails in a customized environment
        self.assertEqual(command_timeout_orig, 0)
        self._task.set_info("command_timeout", command_timeout_orig)

    def testPdshBadArgumentOption(self):
        """test WorkerPdsh constructor bad
argument"""
        # Check code < 1.4 compatibility
        self.assertRaises(WorkerBadArgumentError, WorkerPdsh,
                          "localhost", None, None)
        # As of 1.4, ValueError is raised for missing parameter
        # (WorkerBadArgumentError check above covers the pre-1.4 name)
        self.assertRaises(ValueError, WorkerPdsh,
                          "localhost", None, None) # 1.4+

    def testCopyEvents(self):
        """test triggered events on WorkerPdsh copy"""
        test_eh = self.__class__.TEventHandlerChecker(self)
        dest = "/tmp/cs-test_testLocalhostPdshCopyEvents"
        worker = WorkerPdsh("localhost", source="/etc/hosts",
                            dest=dest, handler=test_eh, timeout=10)
        self._task.schedule(worker)
        self._task.resume()
        # copy produces no read events: start, hup, close only
        self.assertEqual(test_eh.flags, EV_START | EV_HUP | EV_CLOSE)

    def testWorkerAbort(self):
        """test WorkerPdsh abort() on timer"""
        task = task_self()
        self.assert_(task != None)

        class AbortOnTimer(EventHandler):
            # aborts the referenced worker when the timer fires
            def __init__(self, worker):
                EventHandler.__init__(self)
                self.ext_worker = worker
                self.testtimer = False
            def ev_timer(self, timer):
                self.ext_worker.abort()
                self.testtimer = True

        worker = WorkerPdsh("localhost", command="sleep 10",
                            handler=None, timeout=None)
        task.schedule(worker)
        aot = AbortOnTimer(worker)
        self.assertEqual(aot.testtimer, False)
        task.timer(2.0, handler=aot)
        task.resume()
        # timer fired and aborted the 10s command before completion
        self.assertEqual(aot.testtimer, True)

    def testWorkerAbortSanity(self):
        """test WorkerPdsh abort() (sanity)"""
        task = task_self()
        # test noop abort() on unscheduled worker
        worker = WorkerPdsh("localhost", command="sleep 1",
                            handler=None, timeout=None)
        worker.abort()

    def testLocalhostExplicitPdshReverseCopy(self):
        """test simple localhost rcopy with explicit pdsh worker"""
        dest = "/tmp/cs-test_testLocalhostExplicitPdshRCopy"
        shutil.rmtree(dest, ignore_errors=True)
        os.mkdir(dest)
        worker = WorkerPdsh("localhost", source="/etc/hosts",
                            dest=dest, handler=None, timeout=10,
                            reverse=True)
        self._task.schedule(worker)
        self._task.resume()
        self.assertEqual(worker.source, "/etc/hosts")
        self.assertEqual(worker.dest, dest)
        # reverse copy suffixes the file name with the source node name
        self.assert_(os.path.exists(os.path.join(dest, "hosts.localhost")))

    def testLocalhostExplicitPdshReverseCopyDir(self):
        """test simple localhost rcopy dir with explicit pdsh worker"""
        dtmp_src = tempfile.mkdtemp("_cs-test_src")
        dtmp_dst = tempfile.mkdtemp( \
            "_cs-test_testLocalhostExplicitPdshReverseCopyDir")
        try:
            os.mkdir(os.path.join(dtmp_src, "lev1_a"))
            os.mkdir(os.path.join(dtmp_src, "lev1_b"))
            os.mkdir(os.path.join(dtmp_src, "lev1_a", "lev2"))
            worker = WorkerPdsh("localhost", source=dtmp_src,
                                dest=dtmp_dst, handler=None, timeout=30,
                                reverse=True)
            self._task.schedule(worker)
            self._task.resume()
            self.assert_(os.path.exists(os.path.join(dtmp_dst, \
                "%s.localhost" % os.path.basename(dtmp_src), "lev1_a", "lev2")))
        finally:
            shutil.rmtree(dtmp_dst, ignore_errors=True)
            shutil.rmtree(dtmp_src, ignore_errors=True)

    def testLocalhostExplicitPdshReverseCopyDirPreserve(self):
        """test simple localhost preserve rcopy dir with explicit pdsh worker"""
        dtmp_src = tempfile.mkdtemp("_cs-test_src")
        dtmp_dst = tempfile.mkdtemp( \
            "_cs-test_testLocalhostExplicitPdshReverseCopyDirPreserve")
        try:
            os.mkdir(os.path.join(dtmp_src, "lev1_a"))
            os.mkdir(os.path.join(dtmp_src, "lev1_b"))
            os.mkdir(os.path.join(dtmp_src, "lev1_a", "lev2"))
            worker = WorkerPdsh("localhost", source=dtmp_src,
                                dest=dtmp_dst, handler=None, timeout=30,
                                preserve=True, reverse=True)
            self._task.schedule(worker)
            self._task.resume()
            self.assert_(os.path.exists(os.path.join(dtmp_dst, \
                "%s.localhost" % os.path.basename(dtmp_src), "lev1_a", "lev2")))
        finally:
            shutil.rmtree(dtmp_dst, ignore_errors=True)
            shutil.rmtree(dtmp_src, ignore_errors=True)

if __name__ == '__main__':
    suite = unittest.TestLoader().loadTestsFromTestCase(TaskDistantTest)
    unittest.TextTestRunner(verbosity=2).run(suite)
clustershell-1.6/tests/TaskTimerTest.py0000644000130500135250000004006411741571247017706 0ustar  thiellgpocre#!/usr/bin/env python
# ClusterShell timer test suite
# Written by S.
Thiell 2009-02-15 """Unit test for ClusterShell Task's timer""" import copy import thread from time import sleep, time import sys import unittest sys.path.insert(0, '../lib') from ClusterShell.Engine.Engine import EngineTimer, EngineIllegalOperationError from ClusterShell.Event import EventHandler from ClusterShell.Task import * EV_START=0x01 EV_READ=0x02 EV_WRITTEN=0x04 EV_HUP=0x08 EV_TIMEOUT=0x10 EV_CLOSE=0x20 EV_TIMER=0x40 class TaskTimerTest(unittest.TestCase): class TSimpleTimerChecker(EventHandler): def __init__(self): self.count = 0 def ev_timer(self, timer): self.count += 1 def testSimpleTimer(self): """test simple timer""" task = task_self() self.assert_(task != None) # init event handler for timer's callback test_handler = self.__class__.TSimpleTimerChecker() timer1 = task.timer(1.0, handler=test_handler) self.assert_(timer1 != None) # run task task.resume() self.assertEqual(test_handler.count, 1) def testSimpleTimer2(self): """test simple 2 timers with same fire_date""" task = task_self() self.assert_(task != None) test_handler = self.__class__.TSimpleTimerChecker() timer1 = task.timer(1.0, handler=test_handler) self.assert_(timer1 != None) timer2 = task.timer(1.0, handler=test_handler) self.assert_(timer2 != None) task.resume() self.assertEqual(test_handler.count, 2) class TRepeaterTimerChecker(EventHandler): def __init__(self): self.count = 0 def ev_timer(self, timer): self.count += 1 timer.set_nextfire(0.5) if self.count > 4: timer.invalidate() def testSimpleRepeater(self): """test simple repeater timer""" task = task_self() self.assert_(task != None) # init event handler for timer's callback test_handler = self.__class__.TRepeaterTimerChecker() timer1 = task.timer(1.0, interval=0.5, handler=test_handler) self.assert_(timer1 != None) # run task task.resume() self.assertEqual(test_handler.count, 5) def testRepeaterInvalidatedTwice(self): """test repeater timer invalidated two times""" task = task_self() self.assert_(task != None) # init event handler 
for timer's callback test_handler = self.__class__.TRepeaterTimerChecker() timer1 = task.timer(1.0, interval=0.5, handler=test_handler) self.assert_(timer1 != None) # run task task.resume() self.assertEqual(test_handler.count, 5) # force invalidation again (2d time), this should do nothing timer1.invalidate() # call handler one more time directly: set_nextfire should raise an error self.assertRaises(EngineIllegalOperationError, test_handler.ev_timer, timer1) # force invalidation again (3th), this should do nothing timer1.invalidate() def launchSimplePrecisionTest(self, delay): task = task_self() self.assert_(task != None) # init event handler for timer's callback test_handler = self.__class__.TSimpleTimerChecker() timer1 = task.timer(delay, handler=test_handler) self.assert_(timer1 != None) t1 = time() # run task task.resume() t2 = time() check_precision = 0.05 self.assert_(abs((t2 - t1) - delay) < check_precision, \ "%f >= %f" % (abs((t2 - t1) - delay), check_precision)) self.assertEqual(test_handler.count, 1) def testPrecision1(self): """test simple timer precision (0.1s)""" self.launchSimplePrecisionTest(0.1) def testPrecision2(self): """test simple timer precision (1.0s)""" self.launchSimplePrecisionTest(1.0) def testWorkersAndTimer(self): """test task with timer and local jobs""" task0 = task_self() self.assert_(task0 != None) worker1 = task0.shell("/bin/hostname") worker2 = task0.shell("/bin/uname -a") test_handler = self.__class__.TSimpleTimerChecker() timer1 = task0.timer(1.0, handler=test_handler) self.assert_(timer1 != None) task0.resume() self.assertEqual(test_handler.count, 1) b1 = copy.copy(worker1.read()) b2 = copy.copy(worker2.read()) worker1 = task0.shell("/bin/hostname") self.assert_(worker1 != None) worker2 = task0.shell("/bin/uname -a") self.assert_(worker2 != None) timer1 = task0.timer(1.0, handler=test_handler) self.assert_(timer1 != None) task0.resume() self.assertEqual(test_handler.count, 2) # same handler, called 2 times 
self.assert_(worker2.read() == b2) self.assert_(worker1.read() == b1) def testNTimers(self): """test multiple timers""" task = task_self() self.assert_(task != None) # init event handler for timer's callback test_handler = self.__class__.TSimpleTimerChecker() for i in range(0, 30): timer1 = task.timer(1.0 + 0.2 * i, handler=test_handler) self.assert_(timer1 != None) # run task task.resume() self.assertEqual(test_handler.count, 30) class TEventHandlerTimerInvalidate(EventHandler): """timer operations event handler simulator""" def __init__(self, test): self.test = test self.timer = None self.timer_count = 0 self.flags = 0 def ev_start(self, worker): self.flags |= EV_START def ev_read(self, worker): self.test.assertEqual(self.flags, EV_START) self.flags |= EV_READ def ev_written(self, worker): self.test.assert_(self.flags & EV_START) self.flags |= EV_WRITTEN def ev_hup(self, worker): self.test.assert_(self.flags & EV_START) self.flags |= EV_HUP def ev_timeout(self, worker): self.test.assert_(self.flags & EV_START) self.flags |= EV_TIMEOUT def ev_close(self, worker): self.test.assert_(self.flags & EV_START) self.flags |= EV_CLOSE def ev_timer(self, timer): self.flags |= EV_TIMER self.timer_count += 1 self.timer.invalidate() def testTimerInvalidateInHandler(self): """test timer invalidate in event handler""" task = task_self() self.assert_(task != None) test_eh = self.__class__.TEventHandlerTimerInvalidate(self) # init worker worker = task.shell("/bin/sleep 1", handler=test_eh) self.assert_(worker != None) worker = task.shell("/bin/sleep 3", nodes='localhost', handler=test_eh) self.assert_(worker != None) # init timer timer = task.timer(1.5, interval=0.5, handler=test_eh) self.assert_(timer != None) test_eh.timer = timer # run task task.resume() # test timer did fire once self.assertEqual(test_eh.timer_count, 1) class TEventHandlerTimerSetNextFire(EventHandler): def __init__(self, test): self.test = test self.timer = None self.timer_count = 0 self.flags = 0 def 
ev_start(self, worker): self.flags |= EV_START def ev_read(self, worker): self.test.assertEqual(self.flags, EV_START) self.flags |= EV_READ def ev_written(self, worker): self.test.assert_(self.flags & EV_START) self.flags |= EV_WRITTEN def ev_hup(self, worker): self.test.assert_(self.flags & EV_START) self.flags |= EV_HUP def ev_timeout(self, worker): self.test.assert_(self.flags & EV_START) self.flags |= EV_TIMEOUT def ev_close(self, worker): self.test.assert_(self.flags & EV_START) self.flags |= EV_CLOSE def ev_timer(self, timer): self.flags |= EV_TIMER if self.timer_count < 4: self.timer.set_nextfire(0.5) # else invalidate automatically as timer does not repeat self.timer_count += 1 def testTimerSetNextFireInHandler(self): """test timer set_nextfire in event handler""" task = task_self() self.assert_(task != None) test_eh = self.__class__.TEventHandlerTimerSetNextFire(self) # init worker worker = task.shell("/bin/sleep 6", nodes='localhost', handler=test_eh) self.assert_(worker != None) # init timer timer = task.timer(2.0, interval=0.2, handler=test_eh) self.assert_(timer != None) test_eh.timer = timer # run task task.resume() # test timer did fire one time self.assertEqual(test_eh.timer_count, 5) class TEventHandlerTimerOtherInvalidate(EventHandler): """timer operations event handler simulator""" def __init__(self, test): self.test = test self.timer = None self.flags = 0 def ev_start(self, worker): self.flags |= EV_START def ev_read(self, worker): self.flags |= EV_READ self.timer.invalidate() def ev_written(self, worker): self.test.assert_(self.flags & EV_START) self.flags |= EV_WRITTEN def ev_hup(self, worker): self.test.assert_(self.flags & EV_START) self.flags |= EV_HUP def ev_timeout(self, worker): self.test.assert_(self.flags & EV_START) self.flags |= EV_TIMEOUT def ev_close(self, worker): self.test.assert_(self.flags & EV_START) self.flags |= EV_CLOSE def ev_timer(self, timer): self.flags |= EV_TIMER def testTimerInvalidateInOtherHandler(self): """test 
timer invalidate in other event handler""" task = task_self() self.assert_(task != None) test_eh = self.__class__.TEventHandlerTimerOtherInvalidate(self) # init worker worker = task.shell("/bin/uname -r", handler=test_eh) self.assert_(worker != None) worker = task.shell("/bin/sleep 5", nodes='localhost', handler=test_eh) self.assert_(worker != None) # init timer timer = task.timer(4.0, interval=0.5, handler=test_eh) self.assert_(timer != None) test_eh.timer = timer # run task task.resume() # test timer didn't fire, invalidated in a worker's event handler self.assert_(test_eh.flags & EV_READ) self.assert_(not test_eh.flags & EV_TIMER) class TEventHandlerTimerOtherSetNextFire(EventHandler): def __init__(self, test): self.test = test self.timer = None self.timer_count = 0 self.flags = 0 def ev_start(self, worker): self.flags |= EV_START def ev_read(self, worker): self.test.assertEqual(self.flags, EV_START) self.flags |= EV_READ def ev_written(self, worker): self.test.assert_(self.flags & EV_START) self.flags |= EV_WRITTEN def ev_hup(self, worker): self.test.assert_(self.flags & EV_START) self.flags |= EV_HUP def ev_timeout(self, worker): self.test.assert_(self.flags & EV_START) self.flags |= EV_TIMEOUT def ev_close(self, worker): self.test.assert_(self.flags & EV_START) self.flags |= EV_CLOSE # set next fire delay, also disable previously setup interval # (timer will not repeat anymore) self.timer.set_nextfire(0.5) def ev_timer(self, timer): self.flags |= EV_TIMER self.timer_count += 1 def testTimerSetNextFireInOtherHandler(self): """test timer set_nextfire in other event handler""" task = task_self() self.assert_(task != None) test_eh = self.__class__.TEventHandlerTimerOtherSetNextFire(self) # init worker worker = task.shell("/bin/sleep 1", nodes='localhost', handler=test_eh) self.assert_(worker != None) # init timer timer = task.timer(10.0, interval=0.5, handler=test_eh) self.assert_(timer != None) test_eh.timer = timer # run task task.resume() # test timer did fire 
        # ... one time  (tail of the "test timer did fire" comment)
        self.assertEqual(test_eh.timer_count, 1)

    def testAutocloseTimer(self):
        """test timer autoclose (one autoclose timer)"""
        task = task_self()
        self.assert_(task != None)
        # Task should return immediately
        test_handler = self.__class__.TSimpleTimerChecker()
        timer_ac = task.timer(10.0, handler=test_handler, autoclose=True)
        self.assert_(timer_ac != None)
        # run task
        task.resume()
        # autoclose timer never fired since nothing else kept the task alive
        self.assertEqual(test_handler.count, 0)

    def testAutocloseWithTwoTimers(self):
        """test timer autoclose (two timers)"""
        task = task_self()
        self.assert_(task != None)
        # build 2 timers, one of 10 secs with autoclose,
        # and one of 1 sec without autoclose.
        # Task should return after 1 sec.
        test_handler = self.__class__.TSimpleTimerChecker()
        timer_ac = task.timer(10.0, handler=test_handler, autoclose=True)
        self.assert_(timer_ac != None)
        timer_noac = task.timer(1.0, handler=test_handler, autoclose=False)
        self.assert_(timer_noac != None)
        # run task
        task.resume()
        # only the 1s non-autoclose timer fired
        self.assertEqual(test_handler.count, 1)

    class TForceDelayedRepeaterChecker(EventHandler):
        # repeater handler that deliberately blocks inside ev_timer once
        def __init__(self):
            self.count = 0

        def ev_timer(self, timer):
            self.count += 1
            if self.count == 1:
                # force delay timer (NOT a best practice!)
                sleep(4)
                # do not invalidate first time
            else:
                # invalidate next time to stop repeater
                timer.invalidate()

    def testForceDelayedRepeater(self):
        """test repeater being forcibly delayed"""
        task = task_self()
        self.assert_(task != None)
        test_handler = self.__class__.TForceDelayedRepeaterChecker()
        repeater1 = task.timer(1.0, interval=0.5, handler=test_handler)
        self.assert_(repeater1 != None)
        task.resume()
        # blocked handler: missed intervals are coalesced, so only 2 fires
        self.assertEqual(test_handler.count, 2)

    def testMultipleAddSameTimerPrivate(self):
        """test multiple add() of same timer [private]"""
        task = task_self()
        self.assert_(task != None)
        test_handler = self.__class__.TSimpleTimerChecker()
        timer = EngineTimer(1.0, -1.0, False, test_handler)
        self.assert_(timer != None)
        task._engine.add_timer(timer)
        # re-adding the very same timer object must be rejected
        self.assertRaises(EngineIllegalOperationError,
                          task._engine.add_timer, timer)
        task_terminate()

    def testRemoveTimerPrivate(self):
        """test engine.remove_timer() [private]"""
        # [private] because engine methods are currently private,
        # users should use timer.invalidate() instead
        task = task_self()
        self.assert_(task != None)
        test_handler = self.__class__.TSimpleTimerChecker()
        timer = EngineTimer(1.0, -1.0, False, test_handler)
        self.assert_(timer != None)
        task._engine.add_timer(timer)
        task._engine.remove_timer(timer)
        task_terminate()

    def _thread_timer_create_func(self, task):
        """thread used to create a timer for another task; hey why not?"""
        timer = task.timer(0.5, self.__class__.TSimpleTimerChecker())
        self.assert_(timer != None)

    def testTimerAddFromAnotherThread(self):
        """test timer creation from another thread"""
        task = task_self()
        # Python 2 'thread' module; timer is installed concurrently
        thread.start_new_thread(TaskTimerTest._thread_timer_create_func,
                                (self, task))
        task.resume()
        task_wait()

if __name__ == '__main__':
    suite = unittest.TestLoader().loadTestsFromTestCase(TaskTimerTest)
    unittest.TextTestRunner(verbosity=2).run(suite)
clustershell-1.6/tests/TaskDistantTest.py0000644000130500135250000006500011741571247020231 0ustar  thiellgpocre#!/usr/bin/env python
# ClusterShell (distant) test suite
#
Written by S. Thiell 2009-02-13 """Unit test for ClusterShell Task (distant)""" import copy import pwd import shutil import sys import tempfile import unittest sys.path.insert(0, '../lib') from ClusterShell.Event import EventHandler from ClusterShell.NodeSet import NodeSet from ClusterShell.Task import * from ClusterShell.Worker.Ssh import WorkerSsh from ClusterShell.Worker.EngineClient import * from ClusterShell.Worker.Worker import WorkerBadArgumentError import socket # TEventHandlerChecker 'received event' flags EV_START=0x01 EV_READ=0x02 EV_WRITTEN=0x04 EV_HUP=0x08 EV_TIMEOUT=0x10 EV_CLOSE=0x20 class TaskDistantTest(unittest.TestCase): def setUp(self): self._task = task_self() self.assert_(self._task != None) def testLocalhostCommand(self): """test simple localhost command""" # init worker worker = self._task.shell("/bin/hostname", nodes='localhost') self.assert_(worker != None) # run task self._task.resume() def testLocalhostCommand2(self): """test two simple localhost commands""" # init worker worker = self._task.shell("/bin/hostname", nodes='localhost') self.assert_(worker != None) worker = self._task.shell("/bin/uname -r", nodes='localhost') self.assert_(worker != None) # run task self._task.resume() def testTaskShellWorkerGetCommand(self): """test worker.command with task.shell()""" worker1 = self._task.shell("/bin/hostname", nodes='localhost') self.assert_(worker1 != None) worker2 = self._task.shell("/bin/uname -r", nodes='localhost') self.assert_(worker2 != None) self._task.resume() self.assert_(hasattr(worker1, 'command')) self.assert_(hasattr(worker2, 'command')) self.assertEqual(worker1.command, "/bin/hostname") self.assertEqual(worker2.command, "/bin/uname -r") def testLocalhostCopy(self): """test simple localhost copy""" # init worker worker = self._task.copy("/etc/hosts", "/tmp/cs-test_testLocalhostCopy", nodes='localhost') self.assert_(worker != None) # run task self._task.resume() def testCopyNodeFailure(self): """test node failure error handling 
on simple copy""" # == stderr merged == self._task.set_default("stderr", False) worker = self._task.copy("/etc/hosts", "/tmp/cs-test_testLocalhostCopyF", nodes='unlikely-node,localhost') self.assert_(worker != None) self._task.resume() self.assert_(worker.node_error_buffer("unlikely-node") is None) self.assert_(len(worker.node_buffer("unlikely-node")) > 2) # == stderr separated == self._task.set_default("stderr", True) try: worker = self._task.copy("/etc/hosts", "/tmp/cs-test_testLocalhostCopyF2", nodes='unlikely-node,localhost') self.assert_(worker != None) # run task self._task.resume() self.assert_(worker.node_buffer("unlikely-node") is None) self.assert_(len(worker.node_error_buffer("unlikely-node")) > 2) finally: self._task.set_default("stderr", False) def testLocalhostCopyDir(self): """test simple localhost directory copy""" dtmp_src = tempfile.mkdtemp("_cs-test_src") dtmp_dst = tempfile.mkdtemp( \ "_cs-test_testLocalhostCopyDir") try: os.mkdir(os.path.join(dtmp_src, "lev1_a")) os.mkdir(os.path.join(dtmp_src, "lev1_b")) os.mkdir(os.path.join(dtmp_src, "lev1_a", "lev2")) worker = self._task.copy(dtmp_src, dtmp_dst, nodes='localhost') self.assert_(worker != None) self._task.resume() self.assert_(os.path.exists(os.path.join(dtmp_dst, \ os.path.basename(dtmp_src), "lev1_a", "lev2"))) finally: shutil.rmtree(dtmp_dst, ignore_errors=True) shutil.rmtree(dtmp_src, ignore_errors=True) def testLocalhostExplicitSshCopy(self): """test simple localhost copy with explicit ssh worker""" dest = "/tmp/cs-test_testLocalhostExplicitSshCopy" try: worker = WorkerSsh("localhost", source="/etc/hosts", dest=dest, handler=None, timeout=10) self._task.schedule(worker) self._task.resume() finally: os.remove(dest) def testLocalhostExplicitSshCopyDir(self): """test simple localhost copy dir with explicit ssh worker""" dtmp_src = tempfile.mkdtemp("_cs-test_src") dtmp_dst = tempfile.mkdtemp( \ "_cs-test_testLocalhostExplicitSshCopyDir") try: os.mkdir(os.path.join(dtmp_src, "lev1_a")) 
os.mkdir(os.path.join(dtmp_src, "lev1_b")) os.mkdir(os.path.join(dtmp_src, "lev1_a", "lev2")) worker = WorkerSsh("localhost", source=dtmp_src, dest=dtmp_dst, handler=None, timeout=10) self._task.schedule(worker) self._task.resume() self.assert_(os.path.exists(os.path.join(dtmp_dst, \ os.path.basename(dtmp_src), "lev1_a", "lev2"))) finally: shutil.rmtree(dtmp_dst, ignore_errors=True) shutil.rmtree(dtmp_src, ignore_errors=True) def testLocalhostExplicitSshCopyDirPreserve(self): """test simple localhost preserve copy dir with explicit ssh worker""" dtmp_src = tempfile.mkdtemp("_cs-test_src") dtmp_dst = tempfile.mkdtemp( \ "_cs-test_testLocalhostExplicitSshCopyDirPreserve") try: os.mkdir(os.path.join(dtmp_src, "lev1_a")) os.mkdir(os.path.join(dtmp_src, "lev1_b")) os.mkdir(os.path.join(dtmp_src, "lev1_a", "lev2")) worker = WorkerSsh("localhost", source=dtmp_src, dest=dtmp_dst, handler=None, timeout=10, preserve=True) self._task.schedule(worker) self._task.resume() self.assert_(os.path.exists(os.path.join(dtmp_dst, \ os.path.basename(dtmp_src), "lev1_a", "lev2"))) finally: shutil.rmtree(dtmp_dst, ignore_errors=True) shutil.rmtree(dtmp_src, ignore_errors=True) def testExplicitSshWorker(self): """test simple localhost command with explicit ssh worker""" # init worker worker = WorkerSsh("localhost", command="/bin/echo alright", handler=None, timeout=5) self.assert_(worker != None) self._task.schedule(worker) # run task self._task.resume() # test output self.assertEqual(worker.node_buffer("localhost"), "alright") def testExplicitSshWorkerStdErr(self): """test simple localhost command with explicit ssh worker (stderr)""" # init worker worker = WorkerSsh("localhost", command="/bin/echo alright 1>&2", handler=None, stderr=True, timeout=5) self.assert_(worker != None) self._task.schedule(worker) # run task self._task.resume() # test output self.assertEqual(worker.node_error_buffer("localhost"), "alright") # Re-test with stderr=False worker = WorkerSsh("localhost", 
command="/bin/echo alright 1>&2", handler=None, stderr=False, timeout=5) self.assert_(worker != None) self._task.schedule(worker) # run task self._task.resume() # test output self.assertEqual(worker.node_error_buffer("localhost"), None) class TEventHandlerChecker(EventHandler): """simple event trigger validator""" def __init__(self, test): self.test = test self.flags = 0 self.read_count = 0 self.written_count = 0 def ev_start(self, worker): self.test.assertEqual(self.flags, 0) self.flags |= EV_START def ev_read(self, worker): self.test.assertEqual(self.flags, EV_START) self.flags |= EV_READ self.last_node, self.last_read = worker.last_read() def ev_written(self, worker): self.test.assert_(self.flags & EV_START) self.flags |= EV_WRITTEN def ev_hup(self, worker): self.test.assert_(self.flags & EV_START) self.flags |= EV_HUP self.last_rc = worker.last_retcode() def ev_timeout(self, worker): self.test.assert_(self.flags & EV_START) self.flags |= EV_TIMEOUT self.last_node = worker.last_node() def ev_close(self, worker): self.test.assert_(self.flags & EV_START) self.test.assert_(self.flags & EV_CLOSE == 0) self.flags |= EV_CLOSE def testShellEvents(self): """test triggered events""" # init worker test_eh = self.__class__.TEventHandlerChecker(self) worker = self._task.shell("/bin/hostname", nodes='localhost', handler=test_eh) self.assert_(worker != None) # run task self._task.resume() # test events received: start, read, hup, close self.assertEqual(test_eh.flags, EV_START | EV_READ | EV_HUP | EV_CLOSE) def testShellEventsWithTimeout(self): """test triggered events (with timeout)""" # init worker test_eh = self.__class__.TEventHandlerChecker(self) worker = self._task.shell("/bin/echo alright && /bin/sleep 10", nodes='localhost', handler=test_eh, timeout=2) self.assert_(worker != None) # run task self._task.resume() # test events received: start, read, timeout, close self.assertEqual(test_eh.flags, EV_START | EV_READ | EV_TIMEOUT | EV_CLOSE) 
self.assertEqual(worker.node_buffer("localhost"), "alright") self.assertEqual(worker.num_timeout(), 1) self.assertEqual(self._task.num_timeout(), 1) count = 0 for node in self._task.iter_keys_timeout(): count += 1 self.assertEqual(node, "localhost") self.assertEqual(count, 1) count = 0 for node in worker.iter_keys_timeout(): count += 1 self.assertEqual(node, "localhost") self.assertEqual(count, 1) def testShellEventsWithTimeout2(self): """test triggered events (with timeout) (more)""" # init worker test_eh1 = self.__class__.TEventHandlerChecker(self) worker1 = self._task.shell("/bin/echo alright && /bin/sleep 10", nodes='localhost', handler=test_eh1, timeout=2) self.assert_(worker1 != None) test_eh2 = self.__class__.TEventHandlerChecker(self) worker2 = self._task.shell("/bin/echo okay && /bin/sleep 10", nodes='localhost', handler=test_eh2, timeout=3) self.assert_(worker2 != None) # run task self._task.resume() # test events received: start, read, timeout, close self.assertEqual(test_eh1.flags, EV_START | EV_READ | EV_TIMEOUT | EV_CLOSE) self.assertEqual(test_eh2.flags, EV_START | EV_READ | EV_TIMEOUT | EV_CLOSE) self.assertEqual(worker1.node_buffer("localhost"), "alright") self.assertEqual(worker2.node_buffer("localhost"), "okay") self.assertEqual(worker1.num_timeout(), 1) self.assertEqual(worker2.num_timeout(), 1) self.assertEqual(self._task.num_timeout(), 2) def testShellEventsReadNoEOL(self): """test triggered events (read without EOL)""" # init worker test_eh = self.__class__.TEventHandlerChecker(self) worker = self._task.shell("/bin/echo -n okay", nodes='localhost', handler=test_eh) self.assert_(worker != None) # run task self._task.resume() # test events received: start, close self.assertEqual(test_eh.flags, EV_START | EV_READ | EV_HUP | EV_CLOSE) self.assertEqual(worker.node_buffer("localhost"), "okay") def testShellEventsNoReadNoTimeout(self): """test triggered events (no read, no timeout)""" # init worker test_eh = self.__class__.TEventHandlerChecker(self) 
worker = self._task.shell("/bin/sleep 2", nodes='localhost', handler=test_eh) self.assert_(worker != None) # run task self._task.resume() # test events received: start, close self.assertEqual(test_eh.flags, EV_START | EV_HUP | EV_CLOSE) self.assertEqual(worker.node_buffer("localhost"), None) def testLocalhostCommandFanout(self): """test fanout with localhost commands""" fanout = self._task.info("fanout") self._task.set_info("fanout", 2) # init worker for i in range(0, 10): worker = self._task.shell("/bin/echo %d" % i, nodes='localhost') self.assert_(worker != None) # run task self._task.resume() # restore fanout value self._task.set_info("fanout", fanout) def testWorkerBuffers(self): """test buffers at worker level""" task = task_self() self.assert_(task != None) worker = task.shell("/usr/bin/printf 'foo\nbar\nxxx\n'", nodes='localhost') task.resume() cnt = 2 for buf, nodes in worker.iter_buffers(): cnt -= 1 if buf == "foo\nbar\nxxx\n": self.assertEqual(len(nodes), 1) self.assertEqual(str(nodes), "localhost") self.assertEqual(cnt, 1) for buf, nodes in worker.iter_buffers("localhost"): cnt -= 1 if buf == "foo\nbar\nxxx\n": self.assertEqual(len(nodes), 1) self.assertEqual(str(nodes), "localhost") self.assertEqual(cnt, 0) def testWorkerNodeBuffers(self): """test iter_node_buffers on distant workers""" task = task_self() self.assert_(task != None) worker = task.shell("/usr/bin/printf 'foo\nbar\nxxx\n'", nodes='localhost') task.resume() cnt = 1 for node, buf in worker.iter_node_buffers(): cnt -= 1 if buf == "foo\nbar\nxxx\n": self.assertEqual(node, "localhost") self.assertEqual(cnt, 0) def testWorkerNodeErrors(self): """test iter_node_errors on distant workers""" task = task_self() self.assert_(task != None) worker = task.shell("/usr/bin/printf 'foo\nbar\nxxx\n' 1>&2", nodes='localhost', stderr=True) task.resume() cnt = 1 for node, buf in worker.iter_node_errors(): cnt -= 1 if buf == "foo\nbar\nxxx\n": self.assertEqual(node, "localhost") self.assertEqual(cnt, 0) def 
testWorkerRetcodes(self): """test retcodes on distant workers""" task = task_self() self.assert_(task != None) worker = task.shell("/bin/sh -c 'exit 3'", nodes="localhost") task.resume() cnt = 2 for rc, keys in worker.iter_retcodes(): cnt -= 1 self.assertEqual(rc, 3) self.assertEqual(len(keys), 1) self.assert_(keys[0] == "localhost") self.assertEqual(cnt, 1) for rc, keys in worker.iter_retcodes("localhost"): cnt -= 1 self.assertEqual(rc, 3) self.assertEqual(len(keys), 1) self.assert_(keys[0] == "localhost") self.assertEqual(cnt, 0) # test node_retcode self.assertEqual(worker.node_retcode("localhost"), 3) # 1.2.91+ self.assertEqual(worker.node_rc("localhost"), 3) # test node_retcode failure self.assertRaises(KeyError, worker.node_retcode, "dummy") # test max retcode API self.assertEqual(task.max_retcode(), 3) def testWorkerNodeRetcodes(self): """test iter_node_retcodes on distant workers""" task = task_self() self.assert_(task != None) worker = task.shell("/bin/sh -c 'exit 3'", nodes="localhost") task.resume() cnt = 1 for node, rc in worker.iter_node_retcodes(): cnt -= 1 self.assertEqual(rc, 3) self.assertEqual(node, "localhost") self.assertEqual(cnt, 0) def testEscape(self): """test distant worker (ssh) cmd with escaped variable""" worker = self._task.shell("export CSTEST=foobar; /bin/echo \$CSTEST | sed 's/\ foo/bar/'", nodes="localhost") # execute self._task.resume() # read result self.assertEqual(worker.node_buffer("localhost"), "$CSTEST") def testEscape2(self): """test distant worker (ssh) cmd with non-escaped variable""" worker = self._task.shell("export CSTEST=foobar; /bin/echo $CSTEST | sed 's/\ foo/bar/'", nodes="localhost") # execute self._task.resume() # read result self.assertEqual(worker.node_buffer("localhost"), "foobar") def testSshUserOption(self): """test task.shell() with ssh_user set""" ssh_user_orig = self._task.info("ssh_user") self._task.set_info("ssh_user", pwd.getpwuid(os.getuid())[0]) worker = self._task.shell("/bin/echo foobar", 
nodes="localhost") self.assert_(worker != None) self._task.resume() # restore original ssh_user (None) self.assertEqual(ssh_user_orig, None) self._task.set_info("ssh_user", ssh_user_orig) def testSshUserOptionForScp(self): """test task.copy() with ssh_user set""" ssh_user_orig = self._task.info("ssh_user") self._task.set_info("ssh_user", pwd.getpwuid(os.getuid())[0]) worker = self._task.copy("/etc/hosts", "/tmp/cs-test_testLocalhostCopyU", nodes='localhost') self.assert_(worker != None) self._task.resume() # restore original ssh_user (None) self.assertEqual(ssh_user_orig, None) self._task.set_info("ssh_user", ssh_user_orig) def testSshOptionsOption(self): """test task.shell() with ssh_options set""" ssh_options_orig = self._task.info("ssh_options") try: self._task.set_info("ssh_options", "-oLogLevel=QUIET") worker = self._task.shell("/bin/echo foobar", nodes="localhost") self.assert_(worker != None) self._task.resume() self.assertEqual(worker.node_buffer("localhost"), "foobar") # test 3 options self._task.set_info("ssh_options", \ "-oLogLevel=QUIET -oStrictHostKeyChecking=no -oVerifyHostKeyDNS=no") worker = self._task.shell("/bin/echo foobar3", nodes="localhost") self.assert_(worker != None) self._task.resume() self.assertEqual(worker.node_buffer("localhost"), "foobar3") finally: # restore original ssh_user (None) self.assertEqual(ssh_options_orig, None) self._task.set_info("ssh_options", ssh_options_orig) def testSshOptionsOptionForScp(self): """test task.copy() with ssh_options set""" ssh_options_orig = self._task.info("ssh_options") try: testfile = "/tmp/cs-test_testLocalhostCopyO" if os.path.exists(testfile): os.remove(testfile) self._task.set_info("ssh_options", \ "-oLogLevel=QUIET -oStrictHostKeyChecking=no -oVerifyHostKeyDNS=no") worker = self._task.copy("/etc/hosts", testfile, nodes='localhost') self.assert_(worker != None) self._task.resume() self.assert_(os.path.exists(testfile)) finally: # restore original ssh_user (None) 
self.assertEqual(ssh_options_orig, None) self._task.set_info("ssh_options", ssh_options_orig) def testShellStderrWithHandler(self): """test reading stderr of distant task.shell() on event handler""" class StdErrHandler(EventHandler): def ev_error(self, worker): assert worker.last_error() == "something wrong" worker = self._task.shell("echo something wrong 1>&2", nodes='localhost', handler=StdErrHandler()) self._task.resume() for buf, nodes in worker.iter_errors(): self.assertEqual(buf, "something wrong") for buf, nodes in worker.iter_errors('localhost'): self.assertEqual(buf, "something wrong") def testShellWriteSimple(self): """test simple write on distant task.shell()""" worker = self._task.shell("cat", nodes='localhost') worker.write("this is a test\n") worker.set_write_eof() self._task.resume() self.assertEqual(worker.node_buffer("localhost"), "this is a test") def testShellWriteHandler(self): """test write in event handler on distant task.shell()""" class WriteOnReadHandler(EventHandler): def __init__(self, target_worker): self.target_worker = target_worker def ev_read(self, worker): self.target_worker.write("%s:%s\n" % worker.last_read()) self.target_worker.set_write_eof() reader = self._task.shell("cat", nodes='localhost') worker = self._task.shell("sleep 1; echo foobar", nodes='localhost', handler=WriteOnReadHandler(reader)) self._task.resume() self.assertEqual(reader.node_buffer("localhost"), "localhost:foobar") def testSshBadArgumentOption(self): """test WorkerSsh constructor bad argument""" # Check code < 1.4 compatibility self.assertRaises(WorkerBadArgumentError, WorkerSsh, "localhost", None, None) # As of 1.4, ValueError is raised for missing parameter self.assertRaises(ValueError, WorkerSsh, "localhost", None, None) # 1.4+ def testCopyEvents(self): """test triggered events on task.copy()""" test_eh = self.__class__.TEventHandlerChecker(self) worker = self._task.copy("/etc/hosts", "/tmp/cs-test_testLocalhostCopyEvents", nodes='localhost', 
handler=test_eh) self.assert_(worker != None) # run task self._task.resume() self.assertEqual(test_eh.flags, EV_START | EV_HUP | EV_CLOSE) def testWorkerAbort(self): """test distant/ssh Worker abort() on timer""" task = task_self() self.assert_(task != None) # Test worker.abort() in an event handler. class AbortOnTimer(EventHandler): def __init__(self, worker): EventHandler.__init__(self) self.ext_worker = worker self.testtimer = False def ev_timer(self, timer): self.ext_worker.abort() self.testtimer = True aot = AbortOnTimer(task.shell("sleep 10", nodes="localhost")) self.assertEqual(aot.testtimer, False) task.timer(1.5, handler=aot) task.resume() self.assertEqual(aot.testtimer, True) def testWorkerAbortSanity(self): """test distant/ssh Worker abort() (sanity)""" task = task_self() worker = task.shell("sleep 1", nodes="localhost") worker.abort() # test noop abort() on unscheduled worker worker = WorkerSsh("localhost", command="sleep 1", handler=None, timeout=None) worker.abort() def testLocalhostExplicitSshReverseCopy(self): """test simple localhost rcopy with explicit ssh worker""" dest = "/tmp/cs-test_testLocalhostExplicitSshRCopy" shutil.rmtree(dest, ignore_errors=True) try: os.mkdir(dest) worker = WorkerSsh("localhost", source="/etc/hosts", dest=dest, handler=None, timeout=10, reverse=True) self._task.schedule(worker) self._task.resume() self.assertEqual(worker.source, "/etc/hosts") self.assertEqual(worker.dest, dest) self.assert_(os.path.exists(os.path.join(dest, "hosts.localhost"))) finally: shutil.rmtree(dest, ignore_errors=True) def testLocalhostExplicitSshReverseCopyDir(self): """test simple localhost rcopy dir with explicit ssh worker""" dtmp_src = tempfile.mkdtemp("_cs-test_src") dtmp_dst = tempfile.mkdtemp( \ "_cs-test_testLocalhostExplicitSshReverseCopyDir") try: os.mkdir(os.path.join(dtmp_src, "lev1_a")) os.mkdir(os.path.join(dtmp_src, "lev1_b")) os.mkdir(os.path.join(dtmp_src, "lev1_a", "lev2")) worker = WorkerSsh("localhost", source=dtmp_src, 
dest=dtmp_dst, handler=None, timeout=30, reverse=True) self._task.schedule(worker) self._task.resume() self.assert_(os.path.exists(os.path.join(dtmp_dst, \ "%s.localhost" % os.path.basename(dtmp_src), "lev1_a", "lev2"))) finally: shutil.rmtree(dtmp_dst, ignore_errors=True) shutil.rmtree(dtmp_src, ignore_errors=True) def testLocalhostExplicitSshReverseCopyDirPreserve(self): """test simple localhost preserve rcopy dir with explicit ssh worker""" dtmp_src = tempfile.mkdtemp("_cs-test_src") dtmp_dst = tempfile.mkdtemp( \ "_cs-test_testLocalhostExplicitSshReverseCopyDirPreserve") try: os.mkdir(os.path.join(dtmp_src, "lev1_a")) os.mkdir(os.path.join(dtmp_src, "lev1_b")) os.mkdir(os.path.join(dtmp_src, "lev1_a", "lev2")) worker = WorkerSsh("localhost", source=dtmp_src, dest=dtmp_dst, handler=None, timeout=30, reverse=True) self._task.schedule(worker) self._task.resume() self.assert_(os.path.exists(os.path.join(dtmp_dst, \ "%s.localhost" % os.path.basename(dtmp_src), "lev1_a", "lev2"))) finally: shutil.rmtree(dtmp_dst, ignore_errors=True) shutil.rmtree(dtmp_src, ignore_errors=True) def testErroneousSshPath(self): """test erroneous ssh_path behavior""" try: self._task.set_info("ssh_path", "/wrong/path/to/ssh") # init worker worker = self._task.shell("/bin/echo ok", nodes='localhost') self.assert_(worker != None) # run task self._task.resume() self.assertEqual(self._task.max_retcode(), 255) finally: # restore fanout value self._task.set_info("ssh_path", None) if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(TaskDistantTest) unittest.TextTestRunner(verbosity=2).run(suite) clustershell-1.6/tests/CLIClushTest.py0000644000130500135250000002444411741571247017415 0ustar thiellgpocre#!/usr/bin/env python # scripts/clush.py tool test suite # Written by S. 
Thiell 2012-03-28 """Unit test for CLI/Clush.py""" import pwd import subprocess import sys import unittest from TLib import * import ClusterShell.CLI.Clush from ClusterShell.CLI.Clush import main from ClusterShell.Task import task_cleanup class CLIClushTest(unittest.TestCase): """Unit test class for testing CLI/Clush.py""" def tearDown(self): """cleanup all tasks""" task_cleanup() #subprocess.call(["ls", "-x", "/proc/self/fd"], stdout=sys.stdout) def _clush_t(self, args, input, expected_stdout, expected_rc=0, expected_stderr=None): """This new version allows code coverage checking by calling clush's main entry point.""" def raw_input_mock(prompt): time.sleep(3600) ClusterShell.CLI.Clush.raw_input = raw_input_mock clush_exit = ClusterShell.CLI.Clush.clush_exit try: ClusterShell.CLI.Clush.clush_exit = sys.exit # workaround (see #185) CLI_main(self, main, [ 'clush' ] + args, input, expected_stdout, expected_rc, expected_stderr) finally: ClusterShell.CLI.Clush.clush_exit = clush_exit ClusterShell.CLI.Clush.raw_input = raw_input def test_000_display(self): """test clush (display options)""" self._clush_t(["-w", "localhost", "true"], None, "") self._clush_t(["-w", "localhost", "echo", "ok"], None, \ "localhost: ok\n") self._clush_t(["-w", "localhost", "echo", "ok", "ok"], None, \ "localhost: ok ok\n") self._clush_t(["-N", "-w", "localhost", "echo", "ok", "ok"], None, \ "ok ok\n") self._clush_t(["-qw", "localhost", "echo", "ok"], None, \ "localhost: ok\n") self._clush_t(["-vw", "localhost", "echo", "ok"], None, \ "localhost: ok\n") self._clush_t(["-qvw", "localhost", "echo", "ok"], None, \ "localhost: ok\n") self._clush_t(["-Sw", "localhost", "echo", "ok"], None, \ "localhost: ok\n") self._clush_t(["-Sqw", "localhost", "echo", "ok"], None, \ "localhost: ok\n") self._clush_t(["-Svw", "localhost", "echo", "ok"], None, \ "localhost: ok\n") self._clush_t(["--nostdin", "-w", "localhost", "echo", "ok"], None, \ "localhost: ok\n") def test_001_display_tty(self): """test clush 
(display options) [tty]""" setattr(ClusterShell.CLI.Clush, '_f_user_interaction', True) try: self.test_000_display() finally: delattr(ClusterShell.CLI.Clush, '_f_user_interaction') def test_002_fanout(self): """test clush (fanout)""" self._clush_t(["-f", "10", "-w", "localhost", "true"], None, "") self._clush_t(["-f", "1", "-w", "localhost", "true"], None, "") self._clush_t(["-f", "1", "-w", "localhost", "echo", "ok"], None, \ "localhost: ok\n") def test_003_fanout_tty(self): """test clush (fanout) [tty]""" setattr(ClusterShell.CLI.Clush, '_f_user_interaction', True) try: self.test_002_fanout() finally: delattr(ClusterShell.CLI.Clush, '_f_user_interaction') def test_004_ssh_options(self): """test clush (ssh options)""" self._clush_t(["-o", "-oStrictHostKeyChecking=no", "-w", "localhost", \ "echo", "ok"], None, "localhost: ok\n") self._clush_t(["-o", "-oStrictHostKeyChecking=no -oForwardX11=no", \ "-w", "localhost", "echo", "ok"], None, "localhost: ok\n") self._clush_t(["-o", "-oStrictHostKeyChecking=no", "-o", \ "-oForwardX11=no", "-w", "localhost", "echo", "ok"], None, \ "localhost: ok\n") self._clush_t(["-o-oStrictHostKeyChecking=no", "-o-oForwardX11=no", \ "-w", "localhost", "echo", "ok"], None, "localhost: ok\n") self._clush_t(["-u", "4", "-w", "localhost", "echo", "ok"], None, \ "localhost: ok\n") self._clush_t(["-t", "4", "-u", "4", "-w", "localhost", "echo", \ "ok"], None, "localhost: ok\n") def test_005_ssh_options_tty(self): """test clush (ssh options) [tty]""" setattr(ClusterShell.CLI.Clush, '_f_user_interaction', True) try: self.test_004_ssh_options() finally: delattr(ClusterShell.CLI.Clush, '_f_user_interaction') def test_006_output_gathering(self): """test clush (output gathering)""" self._clush_t(["-w", "localhost", "-L", "echo", "ok"], None, \ "localhost: ok\n") self._clush_t(["-w", "localhost", "-bL", "echo", "ok"], None, \ "localhost: ok\n") self._clush_t(["-w", "localhost", "-qbL", "echo", "ok"], None, \ "localhost: ok\n") self._clush_t(["-w", 
"localhost", "-BL", "echo", "ok"], None, \ "localhost: ok\n") self._clush_t(["-w", "localhost", "-qBL", "echo", "ok"], None, \ "localhost: ok\n") self._clush_t(["-w", "localhost", "-BLS", "echo", "ok"], None, \ "localhost: ok\n") self._clush_t(["-w", "localhost", "-qBLS", "echo", "ok"], None, \ "localhost: ok\n") self._clush_t(["-w", "localhost", "-vb", "echo", "ok"], None, \ "localhost: ok\n---------------\nlocalhost\n---------------\nok\n") def test_007_output_gathering_tty(self): """test clush (output gathering) [tty]""" setattr(ClusterShell.CLI.Clush, '_f_user_interaction', True) try: self.test_006_output_gathering() finally: delattr(ClusterShell.CLI.Clush, '_f_user_interaction') def test_008_file_copy(self): """test clush (file copy)""" content = "%f" % time.time() f = make_temp_file(content) self._clush_t(["-w", "localhost", "-c", f.name], None, "") f.seek(0) self.assertEqual(f.read(), content) # test --dest option f2 = tempfile.NamedTemporaryFile() self._clush_t(["-w", "localhost", "-c", f.name, "--dest", f2.name], \ None, "") f2.seek(0) self.assertEqual(f2.read(), content) # test --user option f2 = tempfile.NamedTemporaryFile() self._clush_t(["--user", pwd.getpwuid(os.getuid())[0], "-w", \ "localhost", "--copy", f.name, "--dest", f2.name], None, "") f2.seek(0) self.assertEqual(f2.read(), content) # test --rcopy self._clush_t(["--user", pwd.getpwuid(os.getuid())[0], "-w", \ "localhost", "--rcopy", f.name, "--dest", \ os.path.dirname(f.name)], None, "") f2.seek(0) self.assertEqual(open("%s.localhost" % f.name).read(), content) def test_009_file_copy_tty(self): """test clush (file copy) [tty]""" setattr(ClusterShell.CLI.Clush, '_f_user_interaction', True) try: self.test_008_file_copy() finally: delattr(ClusterShell.CLI.Clush, '_f_user_interaction') def test_010_diff(self): """test clush (diff)""" self._clush_t(["-w", "localhost", "--diff", "echo", "ok"], None, "") self._clush_t(["-w", "localhost,127.0.0.1", "--diff", "echo", "ok"], None, "") def 
test_011_diff_tty(self): """test clush (diff) [tty]""" setattr(ClusterShell.CLI.Clush, '_f_user_interaction', True) try: self.test_010_diff() finally: delattr(ClusterShell.CLI.Clush, '_f_user_interaction') def test_012_stdin(self): """test clush (stdin)""" self._clush_t(["-w", "localhost", "sleep 1 && cat"], "ok", "localhost: ok\n") self._clush_t(["-w", "localhost", "cat"], "ok\nok", "localhost: ok\nlocalhost: ok\n") # write binary to stdin self._clush_t(["-w", "localhost", "gzip -d"], \ "1f8b0800869a744f00034bcbcf57484a2ce2020027b4dd1308000000".decode("hex"), "localhost: foo bar\n") def test_014_stderr(self): """test clush (stderr)""" self._clush_t(["-w", "localhost", "echo err 1>&2"], None, "", 0, "localhost: err\n") self._clush_t(["-b", "-w", "localhost", "echo err 1>&2"], None, "", 0, "localhost: err\n") self._clush_t(["-B", "-w", "localhost", "echo err 1>&2"], None, "---------------\nlocalhost\n---------------\nerr\n") def test_015_stderr_tty(self): """test clush (stderr) [tty]""" setattr(ClusterShell.CLI.Clush, '_f_user_interaction', True) try: self.test_014_stderr() finally: delattr(ClusterShell.CLI.Clush, '_f_user_interaction') def test_016_retcodes(self): """test clush (retcodes)""" self._clush_t(["-w", "localhost", "/bin/false"], None, "", 0, "clush: localhost: exited with exit code 1\n") self._clush_t(["-w", "localhost", "-b", "/bin/false"], None, "", 0, "clush: localhost: exited with exit code 1\n") self._clush_t(["-S", "-w", "localhost", "/bin/false"], None, "", 1, "clush: localhost: exited with exit code 1\n") for i in (1, 2, 127, 128, 255): self._clush_t(["-S", "-w", "localhost", "exit %d" % i], None, "", i, \ "clush: localhost: exited with exit code %d\n" % i) self._clush_t(["-v", "-w", "localhost", "/bin/false"], None, "", 0, "clush: localhost: exited with exit code 1\n") def test_017_retcodes_tty(self): """test clush (retcodes) [tty]""" setattr(ClusterShell.CLI.Clush, '_f_user_interaction', True) try: self.test_016_retcodes() finally: 
delattr(ClusterShell.CLI.Clush, '_f_user_interaction') def test_018_timeout(self): """test clush (timeout)""" self._clush_t(["-w", "localhost", "-u", "5", "sleep 7"], None, "", 0, "clush: localhost: command timeout\n") self._clush_t(["-w", "localhost", "-u", "5", "-b", "sleep 7"], None, "", 0, "clush: localhost: command timeout\n") def test_019_timeout_tty(self): """test clush (timeout) [tty]""" setattr(ClusterShell.CLI.Clush, '_f_user_interaction', True) try: self.test_018_timeout() finally: delattr(ClusterShell.CLI.Clush, '_f_user_interaction') if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(CLIClushTest) unittest.TextTestRunner(verbosity=2).run(suite) clustershell-1.6/tests/TaskThreadSuspendTest.py0000644000130500135250000000422611741571247021377 0ustar thiellgpocre#!/usr/bin/env python # ClusterShell test suite # Written by S. Thiell 2010-01-16 """Unit test for ClusterShell in multithreaded environments""" import random import sys import time import thread import unittest sys.path.insert(0, '../lib') from ClusterShell.Task import * from ClusterShell.Event import EventHandler class TaskThreadSuspendTest(unittest.TestCase): def tearDown(self): task_cleanup() def testSuspendMiscTwoTasks(self): """test task suspend/resume (2 tasks)""" task = task_self() task2 = Task() task2.shell("sleep 4 && echo thr1") task2.resume() w = task.shell("sleep 1 && echo thr0", key=0) task.resume() self.assertEqual(task.key_buffer(0), "thr0") self.assertEqual(w.read(), "thr0") assert task2 != task task2.suspend() time.sleep(10) task2.resume() task_wait() task2.shell("echo suspend_test", key=1) task2.resume() task_wait() self.assertEqual(task2.key_buffer(1), "suspend_test") def _thread_delayed_unsuspend_func(self, task): """thread used to unsuspend task during task_wait()""" time_th = int(random.random()*6+5) #print "TIME unsuspend thread=%d" % time_th time.sleep(time_th) self.resumed = True task.resume() def testThreadTaskWaitWithSuspend(self): """test 
task_wait() with suspended tasks""" task = Task() self.resumed = False thread.start_new_thread(TaskThreadSuspendTest._thread_delayed_unsuspend_func, (self, task)) time_sh = int(random.random()*4) #print "TIME shell=%d" % time_sh task.shell("sleep %d" % time_sh) task.resume() time.sleep(1) suspended = task.suspend() for i in range(1, 4): task = Task() task.shell("sleep %d" % i) task.resume() time.sleep(1) task_wait() self.assert_(self.resumed or suspended == False) if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(TaskThreadSuspendTest) unittest.TextTestRunner(verbosity=2).run(suite) clustershell-1.6/tests/TaskPortTest.py0000644000130500135250000000311411741571247017545 0ustar thiellgpocre#!/usr/bin/env python # ClusterShell test suite # Written by S. Thiell 2009-12-19 """Unit test for ClusterShell inter-Task msg""" import pickle import sys import threading import unittest sys.path.insert(0, '../lib') from ClusterShell.Task import * from ClusterShell.Event import EventHandler class TaskPortTest(unittest.TestCase): def tearDown(self): task_cleanup() def testPortMsg1(self): """test port msg from main thread to task""" TaskPortTest.got_msg = False # create task in new thread task = Task() class PortHandler(EventHandler): def ev_msg(self, port, msg): # receive msg assert msg == "toto" assert port.task.thread == threading.currentThread() TaskPortTest.got_msg = True port.task.abort() # create non-autoclosing port port = task.port(handler=PortHandler()) task.resume() # send msg from main thread port.msg("toto") task_wait() self.assert_(TaskPortTest.got_msg) def testPortRemove(self): """test port remove [private as of 1.2]""" task = Task() class PortHandler(EventHandler): def ev_msg(self, port, msg): pass port = task.port(handler=PortHandler(), autoclose=True) task.resume() task._remove_port(port) task_wait() if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(TaskPortTest) 
unittest.TextTestRunner(verbosity=2).run(suite) clustershell-1.6/tests/RangeSetTest.py0000644000130500135250000012533311741571247017516 0ustar thiellgpocre#!/usr/bin/env python # ClusterShell.NodeSet.RangeSet test suite # Written by S. Thiell """Unit test for RangeSet""" import binascii import copy import pickle import sys import unittest sys.path.insert(0, '../lib') from ClusterShell.RangeSet import RangeSet class RangeSetTest(unittest.TestCase): def _testRS(self, test, res, length): r1 = RangeSet(test, autostep=3) self.assertEqual(str(r1), res) self.assertEqual(len(r1), length) def testSimple(self): """test RangeSet simple ranges""" self._testRS("0", "0", 1) self._testRS("1", "1", 1) self._testRS("0-2", "0-2", 3) self._testRS("1-3", "1-3", 3) self._testRS("1-3,4-6", "1-6", 6) self._testRS("1-3,4-6,7-10", "1-10", 10) def testStepSimple(self): """test RangeSet simple step usages""" self._testRS("0-4/2", "0-4/2", 3) self._testRS("1-4/2", "1,3", 2) self._testRS("1-4/3", "1,4", 2) self._testRS("1-4/4", "1", 1) def testStepAdvanced(self): """test RangeSet advanced step usages""" self._testRS("1-4/4,2-6/2", "1,2-6/2", 4) # 1.6 small behavior change self._testRS("6-24/6,9-21/6", "6-24/3", 7) self._testRS("0-24/2,9-21/2", "0-8/2,9-22,24", 20) self._testRS("0-24/2,9-21/2,100", "0-8/2,9-22,24,100", 21) self._testRS("0-24/2,9-21/2,100-101", "0-8/2,9-22,24,100-101", 22) self._testRS("3-21/9,6-24/9,9-27/9", "3-27/3", 9) self._testRS("101-121/4,1-225/112", "1,101-121/4,225", 8) self._testRS("1-32/3,13-28/9", "1-31/3", 11) self._testRS("1-32/3,13-22/9", "1-31/3", 11) self._testRS("1-32/3,13-31/9", "1-31/3", 11) self._testRS("1-32/3,13-40/9", "1-31/3,40", 12) self._testRS("1-16/3,13-28/6", "1-19/3,25", 8) self._testRS("1-16/3,1-16/6", "1-16/3", 6) self._testRS("1-16/6,1-16/3", "1-16/3", 6) self._testRS("1-16/3,3-19/6", "1,3-4,7,9-10,13,15-16", 9) #self._testRS("1-16/3,3-19/4", "1,3-4,7,10-11,13,15-16,19", 10) # < 1.6 self._testRS("1-16/3,3-19/4", "1,3,4-10/3,11-15/2,16,19", 10) # 
>= 1.6 self._testRS("1-17/2,2-18/2", "1-18", 18) self._testRS("1-17/2,33-41/2,2-18/2", "1-18,33-41/2", 23) self._testRS("1-17/2,33-41/2,2-20/2", "1-18,20,33-41/2", 24) self._testRS("1-17/2,33-41/2,2-19/2", "1-18,33-41/2", 23) self._testRS("1968-1970,1972,1975,1978-1981,1984-1989", "1968-1970,1972-1978/3,1979-1981,1984-1989", 15) def testIntersectSimple(self): """test RangeSet with simple intersections of ranges""" r1 = RangeSet("4-34") r2 = RangeSet("27-42") r1.intersection_update(r2) self.assertEqual(str(r1), "27-34") self.assertEqual(len(r1), 8) r1 = RangeSet("2-450,654-700,800") r2 = RangeSet("500-502,690-820,830-840,900") r1.intersection_update(r2) self.assertEqual(str(r1), "690-700,800") self.assertEqual(len(r1), 12) r1 = RangeSet("2-450,654-700,800") r3 = r1.intersection(r2) self.assertEqual(str(r3), "690-700,800") self.assertEqual(len(r3), 12) r1 = RangeSet("2-450,654-700,800") r3 = r1 & r2 self.assertEqual(str(r3), "690-700,800") self.assertEqual(len(r3), 12) r1 = RangeSet() r3 = r1.intersection(r2) self.assertEqual(str(r3), "") self.assertEqual(len(r3), 0) def testIntersectStep(self): """test RangeSet with more intersections of ranges""" r1 = RangeSet("4-34/2") r2 = RangeSet("28-42/2") r1.intersection_update(r2) self.assertEqual(str(r1), "28,30,32,34") self.assertEqual(len(r1), 4) r1 = RangeSet("4-34/2") r2 = RangeSet("27-42/2") r1.intersection_update(r2) self.assertEqual(str(r1), "") self.assertEqual(len(r1), 0) r1 = RangeSet("2-60/3", autostep=3) r2 = RangeSet("3-50/2", autostep=3) r1.intersection_update(r2) self.assertEqual(str(r1), "5-47/6") self.assertEqual(len(r1), 8) def testSubSimple(self): """test RangeSet with simple difference of ranges""" r1 = RangeSet("4,7-33") r2 = RangeSet("8-33") r1.difference_update(r2) self.assertEqual(str(r1), "4,7") self.assertEqual(len(r1), 2) r1 = RangeSet("4,7-33") r3 = r1.difference(r2) self.assertEqual(str(r3), "4,7") self.assertEqual(len(r3), 2) r3 = r1 - r2 self.assertEqual(str(r3), "4,7") 
self.assertEqual(len(r3), 2) # bounds checking r1 = RangeSet("1-10,39-41,50-60") r2 = RangeSet("1-10,38-39,50-60") r1.difference_update(r2) self.assertEqual(len(r1), 2) self.assertEqual(str(r1), "40-41") r1 = RangeSet("1-20,39-41") r2 = RangeSet("1-20,41-42") r1.difference_update(r2) self.assertEqual(len(r1), 2) self.assertEqual(str(r1), "39-40") # difference(self) issue r1 = RangeSet("1-20,39-41") r1.difference_update(r1) self.assertEqual(len(r1), 0) self.assertEqual(str(r1), "") # strict mode r1 = RangeSet("4,7-33") r2 = RangeSet("8-33") r1.difference_update(r2, strict=True) self.assertEqual(str(r1), "4,7") self.assertEqual(len(r1), 2) r3 = RangeSet("4-5") self.assertRaises(KeyError, r1.difference_update, r3, True) def testSymmetricDifference(self): """test RangeSet.symmetric_difference_update()""" r1 = RangeSet("4,7-33") r2 = RangeSet("8-34") r1.symmetric_difference_update(r2) self.assertEqual(str(r1), "4,7,34") self.assertEqual(len(r1), 3) r1 = RangeSet("4,7-33") r3 = r1.symmetric_difference(r2) self.assertEqual(str(r3), "4,7,34") self.assertEqual(len(r3), 3) r3 = r1 ^ r2 self.assertEqual(str(r3), "4,7,34") self.assertEqual(len(r3), 3) r1 = RangeSet("5,7,10-12,33-50") r2 = RangeSet("8-34") r1.symmetric_difference_update(r2) self.assertEqual(str(r1), "5,7-9,13-32,35-50") self.assertEqual(len(r1), 40) r1 = RangeSet("8-34") r2 = RangeSet("5,7,10-12,33-50") r1.symmetric_difference_update(r2) self.assertEqual(str(r1), "5,7-9,13-32,35-50") self.assertEqual(len(r1), 40) r1 = RangeSet("8-30") r2 = RangeSet("31-40") r1.symmetric_difference_update(r2) self.assertEqual(str(r1), "8-40") self.assertEqual(len(r1), 33) r1 = RangeSet("8-30") r2 = RangeSet("8-30") r1.symmetric_difference_update(r2) self.assertEqual(str(r1), "") self.assertEqual(len(r1), 0) r1 = RangeSet("8-30") r2 = RangeSet("10-13,31-40") r1.symmetric_difference_update(r2) self.assertEqual(str(r1), "8-9,14-40") self.assertEqual(len(r1), 29) r1 = RangeSet("10-13,31-40") r2 = RangeSet("8-30") 
r1.symmetric_difference_update(r2) self.assertEqual(str(r1), "8-9,14-40") self.assertEqual(len(r1), 29) r1 = RangeSet("1,3,5,7") r2 = RangeSet("4-8") r1.symmetric_difference_update(r2) self.assertEqual(str(r1), "1,3-4,6,8") self.assertEqual(len(r1), 5) r1 = RangeSet("1-1000") r2 = RangeSet("0-40,60-100/4,300,1000,1002") r1.symmetric_difference_update(r2) self.assertEqual(str(r1), "0,41-59,61-63,65-67,69-71,73-75,77-79,81-83,85-87,89-91,93-95,97-99,101-299,301-999,1002") self.assertEqual(len(r1), 949) r1 = RangeSet("25,27,29-31,33-35,41-43,48,50-52,55-60,63,66-68,71-78") r2 = RangeSet("27-30,35,37-39,42,45-48,50,52-54,56,61,67,69-79,81-82") r1.symmetric_difference_update(r2) self.assertEqual(str(r1), "25,28,31,33-34,37-39,41,43,45-47,51,53-55,57-61,63,66,68-70,79,81-82") self.assertEqual(len(r1), 30) r1 = RangeSet("986-987,989,991-992,994-995,997,1002-1008,1010-1011,1015-1018,1021") r2 = RangeSet("989-990,992-994,997-1000") r1.symmetric_difference_update(r2) self.assertEqual(str(r1), "986-987,990-991,993,995,998-1000,1002-1008,1010-1011,1015-1018,1021") self.assertEqual(len(r1), 23) def testSubStep(self): """test RangeSet with more sub of ranges (with step)""" # case 1 no sub r1 = RangeSet("4-34/2", autostep=3) r2 = RangeSet("3-33/2", autostep=3) self.assertEqual(r1.autostep, 3) self.assertEqual(r2.autostep, 3) r1.difference_update(r2) self.assertEqual(str(r1), "4-34/2") self.assertEqual(len(r1), 16) # case 2 diff left r1 = RangeSet("4-34/2", autostep=3) r2 = RangeSet("2-14/2", autostep=3) r1.difference_update(r2) self.assertEqual(str(r1), "16-34/2") self.assertEqual(len(r1), 10) # case 3 diff right r1 = RangeSet("4-34/2", autostep=3) r2 = RangeSet("28-52/2", autostep=3) r1.difference_update(r2) self.assertEqual(str(r1), "4-26/2") self.assertEqual(len(r1), 12) # case 4 diff with ranges split r1 = RangeSet("4-34/2", autostep=3) r2 = RangeSet("12-18/2", autostep=3) r1.difference_update(r2) self.assertEqual(str(r1), "4-10/2,20-34/2") self.assertEqual(len(r1), 12) # 
case 5+ more tricky diffs r1 = RangeSet("4-34/2", autostep=3) r2 = RangeSet("28-55", autostep=3) r1.difference_update(r2) self.assertEqual(str(r1), "4-26/2") self.assertEqual(len(r1), 12) r1 = RangeSet("4-34/2", autostep=3) r2 = RangeSet("27-55", autostep=3) r1.difference_update(r2) self.assertEqual(str(r1), "4-26/2") self.assertEqual(len(r1), 12) r1 = RangeSet("1-100", autostep=3) r2 = RangeSet("2-98/2", autostep=3) r1.difference_update(r2) self.assertEqual(str(r1), "1-99/2,100") self.assertEqual(len(r1), 51) r1 = RangeSet("1-100,102,105-242,800", autostep=3) r2 = RangeSet("1-1000/3", autostep=3) r1.difference_update(r2) self.assertEqual(str(r1), "2-3,5-6,8-9,11-12,14-15,17-18,20-21,23-24,26-27,29-30,32-33,35-36,38-39,41-42,44-45,47-48,50-51,53-54,56-57,59-60,62-63,65-66,68-69,71-72,74-75,77-78,80-81,83-84,86-87,89-90,92-93,95-96,98,99-105/3,107-108,110-111,113-114,116-117,119-120,122-123,125-126,128-129,131-132,134-135,137-138,140-141,143-144,146-147,149-150,152-153,155-156,158-159,161-162,164-165,167-168,170-171,173-174,176-177,179-180,182-183,185-186,188-189,191-192,194-195,197-198,200-201,203-204,206-207,209-210,212-213,215-216,218-219,221-222,224-225,227-228,230-231,233-234,236-237,239-240,242,800") self.assertEqual(len(r1), 160) r1 = RangeSet("1-1000", autostep=3) r2 = RangeSet("2-999/2", autostep=3) r1.difference_update(r2) self.assertEqual(str(r1), "1-999/2,1000") self.assertEqual(len(r1), 501) r1 = RangeSet("1-100/3,40-60/3", autostep=3) r2 = RangeSet("31-61/3", autostep=3) r1.difference_update(r2) self.assertEqual(str(r1), "1-28/3,64-100/3") self.assertEqual(len(r1), 23) r1 = RangeSet("1-100/3,40-60/3", autostep=3) r2 = RangeSet("30-80/5", autostep=3) r1.difference_update(r2) self.assertEqual(str(r1), "1-37/3,43-52/3,58-67/3,73-100/3") self.assertEqual(len(r1), 31) def testContains(self): """test RangeSet.__contains__()""" r1 = RangeSet("1-100,102,105-242,800") self.assertEqual(len(r1), 240) self.assert_(99 in r1) self.assert_("99" in r1) 
self.assert_("099" in r1) self.assertRaises(TypeError, r1.__contains__, object()) self.assert_(101 not in r1) self.assertEqual(len(r1), 240) r2 = RangeSet("1-100/3,40-60/3", autostep=3) self.assertEqual(len(r2), 34) self.assert_(1 in r2) self.assert_(4 in r2) self.assert_(2 not in r2) self.assert_(3 not in r2) self.assert_(40 in r2) self.assert_(101 not in r2) r3 = RangeSet("0003-0143,0360-1000") self.assert_(360 in r3) self.assert_("360" in r3) self.assert_("0360" in r3) r4 = RangeSet("00-02") self.assert_("00" in r4) self.assert_(0 in r4) self.assert_("0" in r4) self.assert_("01" in r4) self.assert_(1 in r4) self.assert_("1" in r4) self.assert_("02" in r4) self.assert_(not "03" in r4) # r1 = RangeSet("115-117,130,132,166-170,4780-4999") self.assertEqual(len(r1), 230) r2 = RangeSet("116-117,130,4781-4999") self.assertEqual(len(r2), 222) self.assertTrue(r2 in r1) self.assertFalse(r1 in r2) r2 = RangeSet("5000") self.assertFalse(r2 in r1) r2 = RangeSet("4999") self.assertTrue(r2 in r1) def testIsSuperSet(self): """test RangeSet.issuperset()""" r1 = RangeSet("1-100,102,105-242,800") self.assertEqual(len(r1), 240) r2 = RangeSet("3-98,140-199,800") self.assertEqual(len(r2), 157) self.assertTrue(r1.issuperset(r1)) self.assertTrue(r1.issuperset(r2)) self.assertTrue(r1 >= r1) self.assertTrue(r1 > r2) self.assertFalse(r2 > r1) r2 = RangeSet("3-98,140-199,243,800") self.assertEqual(len(r2), 158) self.assertFalse(r1.issuperset(r2)) self.assertFalse(r1 > r2) def testIsSubSet(self): """test RangeSet.issubset()""" r1 = RangeSet("1-100,102,105-242,800-900/2") r2 = RangeSet("3,800,802,804,888") self.assertTrue(r2.issubset(r2)) self.assertTrue(r2.issubset(r1)) self.assertTrue(r2 <= r1) self.assertTrue(r2 < r1) self.assertTrue(r1 > r2) self.assertFalse(r1 < r2) self.assertFalse(r1 <= r2) self.assertFalse(r2 >= r1) # since v1.6, padding is ignored when computing set operations r1 = RangeSet("1-100") r2 = RangeSet("001-100") self.assertTrue(r1.issubset(r2)) def testGetItem(self): 
"""test RangeSet.__getitem__()""" r1 = RangeSet("1-100,102,105-242,800") self.assertEqual(len(r1), 240) self.assertEqual(r1[0], 1) self.assertEqual(r1[1], 2) self.assertEqual(r1[2], 3) self.assertEqual(r1[99], 100) self.assertEqual(r1[100], 102) self.assertEqual(r1[101], 105) self.assertEqual(r1[102], 106) self.assertEqual(r1[103], 107) self.assertEqual(r1[237], 241) self.assertEqual(r1[238], 242) self.assertEqual(r1[239], 800) self.assertRaises(IndexError, r1.__getitem__, 240) self.assertRaises(IndexError, r1.__getitem__, 241) # negative indices self.assertEqual(r1[-1], 800) self.assertEqual(r1[-240], 1) for n in range(1, len(r1)): self.assertEqual(r1[-n], r1[len(r1)-n]) self.assertRaises(IndexError, r1.__getitem__, -len(r1)-1) self.assertRaises(IndexError, r1.__getitem__, -len(r1)-2) r2 = RangeSet("1-37/3,43-52/3,58-67/3,73-100/3,102-106/2") self.assertEqual(len(r2), 34) self.assertEqual(r2[0], 1) self.assertEqual(r2[1], 4) self.assertEqual(r2[2], 7) self.assertEqual(r2[12], 37) self.assertEqual(r2[13], 43) self.assertEqual(r2[14], 46) self.assertEqual(r2[16], 52) self.assertEqual(r2[17], 58) self.assertEqual(r2[29], 97) self.assertEqual(r2[30], 100) self.assertEqual(r2[31], 102) self.assertEqual(r2[32], 104) self.assertEqual(r2[33], 106) self.assertRaises(TypeError, r2.__getitem__, "foo") def testGetSlice(self): """test RangeSet.__getitem__() with slice""" r0 = RangeSet("1-12") self.assertEqual(r0[0:3], RangeSet("1-3")) self.assertEqual(r0[2:7], RangeSet("3-7")) # negative indices - sl_start self.assertEqual(r0[-1:0], RangeSet()) self.assertEqual(r0[-2:0], RangeSet()) self.assertEqual(r0[-11:0], RangeSet()) self.assertEqual(r0[-12:0], RangeSet()) self.assertEqual(r0[-13:0], RangeSet()) self.assertEqual(r0[-1000:0], RangeSet()) self.assertEqual(r0[-1:], RangeSet("12")) self.assertEqual(r0[-2:], RangeSet("11-12")) self.assertEqual(r0[-11:], RangeSet("2-12")) self.assertEqual(r0[-12:], RangeSet("1-12")) self.assertEqual(r0[-13:], RangeSet("1-12")) 
self.assertEqual(r0[-1000:], RangeSet("1-12")) self.assertEqual(r0[-13:1], RangeSet("1")) self.assertEqual(r0[-13:2], RangeSet("1-2")) self.assertEqual(r0[-13:11], RangeSet("1-11")) self.assertEqual(r0[-13:12], RangeSet("1-12")) self.assertEqual(r0[-13:13], RangeSet("1-12")) # negative indices - sl_stop self.assertEqual(r0[0:-1], RangeSet("1-11")) self.assertEqual(r0[:-1], RangeSet("1-11")) self.assertEqual(r0[0:-2], RangeSet("1-10")) self.assertEqual(r0[:-2], RangeSet("1-10")) self.assertEqual(r0[1:-2], RangeSet("2-10")) self.assertEqual(r0[4:-4], RangeSet("5-8")) self.assertEqual(r0[5:-5], RangeSet("6-7")) self.assertEqual(r0[6:-5], RangeSet("7")) self.assertEqual(r0[6:-6], RangeSet()) self.assertEqual(r0[7:-6], RangeSet()) self.assertEqual(r0[0:-1000], RangeSet()) r1 = RangeSet("10-14,16-20") self.assertEqual(r1[2:6], RangeSet("12-14,16")) self.assertEqual(r1[2:7], RangeSet("12-14,16-17")) r1 = RangeSet("1-2,4,9,10-12") self.assertEqual(r1[0:3], RangeSet("1-2,4")) self.assertEqual(r1[0:4], RangeSet("1-2,4,9")) self.assertEqual(r1[2:6], RangeSet("4,9,10-11")) self.assertEqual(r1[2:4], RangeSet("4,9")) self.assertEqual(r1[5:6], RangeSet("11")) self.assertEqual(r1[6:7], RangeSet("12")) self.assertEqual(r1[4:7], RangeSet("10-12")) # Slice indices are silently truncated to fall in the allowed range self.assertEqual(r1[2:100], RangeSet("4,9-12")) self.assertEqual(r1[9:10], RangeSet()) # Slice stepping self.assertEqual(r1[0:1:2], RangeSet("1")) self.assertEqual(r1[0:2:2], RangeSet("1")) self.assertEqual(r1[0:3:2], RangeSet("1,4")) self.assertEqual(r1[0:4:2], RangeSet("1,4")) self.assertEqual(r1[0:5:2], RangeSet("1,4,10")) self.assertEqual(r1[0:6:2], RangeSet("1,4,10")) self.assertEqual(r1[0:7:2], RangeSet("1,4,10,12")) self.assertEqual(r1[0:8:2], RangeSet("1,4,10,12")) self.assertEqual(r1[0:9:2], RangeSet("1,4,10,12")) self.assertEqual(r1[0:10:2], RangeSet("1,4,10,12")) self.assertEqual(r1[0:7:3], RangeSet("1,9,12")) self.assertEqual(r1[0:7:4], RangeSet("1,10")) 
self.assertEqual(len(r1[1:1:2]), 0) self.assertEqual(r1[1:2:2], RangeSet("2")) self.assertEqual(r1[1:3:2], RangeSet("2")) self.assertEqual(r1[1:4:2], RangeSet("2,9")) self.assertEqual(r1[1:5:2], RangeSet("2,9")) self.assertEqual(r1[1:6:2], RangeSet("2,9,11")) self.assertEqual(r1[1:7:2], RangeSet("2,9,11")) # negative indices - sl_step self.assertEqual(r1[::-2], RangeSet("1,4,10,12")) r2 = RangeSet("1-2,4,9,10-13") self.assertEqual(r2[::-2], RangeSet("2,9,11,13")) self.assertEqual(r2[::-3], RangeSet("2,10,13")) self.assertEqual(r2[::-4], RangeSet("9,13")) self.assertEqual(r2[::-5], RangeSet("4,13")) self.assertEqual(r2[::-6], RangeSet("2,13")) self.assertEqual(r2[::-7], RangeSet("1,13")) self.assertEqual(r2[::-8], RangeSet("13")) self.assertEqual(r2[::-9], RangeSet("13")) # Partial slices self.assertEqual(r1[2:], RangeSet("4,9-12")) self.assertEqual(r1[:3], RangeSet("1-2,4")) self.assertEqual(r1[:3:2], RangeSet("1,4")) # Twisted r2 = RangeSet("1-9/2,12-32/4") self.assertEqual(r2[5:10:2], RangeSet("12-28/8")) self.assertEqual(r2[5:10:2], RangeSet("12-28/8", autostep=2)) self.assertEqual(r2[1:12:3], RangeSet("3,9,20,32")) # FIXME: use nosetests/@raises to do that... 
self.assertRaises(TypeError, r1.__getitem__, slice('foo', 'bar')) self.assertRaises(TypeError, r1.__getitem__, slice(1, 3, 'bar')) r3 = RangeSet("0-600") self.assertEqual(r3[30:389], RangeSet("30-388")) r3 = RangeSet("0-6000") self.assertEqual(r3[30:389:2], RangeSet("30-389/2")) self.assertEqual(r3[30:389:2], RangeSet("30-389/2", autostep=2)) def testSplit(self): """test RangeSet.split()""" # Empty rangeset rangeset = RangeSet() self.assertEqual(len(list(rangeset.split(2))), 0) # Not enough element rangeset = RangeSet("1") self.assertEqual((RangeSet("1"),), tuple(rangeset.split(2))) # Exact number of elements rangeset = RangeSet("1-6") self.assertEqual((RangeSet("1-2"), RangeSet("3-4"), RangeSet("5-6")), \ tuple(rangeset.split(3))) # Check limit results rangeset = RangeSet("0-3") for i in (4, 5): self.assertEqual((RangeSet("0"), RangeSet("1"), \ RangeSet("2"), RangeSet("3")), \ tuple(rangeset.split(i))) def testAdd(self): """test RangeSet.add()""" r1 = RangeSet("1-100,102,105-242,800") self.assertEqual(len(r1), 240) r1.add(801) self.assertEqual(len(r1), 241) self.assertEqual(r1[0], 1) self.assertEqual(r1[240], 801) r1.add(788) self.assertEqual(str(r1), "1-100,102,105-242,788,800-801") self.assertEqual(len(r1), 242) self.assertEqual(r1[0], 1) self.assertEqual(r1[239], 788) self.assertEqual(r1[240], 800) r1.add(812) self.assertEqual(len(r1), 243) # test forced padding r1 = RangeSet("1-100,102,105-242,800") r1.add(801, pad=3) self.assertEqual(len(r1), 241) self.assertEqual(str(r1), "001-100,102,105-242,800-801") r1.padding = 4 self.assertEqual(len(r1), 241) self.assertEqual(str(r1), "0001-0100,0102,0105-0242,0800-0801") def testUpdate(self): """test RangeSet.update()""" r1 = RangeSet("1-100,102,105-242,800") self.assertEqual(len(r1), 240) r2 = RangeSet("243-799,1924-1984") self.assertEqual(len(r2), 618) r1.update(r2) self.assertEqual(type(r1), RangeSet) self.assertEqual(r1.padding, None) self.assertEqual(len(r1), 240+618) self.assertEqual(str(r1), 
"1-100,102,105-800,1924-1984") r1 = RangeSet("1-100,102,105-242,800") r1.union_update(r2) self.assertEqual(len(r1), 240+618) self.assertEqual(str(r1), "1-100,102,105-800,1924-1984") def testUnion(self): """test RangeSet.union()""" r1 = RangeSet("1-100,102,105-242,800") self.assertEqual(len(r1), 240) r2 = RangeSet("243-799,1924-1984") self.assertEqual(len(r2), 618) r3 = r1.union(r2) self.assertEqual(type(r3), RangeSet) self.assertEqual(r3.padding, None) self.assertEqual(len(r3), 240+618) self.assertEqual(str(r3), "1-100,102,105-800,1924-1984") r4 = r1 | r2 self.assertEqual(len(r4), 240+618) self.assertEqual(str(r4), "1-100,102,105-800,1924-1984") # test with overlap r2 = RangeSet("200-799") r3 = r1.union(r2) self.assertEqual(len(r3), 797) self.assertEqual(str(r3), "1-100,102,105-800") r4 = r1 | r2 self.assertEqual(len(r4), 797) self.assertEqual(str(r4), "1-100,102,105-800") def testRemove(self): """test RangeSet.remove()""" r1 = RangeSet("1-100,102,105-242,800") self.assertEqual(len(r1), 240) r1.remove(100) self.assertEqual(len(r1), 239) self.assertEqual(str(r1), "1-99,102,105-242,800") self.assertRaises(KeyError, r1.remove, 101) # test remove integer-castable type (convenience) r1.remove("106") # non integer castable cases raise ValueError (documented since 1.6) self.assertRaises(ValueError, r1.remove, "foo") def testDiscard(self): """test RangeSet.discard()""" r1 = RangeSet("1-100,102,105-242,800") self.assertEqual(len(r1), 240) r1.discard(100) self.assertEqual(len(r1), 239) self.assertEqual(str(r1), "1-99,102,105-242,800") r1.discard(101) # should not raise KeyError # test remove integer-castable type (convenience) r1.remove("106") r1.discard("foo") def testClear(self): """test RangeSet.clear()""" r1 = RangeSet("1-100,102,105-242,800") self.assertEqual(len(r1), 240) self.assertEqual(str(r1), "1-100,102,105-242,800") r1.clear() self.assertEqual(len(r1), 0) self.assertEqual(str(r1), "") def testConstructorIterate(self): """test RangeSet(iterable) constructor""" # 
from list rgs = RangeSet([3,5,6,7,8,1]) self.assertEqual(str(rgs), "1,3,5-8") self.assertEqual(len(rgs), 6) rgs.add(10) self.assertEqual(str(rgs), "1,3,5-8,10") self.assertEqual(len(rgs), 7) # from set rgs = RangeSet(set([3,5,6,7,8,1])) self.assertEqual(str(rgs), "1,3,5-8") self.assertEqual(len(rgs), 6) # from RangeSet r1 = RangeSet("1,3,5-8") rgs = RangeSet(r1) self.assertEqual(str(rgs), "1,3,5-8") self.assertEqual(len(rgs), 6) def testFromListConstructor(self): """test RangeSet.fromlist() constructor""" rgs = RangeSet.fromlist([ "3", "5-8", "1" ]) self.assertEqual(str(rgs), "1,3,5-8") self.assertEqual(len(rgs), 6) rgs = RangeSet.fromlist([ RangeSet("3"), RangeSet("5-8"), RangeSet("1") ]) self.assertEqual(str(rgs), "1,3,5-8") self.assertEqual(len(rgs), 6) rgs = RangeSet.fromlist([set([3,5,6,7,8,1])]) self.assertEqual(str(rgs), "1,3,5-8") self.assertEqual(len(rgs), 6) def testFromOneConstructor(self): """test RangeSet.fromone() constructor""" rgs = RangeSet.fromone(42) self.assertEqual(str(rgs), "42") self.assertEqual(len(rgs), 1) # also support slice object (v1.6+) rgs = RangeSet.fromone(slice(42)) self.assertEqual(str(rgs), "0-41") self.assertEqual(len(rgs), 42) self.assertRaises(ValueError, RangeSet.fromone, slice(12, None)) rgs = RangeSet.fromone(slice(42, 43)) self.assertEqual(str(rgs), "42") self.assertEqual(len(rgs), 1) rgs = RangeSet.fromone(slice(42, 48)) self.assertEqual(str(rgs), "42-47") self.assertEqual(len(rgs), 6) rgs = RangeSet.fromone(slice(42, 57, 2)) self.assertEqual(str(rgs), "42,44,46,48,50,52,54,56") rgs.autostep = 3 self.assertEqual(str(rgs), "42-56/2") self.assertEqual(len(rgs), 8) def testIterator(self): """test RangeSet iterator""" matches = [ 1, 3, 4, 5, 6, 7, 8, 11 ] rgs = RangeSet.fromlist([ "11", "3", "5-8", "1", "4" ]) cnt = 0 for rg in rgs: self.assertEqual(rg, matches[cnt]) cnt += 1 self.assertEqual(cnt, len(matches)) # with padding rgs = RangeSet.fromlist([ "011", "003", "005-008", "001", "004" ]) cnt = 0 for rg in rgs: 
self.assertTrue(type(rg) is int) self.assertEqual(rg, matches[cnt]) cnt += 1 self.assertEqual(cnt, len(matches)) def testStringIterator(self): """test RangeSet string iterator striter()""" matches = [ 1, 3, 4, 5, 6, 7, 8, 11 ] rgs = RangeSet.fromlist([ "11", "3", "5-8", "1", "4" ]) cnt = 0 for rg in rgs.striter(): self.assertEqual(rg, str(matches[cnt])) cnt += 1 self.assertEqual(cnt, len(matches)) # with padding rgs = RangeSet.fromlist([ "011", "003", "005-008", "001", "004" ]) cnt = 0 for rg in rgs.striter(): self.assertTrue(type(rg) is str) self.assertEqual(rg, "%0*d" % (3, matches[cnt])) cnt += 1 self.assertEqual(cnt, len(matches)) def testBinarySanityCheck(self): """test RangeSet binary sanity check""" rg1 = RangeSet("1-5") rg2 = "4-6" self.assertRaises(TypeError, rg1.__gt__, rg2) self.assertRaises(TypeError, rg1.__lt__, rg2) def testBinarySanityCheckNotImplementedSubtle(self): """test RangeSet binary sanity check (NotImplemented subtle)""" rg1 = RangeSet("1-5") rg2 = "4-6" self.assertEqual(rg1.__and__(rg2), NotImplemented) self.assertEqual(rg1.__or__(rg2), NotImplemented) self.assertEqual(rg1.__sub__(rg2), NotImplemented) self.assertEqual(rg1.__xor__(rg2), NotImplemented) # Should implicitely raises TypeError if the real operator # version is invoked. 
To test that, we perform a manual check # as an additional function would be needed to check with # assertRaises(): good_error = False try: rg3 = rg1 & rg2 except TypeError: good_error = True self.assert_(good_error, "TypeError not raised for &") good_error = False try: rg3 = rg1 | rg2 except TypeError: good_error = True self.assert_(good_error, "TypeError not raised for |") good_error = False try: rg3 = rg1 - rg2 except TypeError: good_error = True self.assert_(good_error, "TypeError not raised for -") good_error = False try: rg3 = rg1 ^ rg2 except TypeError: good_error = True self.assert_(good_error, "TypeError not raised for ^") def testIsSubSetError(self): """test RangeSet.issubset() error""" rg1 = RangeSet("1-5") rg2 = "4-6" self.assertRaises(TypeError, rg1.issubset, rg2) def testEquality(self): """test RangeSet equality""" rg0_1 = RangeSet() rg0_2 = RangeSet() self.assertEqual(rg0_1, rg0_2) rg1 = RangeSet("1-4") rg2 = RangeSet("1-4") self.assertEqual(rg1, rg2) rg3 = RangeSet("2-5") self.assertNotEqual(rg1, rg3) rg4 = RangeSet("1,2,3,4") self.assertEqual(rg1, rg4) rg5 = RangeSet("1,2,4") self.assertNotEqual(rg1, rg5) if rg1 == None: self.fail("rg1 == None succeeded") if rg1 != None: pass else: self.fail("rg1 != None failed") def testAddRange(self): """test RangeSet.add_range()""" r1 = RangeSet() r1.add_range(1, 100, 1) self.assertEqual(len(r1), 99) self.assertEqual(str(r1), "1-99") r1.add_range(40, 101, 1) self.assertEqual(len(r1), 100) self.assertEqual(str(r1), "1-100") r1.add_range(399, 423, 2) self.assertEqual(len(r1), 112) self.assertEqual(str(r1), "1-100,399,401,403,405,407,409,411,413,415,417,419,421") # With autostep... 
r1 = RangeSet(autostep=3) r1.add_range(1, 100, 1) self.assertEqual(r1.autostep, 3) self.assertEqual(len(r1), 99) self.assertEqual(str(r1), "1-99") r1.add_range(40, 101, 1) self.assertEqual(len(r1), 100) self.assertEqual(str(r1), "1-100") r1.add_range(399, 423, 2) self.assertEqual(len(r1), 112) self.assertEqual(str(r1), "1-100,399-421/2") # Bound checks r1 = RangeSet("1-30", autostep=2) self.assertEqual(len(r1), 30) self.assertEqual(str(r1), "1-30") self.assertEqual(r1.autostep, 2) r1.add_range(32, 35, 1) self.assertEqual(len(r1), 33) self.assertEqual(str(r1), "1-30,32-34") r1.add_range(31, 32, 1) self.assertEqual(len(r1), 34) self.assertEqual(str(r1), "1-34") r1 = RangeSet("1-30/4") self.assertEqual(len(r1), 8) self.assertEqual(str(r1), "1,5,9,13,17,21,25,29") r1.add_range(30, 32, 1) self.assertEqual(len(r1), 10) self.assertEqual(str(r1), "1,5,9,13,17,21,25,29-31") r1.add_range(40, 65, 10) self.assertEqual(len(r1), 13) self.assertEqual(str(r1), "1,5,9,13,17,21,25,29-31,40,50,60") r1 = RangeSet("1-30", autostep=3) r1.add_range(40, 65, 10) self.assertEqual(r1.autostep, 3) self.assertEqual(len(r1), 33) self.assertEqual(str(r1), "1-29,30-60/10") # One r1.add_range(103, 104) self.assertEqual(len(r1), 34) self.assertEqual(str(r1), "1-29,30-60/10,103") # Zero self.assertRaises(AssertionError, r1.add_range, 103, 103) def testSlices(self): """test RangeSet.slices()""" r1 = RangeSet() self.assertEqual(len(r1), 0) self.assertEqual(len(list(r1.slices())), 0) # Without autostep r1 = RangeSet("1-7/2,8-12,3000-3019") self.assertEqual(r1.autostep, None) self.assertEqual(len(r1), 29) self.assertEqual(list(r1.slices()), [slice(1, 2, 1), slice(3, 4, 1), \ slice(5, 6, 1), slice(7, 13, 1), slice(3000, 3020, 1)]) # With autostep r1 = RangeSet("1-7/2,8-12,3000-3019", autostep=2) self.assertEqual(len(r1), 29) self.assertEqual(r1.autostep, 2) self.assertEqual(list(r1.slices()), [slice(1, 8, 2), slice(8, 13, 1), \ slice(3000, 3020, 1)]) def testCopy(self): """test RangeSet.copy()""" 
rangeset = RangeSet("115-117,130,166-170,4780-4999") self.assertEqual(len(rangeset), 229) self.assertEqual(str(rangeset), "115-117,130,166-170,4780-4999") r1 = rangeset.copy() r2 = rangeset.copy() self.assertEqual(rangeset, r1) # content equality r1.remove(166) self.assertEqual(len(rangeset), len(r1) + 1) self.assertNotEqual(rangeset, r1) self.assertEqual(str(rangeset), "115-117,130,166-170,4780-4999") self.assertEqual(str(r1), "115-117,130,167-170,4780-4999") r2.update(RangeSet("118")) self.assertNotEqual(rangeset, r2) self.assertNotEqual(r1, r2) self.assertEqual(len(rangeset) + 1, len(r2)) self.assertEqual(str(rangeset), "115-117,130,166-170,4780-4999") self.assertEqual(str(r1), "115-117,130,167-170,4780-4999") self.assertEqual(str(r2), "115-118,130,166-170,4780-4999") def test_unpickle_v1_3_py24(self): """test RangeSet unpickling (against v1.3/py24)""" rngset = pickle.loads(binascii.a2b_base64("gAIoY0NsdXN0ZXJTaGVsbC5Ob2RlU2V0ClJhbmdlU2V0CnEAb3EBfXECKFUHX2xlbmd0aHEDS2RVCV9hdXRvc3RlcHEER1SySa0llMN9VQdfcmFuZ2VzcQVdcQYoKEsFSwVLAUsAdHEHKEsHS2ZLAUsAdHEIKEtoS2hLAUsAdHEJKEtqS2tLAUsAdHEKZXViLg==")) self.assertEqual(rngset, RangeSet("5,7-102,104,106-107")) self.assertEqual(str(rngset), "5,7-102,104,106-107") self.assertEqual(len(rngset), 100) self.assertEqual(rngset[0], 5) self.assertEqual(rngset[1], 7) self.assertEqual(rngset[-1], 107) def test_unpickle_v1_3_py26(self): """test RangeSet unpickling (against v1.3/py26)""" rngset = pickle.loads(binascii.a2b_base64("gAIoY0NsdXN0ZXJTaGVsbC5Ob2RlU2V0ClJhbmdlU2V0CnEAb3EBfXECKFUHX2xlbmd0aHEDS2RVCV9hdXRvc3RlcHEER1SySa0llMN9VQdfcmFuZ2VzcQVdcQYoKEsFSwVLAUsAdHEHKEsHS2ZLAUsAdHEIKEtoS2hLAUsAdHEJKEtqS2tLAUsAdHEKZXViLg==")) self.assertEqual(rngset, RangeSet("5,7-102,104,106-107")) self.assertEqual(str(rngset), "5,7-102,104,106-107") self.assertEqual(len(rngset), 100) self.assertEqual(rngset[0], 5) self.assertEqual(rngset[1], 7) self.assertEqual(rngset[-1], 107) # unpickle_v1_4_py24 : unpickling fails as v1.4 does not have slice 
pickling workaround def test_unpickle_v1_4_py26(self): """test RangeSet unpickling (against v1.4/py26)""" rngset = pickle.loads(binascii.a2b_base64("gAIoY0NsdXN0ZXJTaGVsbC5Ob2RlU2V0ClJhbmdlU2V0CnEAb3EBfXEDKFUHX2xlbmd0aHEES2RVCV9hdXRvc3RlcHEFR1SySa0llMN9VQdfcmFuZ2VzcQZdcQcoY19fYnVpbHRpbl9fCnNsaWNlCnEISwVLBksBh3EJUnEKSwCGcQtoCEsHS2dLAYdxDFJxDUsAhnEOaAhLaEtpSwGHcQ9ScRBLAIZxEWgIS2pLbEsBh3ESUnETSwCGcRRlVQhfdmVyc2lvbnEVSwJ1Yi4=")) self.assertEqual(rngset, RangeSet("5,7-102,104,106-107")) self.assertEqual(str(rngset), "5,7-102,104,106-107") self.assertEqual(len(rngset), 100) self.assertEqual(rngset[0], 5) self.assertEqual(rngset[1], 7) self.assertEqual(rngset[-1], 107) def test_unpickle_v1_5_py24(self): """test RangeSet unpickling (against v1.5/py24)""" rngset = pickle.loads(binascii.a2b_base64("gAIoY0NsdXN0ZXJTaGVsbC5Ob2RlU2V0ClJhbmdlU2V0CnEAb3EBfXEDKFUHX2xlbmd0aHEES2RVCV9hdXRvc3RlcHEFR1SySa0llMN9VQdfcmFuZ2VzcQZdcQcoSwVLBksBh3EISwCGcQlLB0tnSwGHcQpLAIZxC0toS2lLAYdxDEsAhnENS2pLbEsBh3EOSwCGcQ9lVQhfdmVyc2lvbnEQSwJ1Yi4=")) self.assertEqual(rngset, RangeSet("5,7-102,104,106-107")) self.assertEqual(str(rngset), "5,7-102,104,106-107") self.assertEqual(len(rngset), 100) self.assertEqual(rngset[0], 5) self.assertEqual(rngset[1], 7) self.assertEqual(rngset[-1], 107) def test_unpickle_v1_5_py26(self): """test RangeSet unpickling (against v1.5/py26)""" rngset = pickle.loads(binascii.a2b_base64("gAIoY0NsdXN0ZXJTaGVsbC5Ob2RlU2V0ClJhbmdlU2V0CnEAb3EBfXEDKFUHX2xlbmd0aHEES2RVCV9hdXRvc3RlcHEFR1SySa0llMN9VQdfcmFuZ2VzcQZdcQcoY19fYnVpbHRpbl9fCnNsaWNlCnEISwVLBksBh3EJUnEKSwCGcQtoCEsHS2dLAYdxDFJxDUsAhnEOaAhLaEtpSwGHcQ9ScRBLAIZxEWgIS2pLbEsBh3ESUnETSwCGcRRlVQhfdmVyc2lvbnEVSwJ1Yi4=")) self.assertEqual(rngset, RangeSet("5,7-102,104,106-107")) self.assertEqual(str(rngset), "5,7-102,104,106-107") self.assertEqual(len(rngset), 100) self.assertEqual(rngset[0], 5) self.assertEqual(rngset[1], 7) self.assertEqual(rngset[-1], 107) def test_unpickle_v1_6_py24(self): """test RangeSet unpickling (against 
v1.6/py24)""" rngset = pickle.loads(binascii.a2b_base64("gAJjQ2x1c3RlclNoZWxsLlJhbmdlU2V0ClJhbmdlU2V0CnEAVRM1LDctMTAyLDEwNCwxMDYtMTA3cQGFcQJScQN9cQQoVQdwYWRkaW5ncQVOVQlfYXV0b3N0ZXBxBkdUskmtJZTDfVUIX3ZlcnNpb25xB0sDdWIu")) self.assertEqual(rngset, RangeSet("5,7-102,104,106-107")) self.assertEqual(str(rngset), "5,7-102,104,106-107") self.assertEqual(len(rngset), 100) self.assertEqual(rngset[0], 5) self.assertEqual(rngset[1], 7) self.assertEqual(rngset[-1], 107) def test_unpickle_v1_6_py26(self): """test RangeSet unpickling (against v1.6/py26)""" rngset = pickle.loads(binascii.a2b_base64("gAJjQ2x1c3RlclNoZWxsLlJhbmdlU2V0ClJhbmdlU2V0CnEAVRM1LDctMTAyLDEwNCwxMDYtMTA3cQGFcQJScQN9cQQoVQdwYWRkaW5ncQVOVQlfYXV0b3N0ZXBxBkdUskmtJZTDfVUIX3ZlcnNpb25xB0sDdWIu")) self.assertEqual(rngset, RangeSet("5,7-102,104,106-107")) self.assertEqual(str(rngset), "5,7-102,104,106-107") self.assertEqual(len(rngset), 100) self.assertEqual(rngset[0], 5) self.assertEqual(rngset[1], 7) self.assertEqual(rngset[-1], 107) def test_pickle_current(self): """test RangeSet pickling (current version)""" dump = pickle.dumps(RangeSet("1-100")) self.assertNotEqual(dump, None) rngset = pickle.loads(dump) self.assertEqual(rngset, RangeSet("1-100")) self.assertEqual(str(rngset), "1-100") self.assertEqual(rngset[0], 1) self.assertEqual(rngset[1], 2) self.assertEqual(rngset[-1], 100) def testIntersectionLength(self): """test RangeSet intersection/length""" r1 = RangeSet("115-117,130,166-170,4780-4999") self.assertEqual(len(r1), 229) r2 = RangeSet("116-117,130,4781-4999") self.assertEqual(len(r2), 222) res = r1.intersection(r2) self.assertEqual(len(res), 222) r1 = RangeSet("115-200") self.assertEqual(len(r1), 86) r2 = RangeSet("116-117,119,123-131,133,149,199") self.assertEqual(len(r2), 15) res = r1.intersection(r2) self.assertEqual(len(res), 15) # StopIteration test r1 = RangeSet("115-117,130,166-170,4780-4999,5003") self.assertEqual(len(r1), 230) r2 = RangeSet("116-117,130,4781-4999") self.assertEqual(len(r2), 222) 
res = r1.intersection(r2) self.assertEqual(len(res), 222) # StopIteration test2 r1 = RangeSet("130,166-170,4780-4999") self.assertEqual(len(r1), 226) r2 = RangeSet("116-117") self.assertEqual(len(r2), 2) res = r1.intersection(r2) self.assertEqual(len(res), 0) def testFolding(self): """test RangeSet folding conditions""" r1 = RangeSet("112,114-117,119,121,130,132,134,136,138,139-141,144,147-148", autostep=6) self.assertEqual(str(r1), "112,114-117,119,121,130,132,134,136,138-141,144,147-148") r1.autostep = 5 self.assertEqual(str(r1), "112,114-117,119,121,130-138/2,139-141,144,147-148") r1 = RangeSet("1,3-4,6,8") self.assertEqual(str(r1), "1,3-4,6,8") r1 = RangeSet("1,3-4,6,8", autostep=4) self.assertEqual(str(r1), "1,3-4,6,8") r1 = RangeSet("1,3-4,6,8", autostep=2) self.assertEqual(str(r1), "1,3,4-8/2") r1 = RangeSet("1,3-4,6,8", autostep=3) self.assertEqual(str(r1), "1,3,4-8/2") # empty set r1 = RangeSet(autostep=3) self.assertEqual(str(r1), "") def test_ior(self): """test RangeSet.__ior__()""" r1 = RangeSet("1,3-9,14-21,30-39,42") r2 = RangeSet("2-5,10-32,35,40-41") r1 |= r2 self.assertEqual(len(r1), 42) self.assertEqual(str(r1), "1-42") def test_iand(self): """test RangeSet.__iand__()""" r1 = RangeSet("1,3-9,14-21,30-39,42") r2 = RangeSet("2-5,10-32,35,40-41") r1 &= r2 self.assertEqual(len(r1), 15) self.assertEqual(str(r1), "3-5,14-21,30-32,35") def test_ixor(self): """test RangeSet.__ixor__()""" r1 = RangeSet("1,3-9,14-21,30-39,42") r2 = RangeSet("2-5,10-32,35,40-41") r1 ^= r2 self.assertEqual(len(r1), 27) self.assertEqual(str(r1), "1-2,6-13,22-29,33-34,36-42") def test_isub(self): """test RangeSet.__isub__()""" r1 = RangeSet("1,3-9,14-21,30-39,42") r2 = RangeSet("2-5,10-32,35,40-41") r1 -= r2 self.assertEqual(len(r1), 12) self.assertEqual(str(r1), "1,6-9,33-34,36-39,42") def test_contiguous(self): r1 = RangeSet("1,3-9,14-21,30-39,42") self.assertEqual(['1', '3-9', '14-21', '30-39', '42'], [str(ns) for ns in r1.contiguous()]) if __name__ == '__main__': suite = 
unittest.TestLoader().loadTestsFromTestCase(RangeSetTest) unittest.TextTestRunner(verbosity=2).run(suite) clustershell-1.6/tests/CLIConfigTest.py0000644000130500135250000002360511741571247017542 0ustar thiellgpocre#!/usr/bin/env python # ClusterShell.CLI.Config test suite # Written by S. Thiell 2010-09-19 """Unit test for CLI.Config""" import resource import sys import tempfile import unittest sys.path.insert(0, '../lib') from ClusterShell.CLI.Clush import set_fdlimit from ClusterShell.CLI.Config import ClushConfig, ClushConfigError from ClusterShell.CLI.Display import * from ClusterShell.CLI.OptionParser import OptionParser class CLIClushConfigTest(unittest.TestCase): """This test case performs a complete CLI.Config.ClushConfig verification. Also CLI.OptionParser is used and some parts are verified btw. """ def testClushConfigEmpty(self): """test CLI.Config.ClushConfig (empty)""" f = tempfile.NamedTemporaryFile(prefix='testclushconfig') f.write(""" """) parser = OptionParser("dummy") parser.install_display_options(verbose_options=True) parser.install_ssh_options() options, _ = parser.parse_args([]) config = ClushConfig(options, filename=f.name) self.assert_(config != None) self.assertEqual(config.color, WHENCOLOR_CHOICES[-1]) self.assertEqual(config.verbosity, VERB_STD) self.assertEqual(config.fanout, 64) self.assertEqual(config.node_count, True) self.assertEqual(config.connect_timeout, 30) self.assertEqual(config.command_timeout, 0) self.assertEqual(config.ssh_user, None) self.assertEqual(config.ssh_path, None) self.assertEqual(config.ssh_options, None) f.close() def testClushConfigAlmostEmpty(self): """test CLI.Config.ClushConfig (almost empty)""" f = tempfile.NamedTemporaryFile(prefix='testclushconfig') f.write(""" [Main] """) parser = OptionParser("dummy") parser.install_display_options(verbose_options=True) parser.install_ssh_options() options, _ = parser.parse_args([]) config = ClushConfig(options, filename=f.name) self.assert_(config != None) 
self.assertEqual(config.color, WHENCOLOR_CHOICES[-1]) self.assertEqual(config.verbosity, VERB_STD) self.assertEqual(config.node_count, True) self.assertEqual(config.fanout, 64) self.assertEqual(config.connect_timeout, 30) self.assertEqual(config.command_timeout, 0) self.assertEqual(config.ssh_user, None) self.assertEqual(config.ssh_path, None) self.assertEqual(config.ssh_options, None) f.close() def testClushConfigDefault(self): """test CLI.Config.ClushConfig (default)""" f = tempfile.NamedTemporaryFile(prefix='testclushconfig') f.write(""" [Main] fanout: 42 connect_timeout: 14 command_timeout: 0 history_size: 100 color: auto verbosity: 1 #ssh_user: root #ssh_path: /usr/bin/ssh #ssh_options: -oStrictHostKeyChecking=no """) f.flush() parser = OptionParser("dummy") parser.install_display_options(verbose_options=True) parser.install_ssh_options() options, _ = parser.parse_args([]) config = ClushConfig(options, filename=f.name) self.assert_(config != None) display = Display(options, config) self.assert_(display != None) display.vprint(VERB_STD, "test") display.vprint(VERB_DEBUG, "shouldn't see this") self.assertEqual(config.color, WHENCOLOR_CHOICES[2]) self.assertEqual(config.verbosity, VERB_STD) self.assertEqual(config.node_count, True) self.assertEqual(config.fanout, 42) self.assertEqual(config.connect_timeout, 14) self.assertEqual(config.command_timeout, 0) self.assertEqual(config.ssh_user, None) self.assertEqual(config.ssh_path, None) self.assertEqual(config.ssh_options, None) f.close() def testClushConfigFull(self): """test CLI.Config.ClushConfig (full)""" f = tempfile.NamedTemporaryFile(prefix='testclushconfig') f.write(""" [Main] fanout: 42 connect_timeout: 14 command_timeout: 0 history_size: 100 color: auto node_count: yes verbosity: 1 ssh_user: root ssh_path: /usr/bin/ssh ssh_options: -oStrictHostKeyChecking=no """) f.flush() parser = OptionParser("dummy") parser.install_display_options(verbose_options=True) parser.install_ssh_options() options, _ = 
parser.parse_args([]) config = ClushConfig(options, filename=f.name) self.assert_(config != None) self.assertEqual(config.color, WHENCOLOR_CHOICES[2]) self.assertEqual(config.verbosity, VERB_STD) self.assertEqual(config.node_count, True) self.assertEqual(config.fanout, 42) self.assertEqual(config.connect_timeout, 14) self.assertEqual(config.command_timeout, 0) self.assertEqual(config.ssh_user, "root") self.assertEqual(config.ssh_path, "/usr/bin/ssh") self.assertEqual(config.ssh_options, "-oStrictHostKeyChecking=no") f.close() def testClushConfigError(self): """test CLI.Config.ClushConfig (error)""" f = tempfile.NamedTemporaryFile(prefix='testclushconfig') f.write(""" [Main] fanout: 3.2 connect_timeout: foo command_timeout: bar history_size: 100 color: maybe node_count: 3 verbosity: bar ssh_user: root ssh_path: /usr/bin/ssh ssh_options: -oStrictHostKeyChecking=no """) f.flush() parser = OptionParser("dummy") parser.install_display_options(verbose_options=True) parser.install_ssh_options() options, _ = parser.parse_args([]) config = ClushConfig(options, filename=f.name) self.assert_(config != None) try: c = config.color self.fail("Exception ClushConfigError not raised (color)") except ClushConfigError: pass self.assertEqual(config.verbosity, 0) # probably for compatibility try: f = config.fanout self.fail("Exception ClushConfigError not raised (fanout)") except ClushConfigError: pass try: f = config.node_count self.fail("Exception ClushConfigError not raised (node_count)") except ClushConfigError: pass try: f = config.fanout except ClushConfigError, e: self.assertEqual(str(e)[0:20], "(Config Main.fanout)") try: t = config.connect_timeout self.fail("Exception ClushConfigError not raised (connect_timeout)") except ClushConfigError: pass try: m = config.command_timeout self.fail("Exception ClushConfigError not raised (command_timeout)") except ClushConfigError: pass f.close() def testClushConfigSetRlimit(self): """test CLI.Config.ClushConfig (setrlimit)""" soft, hard = 
resource.getrlimit(resource.RLIMIT_NOFILE) hard2 = min(32768, hard) f = tempfile.NamedTemporaryFile(prefix='testclushconfig') f.write(""" [Main] fanout: 42 connect_timeout: 14 command_timeout: 0 history_size: 100 color: auto fd_max: %d verbosity: 1 """ % hard2) f.flush() parser = OptionParser("dummy") parser.install_display_options(verbose_options=True) parser.install_ssh_options() options, _ = parser.parse_args([]) config = ClushConfig(options, filename=f.name) self.assert_(config != None) display = Display(options, config) self.assert_(display != None) # force a lower soft limit resource.setrlimit(resource.RLIMIT_NOFILE, (hard2/2, hard)) # max_fdlimit should increase soft limit again set_fdlimit(config.fd_max, display) # verify soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE) self.assertEqual(soft, hard2) f.close() def testClushConfigDefaultWithOptions(self): """test CLI.Config.ClushConfig (default with options)""" f = tempfile.NamedTemporaryFile(prefix='testclushconfig') f.write(""" [Main] fanout: 42 connect_timeout: 14 command_timeout: 0 history_size: 100 color: auto verbosity: 1 #ssh_user: root #ssh_path: /usr/bin/ssh #ssh_options: -oStrictHostKeyChecking=no """) f.flush() parser = OptionParser("dummy") parser.install_display_options(verbose_options=True) parser.install_ssh_options() options, _ = parser.parse_args(["-f", "36", "-u", "3", "-t", "7", "--user", "foobar", "--color", "always", "-d", "-v", "-q", "-o", "-oSomething"]) config = ClushConfig(options, filename=f.name) self.assert_(config != None) display = Display(options, config) self.assert_(display != None) display.vprint(VERB_STD, "test") display.vprint(VERB_DEBUG, "test") self.assertEqual(config.color, WHENCOLOR_CHOICES[1]) self.assertEqual(config.verbosity, VERB_DEBUG) # takes biggest self.assertEqual(config.fanout, 36) self.assertEqual(config.connect_timeout, 7) self.assertEqual(config.command_timeout, 3) self.assertEqual(config.ssh_user, "foobar") self.assertEqual(config.ssh_path, None) 
self.assertEqual(config.ssh_options, "-oSomething") f.close() def testClushConfigWithInstalledConfig(self): """test CLI.Config.ClushConfig (installed config required)""" # This test needs installed configuration files (needed for # maximum coverage). parser = OptionParser("dummy") parser.install_display_options(verbose_options=True) parser.install_ssh_options() options, _ = parser.parse_args([]) config = ClushConfig(options) self.assert_(config != None) if __name__ == '__main__': suites = [unittest.TestLoader().loadTestsFromTestCase(CLIClushConfigTest)] unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite(suites)) clustershell-1.6/tests/CLIOptionParserTest.py0000644000130500135250000000361311741571247020757 0ustar thiellgpocre#!/usr/bin/env python # ClusterShell.CLI.OptionParser test suite # Written by S. Thiell 2010-09-25 """Unit test for CLI.OptionParser""" from optparse import OptionConflictError import os import sys import tempfile import unittest sys.path.insert(0, '../lib') from ClusterShell.CLI.OptionParser import OptionParser class CLIOptionParserTest(unittest.TestCase): """This test case performs a complete CLI.OptionParser verification. 
""" def testOptionParser(self): """test CLI.OptionParser (1)""" parser = OptionParser("dummy") parser.install_nodes_options() parser.install_display_options(verbose_options=True) parser.install_filecopy_options() parser.install_ssh_options() options, _ = parser.parse_args([]) def testOptionParser2(self): """test CLI.OptionParser (2)""" parser = OptionParser("dummy") parser.install_nodes_options() parser.install_display_options(verbose_options=True, separator_option=True) parser.install_filecopy_options() parser.install_ssh_options() options, _ = parser.parse_args([]) def testOptionParserConflicts(self): """test CLI.OptionParser (conflicting options)""" parser = OptionParser("dummy") parser.install_nodes_options() parser.install_display_options(dshbak_compat=True) self.assertRaises(OptionConflictError, parser.install_filecopy_options) def testOptionParserClubak(self): """test CLI.OptionParser for clubak""" parser = OptionParser("dummy") parser.install_nodes_options() parser.install_display_options(separator_option=True, dshbak_compat=True) options, _ = parser.parse_args([]) if __name__ == '__main__': suites = [unittest.TestLoader().loadTestsFromTestCase(CLIOptionParserTest)] unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite(suites)) clustershell-1.6/tests/TaskLocalTest.py0000644000130500135250000006451511741571247017667 0ustar thiellgpocre#!/usr/bin/env python # ClusterShell (local) test suite # Written by S. 
Thiell 2008-04-09 """Unit test for ClusterShell Task (local)""" import copy import os import signal import sys import time import unittest sys.path.insert(0, '../lib') import ClusterShell from ClusterShell.Event import EventHandler from ClusterShell.NodeSet import NodeSet from ClusterShell.Task import * from ClusterShell.Worker.Worker import WorkerSimple, WorkerError from ClusterShell.Worker.Worker import WorkerBadArgumentError import socket import threading import tempfile def _test_print_debug(task, s): # Use custom task info (prefix 'user_' is recommended) task.set_info("user_print_debug_last", s) class TaskLocalTest(unittest.TestCase): def testSimpleCommand(self): """test simple command""" task = task_self() self.assert_(task != None) # init worker worker = task.shell("/bin/hostname") self.assert_(worker != None) # run task task.resume() def testSimpleDualTask(self): """test simple task doing 2 sequential jobs""" task0 = task_self() self.assert_(task0 != None) worker1 = task0.shell("/bin/hostname") worker2 = task0.shell("/bin/uname -a") task0.resume() b1 = copy.copy(worker1.read()) b2 = copy.copy(worker2.read()) task1 = task_self() self.assert_(task1 is task0) worker1 = task1.shell("/bin/hostname") self.assert_(worker1 != None) worker2 = task1.shell("/bin/uname -a") self.assert_(worker2 != None) task1.resume() self.assert_(worker2.read() == b2) self.assert_(worker1.read() == b1) def testSimpleCommandNoneArgs(self): """test simple command with args=None""" task = task_self() self.assert_(task != None) # init worker worker = task.shell("/bin/hostname", nodes=None, handler=None) self.assert_(worker != None) # run task task.resume() def testSimpleMultipleCommands(self): """test and verify results of 100 commands""" task = task_self() self.assert_(task != None) # run commands workers = [] for i in range(0, 100): workers.append(task.shell("/bin/hostname")) task.resume() # verify results hn = socket.gethostname() for i in range(0, 100): t_hn = 
workers[i].read().splitlines()[0] self.assertEqual(t_hn, hn) def testHugeOutputCommand(self): """test huge output command""" task = task_self() self.assert_(task != None) # init worker worker = task.shell("python test_command.py --test huge --rc 0") self.assert_(worker != None) # run task task.resume() self.assertEqual(worker.retcode(), 0) self.assertEqual(len(worker.read()), 699999) # task configuration def testTaskInfo(self): """test task info""" task = task_self() self.assert_(task != None) fanout = task.info("fanout") self.assertEqual(fanout, Task._std_info["fanout"]) def testSimpleCommandTimeout(self): """test simple command timeout""" task = task_self() self.assert_(task != None) # init worker worker = task.shell("/bin/sleep 30") self.assert_(worker != None) # run task self.assertRaises(TimeoutError, task.resume, 3) def testSimpleCommandNoTimeout(self): """test simple command exiting before timeout""" task = task_self() self.assert_(task != None) # init worker worker = task.shell("/bin/sleep 3") self.assert_(worker != None) try: # run task task.resume(5) except TimeoutError: self.fail("did detect timeout") def testSimpleCommandNoTimeout(self): """test simple command exiting just before timeout""" task = task_self() self.assert_(task != None) # init worker worker = task.shell("/bin/usleep 900000") self.assert_(worker != None) try: # run task task.resume(1) except TimeoutError: self.fail("did detect timeout") def testWorkersTimeout(self): """test workers with timeout""" task = task_self() self.assert_(task != None) # init worker worker = task.shell("/bin/sleep 6", timeout=3) self.assert_(worker != None) worker = task.shell("/bin/sleep 6", timeout=2) self.assert_(worker != None) try: # run task task.resume() except TimeoutError: self.fail("did detect timeout") self.assert_(worker.did_timeout()) def testWorkersTimeout2(self): """test workers with timeout (more)""" task = task_self() self.assert_(task != None) worker = task.shell("/bin/sleep 10", timeout=5) 
self.assert_(worker != None) worker = task.shell("/bin/sleep 10", timeout=3) self.assert_(worker != None) try: # run task task.resume() except TimeoutError: self.fail("did detect task timeout") def testWorkersAndTaskTimeout(self): """test task and workers with timeout""" task = task_self() self.assert_(task != None) worker = task.shell("/bin/sleep 10", timeout=5) self.assert_(worker != None) worker = task.shell("/bin/sleep 10", timeout=3) self.assert_(worker != None) self.assertRaises(TimeoutError, task.resume, 2) def testLocalEmptyBuffer(self): """test task local empty buffer""" task = task_self() self.assert_(task != None) task.shell("true", key="empty") task.resume() self.assertEqual(task.key_buffer("empty"), '') for buf, keys in task.iter_buffers(): self.assert_(False) def testLocalEmptyError(self): """test task local empty error buffer""" task = task_self() self.assert_(task != None) task.shell("true", key="empty") task.resume() self.assertEqual(task.key_error("empty"), '') for buf, keys in task.iter_errors(): self.assert_(False) def testTaskKeyErrors(self): """test some task methods raising KeyError""" task = task_self() self.assert_(task != None) task.shell("true", key="dummy") task.resume() # task.key_retcode raises KeyError self.assertRaises(KeyError, task.key_retcode, "not_known") # unlike task.key_buffer/error self.assertEqual(task.key_buffer("not_known"), '') self.assertEqual(task.key_error("not_known"), '') def testLocalSingleLineBuffers(self): """test task local single line buffers gathering""" task = task_self() self.assert_(task != None) task.shell("/bin/echo foo", key="foo") task.shell("/bin/echo bar", key="bar") task.shell("/bin/echo bar", key="bar2") task.shell("/bin/echo foobar", key="foobar") task.shell("/bin/echo foobar", key="foobar2") task.shell("/bin/echo foobar", key="foobar3") task.resume() self.assert_(task.key_buffer("foobar") == "foobar") cnt = 3 for buf, keys in task.iter_buffers(): cnt -= 1 if buf == "foo": 
self.assertEqual(len(keys), 1) self.assertEqual(keys[0], "foo") elif buf == "bar": self.assertEqual(len(keys), 2) self.assert_(keys[0] == "bar" or keys[1] == "bar") elif buf == "foobar": self.assertEqual(len(keys), 3) self.assertEqual(cnt, 0) def testLocalBuffers(self): """test task local multi-lines buffers gathering""" task = task_self() self.assert_(task != None) task.shell("/usr/bin/printf 'foo\nbar\n'", key="foobar") task.shell("/usr/bin/printf 'foo\nbar\n'", key="foobar2") task.shell("/usr/bin/printf 'foo\nbar\n'", key="foobar3") task.shell("/usr/bin/printf 'foo\nbar\nxxx\n'", key="foobarX") task.shell("/usr/bin/printf 'foo\nfuu\n'", key="foofuu") task.shell("/usr/bin/printf 'faa\nber\n'", key="faaber") task.shell("/usr/bin/printf 'foo\nfuu\n'", key="foofuu2") task.resume() cnt = 4 for buf, keys in task.iter_buffers(): cnt -= 1 if buf == "faa\nber\n": self.assertEqual(len(keys), 1) self.assert_(keys[0].startswith("faaber")) elif buf == "foo\nfuu\n": self.assertEqual(len(keys), 2) self.assert_(keys[0].startswith("foofuu")) elif buf == "foo\nbar\n": self.assertEqual(len(keys), 3) elif buf == "foo\nbar\nxxx\n": self.assertEqual(len(keys), 1) self.assert_(keys[0].startswith("foobarX")) self.assert_(keys[0].startswith("foobar")) elif buf == "foo\nbar\nxxx\n": self.assertEqual(len(keys), 1) self.assert_(keys[0].startswith("foobarX")) self.assertEqual(cnt, 0) def testLocalRetcodes(self): """test task with local return codes""" task = task_self() self.assert_(task != None) # 0 ['worker0'] # 1 ['worker1'] # 2 ['worker2'] # 3 ['worker3bis', 'worker3'] # 4 ['worker4'] # 5 ['worker5bis', 'worker5'] task.shell("true", key="worker0") task.shell("false", key="worker1") task.shell("/bin/sh -c 'exit 1'", key="worker1bis") task.shell("/bin/sh -c 'exit 2'", key="worker2") task.shell("/bin/sh -c 'exit 3'", key="worker3") task.shell("/bin/sh -c 'exit 3'", key="worker3bis") task.shell("/bin/sh -c 'exit 4'", key="worker4") task.shell("/bin/sh -c 'exit 1'", key="worker4") 
task.shell("/bin/sh -c 'exit 5'", key="worker5") task.shell("/bin/sh -c 'exit 5'", key="worker5bis") task.resume() # test key_retcode(key) self.assertEqual(task.key_retcode("worker2"), 2) # single self.assertEqual(task.key_retcode("worker4"), 4) # multiple self.assertRaises(KeyError, task.key_retcode, "worker9") # error cnt = 6 for rc, keys in task.iter_retcodes(): cnt -= 1 if rc == 0: self.assertEqual(len(keys), 1) self.assert_(keys[0] == "worker0" ) elif rc == 1: self.assertEqual(len(keys), 3) self.assert_(keys[0] in ("worker1", "worker1bis", "worker4")) elif rc == 2: self.assertEqual(len(keys), 1) self.assert_(keys[0] == "worker2" ) elif rc == 3: self.assertEqual(len(keys), 2) self.assert_(keys[0] in ("worker3", "worker3bis")) elif rc == 4: self.assertEqual(len(keys), 1) self.assert_(keys[0] == "worker4" ) elif rc == 5: self.assertEqual(len(keys), 2) self.assert_(keys[0] in ("worker5", "worker5bis")) self.assertEqual(cnt, 0) # test max retcode API self.assertEqual(task.max_retcode(), 5) def testCustomPrintDebug(self): """test task with custom print debug callback""" task = task_self() self.assert_(task != None) # first test that simply changing print_debug doesn't enable debug default_print_debug = task.info("print_debug") try: task.set_info("print_debug", _test_print_debug) task.shell("true") task.resume() self.assertEqual(task.info("user_print_debug_last"), None) # with debug enabled, it should work task.set_info("debug", True) task.shell("true") task.resume() self.assertEqual(task.info("user_print_debug_last"), "POPEN: true") # remove debug task.set_info("debug", False) # re-run for default print debug callback code coverage task.shell("true") task.resume() finally: # restore default print_debug task.set_info("debug", False) task.set_info("print_debug", default_print_debug) def testLocalRCBufferGathering(self): """test task local rc+buffers gathering""" task = task_self() self.assert_(task != None) task.shell("/usr/bin/printf 'foo\nbar\n' && exit 1", 
key="foobar5") task.shell("/usr/bin/printf 'foo\nbur\n' && exit 1", key="foobar2") task.shell("/usr/bin/printf 'foo\nbar\n' && exit 1", key="foobar3") task.shell("/usr/bin/printf 'foo\nfuu\n' && exit 5", key="foofuu") task.shell("/usr/bin/printf 'foo\nbar\n' && exit 4", key="faaber") task.shell("/usr/bin/printf 'foo\nfuu\n' && exit 1", key="foofuu2") task.resume() foobur = "foo\nbur" cnt = 5 for rc, keys in task.iter_retcodes(): for buf, keys in task.iter_buffers(keys): cnt -= 1 if buf == "foo\nbar": self.assert_(rc == 1 or rc == 4) elif foobur == buf: self.assertEqual(rc, 1) elif "foo\nfuu" == buf: self.assert_(rc == 1 or rc == 5) else: self.fail("invalid buffer returned") self.assertEqual(cnt, 0) def testLocalBufferRCGathering(self): """test task local buffers+rc gathering""" task = task_self() self.assert_(task != None) task.shell("/usr/bin/printf 'foo\nbar\n' && exit 1", key="foobar5") task.shell("/usr/bin/printf 'foo\nbur\n' && exit 1", key="foobar2") task.shell("/usr/bin/printf 'foo\nbar\n' && exit 1", key="foobar3") task.shell("/usr/bin/printf 'foo\nfuu\n' && exit 5", key="foofuu") task.shell("/usr/bin/printf 'foo\nbar\n' && exit 4", key="faaber") task.shell("/usr/bin/printf 'foo\nfuu\n' && exit 1", key="foofuu2") task.resume() cnt = 9 for buf, keys in task.iter_buffers(): for rc, keys in task.iter_retcodes(keys): # same checks as testLocalRCBufferGathering cnt -= 1 if buf == "foo\nbar\n": self.assert_(rc == 1 and rc == 4) elif buf == "foo\nbur\n": self.assertEqual(rc, 1) elif buf == "foo\nbuu\n": self.assertEqual(rc, 5) self.assertEqual(cnt, 0) def testLocalWorkerWrites(self): """test worker writes (i)""" # Simple test: we write to a cat process and see if read matches. 
task = task_self() self.assert_(task != None) worker = task.shell("cat") # write first line worker.write("foobar\n") # write second line worker.write("deadbeaf\n") worker.set_write_eof() task.resume() self.assertEqual(worker.read(), "foobar\ndeadbeaf") def testLocalWorkerWritesBcExample(self): """test worker writes (ii)""" # Other test: write a math statement to a bc process and check # for the result. task = task_self() self.assert_(task != None) worker = task.shell("bc -q") # write statement worker.write("2+2\n") worker.set_write_eof() # execute task.resume() # read result self.assertEqual(worker.read(), "4") def testEscape(self): """test local worker (ssh) cmd with escaped variable""" task = task_self() self.assert_(task != None) worker = task.shell("export CSTEST=foobar; /bin/echo \$CSTEST | sed 's/\ foo/bar/'") # execute task.resume() # read result self.assertEqual(worker.read(), "$CSTEST") def testEscape2(self): """test local worker (ssh) cmd with non-escaped variable""" task = task_self() self.assert_(task != None) worker = task.shell("export CSTEST=foobar; /bin/echo $CSTEST | sed 's/\ foo/bar/'") # execute task.resume() # read result self.assertEqual(worker.read(), "foobar") def testEngineClients(self): """test Engine.clients() [private]""" task = task_self() self.assert_(task != None) worker = task.shell("/bin/hostname") self.assert_(worker != None) self.assertEqual(len(task._engine.clients()), 1) task.resume() def testEnginePorts(self): """test Engine.ports() [private]""" task = task_self() self.assert_(task != None) worker = task.shell("/bin/hostname") self.assert_(worker != None) self.assertEqual(len(task._engine.ports()), 1) task.resume() def testSimpleCommandAutoclose(self): """test simple command (autoclose)""" task = task_self() self.assert_(task != None) worker = task.shell("/bin/sleep 3; /bin/uname", autoclose=True) self.assert_(worker != None) task.resume() self.assertEqual(worker.read(), None) def testTwoSimpleCommandsAutoclose(self): """test 
two simple commands (one autoclosing)""" task = task_self() self.assert_(task != None) worker1 = task.shell("/bin/sleep 2; /bin/echo ok") worker2 = task.shell("/bin/sleep 3; /bin/uname", autoclose=True) self.assert_(worker2 != None) task.resume() self.assertEqual(worker1.read(), "ok") self.assertEqual(worker2.read(), None) def testLocalWorkerErrorBuffers(self): """test task local stderr worker buffers""" task = task_self() self.assert_(task != None) w1 = task.shell("/usr/bin/printf 'foo bar\n' 1>&2", key="foobar", stderr=True) w2 = task.shell("/usr/bin/printf 'foo\nbar\n' 1>&2", key="foobar2", stderr=True) task.resume() self.assertEqual(w1.error(), 'foo bar') self.assertEqual(w2.error(), 'foo\nbar') def testLocalErrorBuffers(self): """test task local stderr buffers gathering""" task = task_self() self.assert_(task != None) task.shell("/usr/bin/printf 'foo\nbar\n' 1>&2", key="foobar", stderr=True) task.shell("/usr/bin/printf 'foo\nbar\n' 1>&2", key="foobar2", stderr=True) task.shell("/usr/bin/printf 'foo\nbar\n 1>&2'", key="foobar3", stderr=True) task.shell("/usr/bin/printf 'foo\nbar\nxxx\n' 1>&2", key="foobarX", stderr=True) task.shell("/usr/bin/printf 'foo\nfuu\n' 1>&2", key="foofuu", stderr=True) task.shell("/usr/bin/printf 'faa\nber\n' 1>&2", key="faaber", stderr=True) task.shell("/usr/bin/printf 'foo\nfuu\n' 1>&2", key="foofuu2", stderr=True) task.resume() cnt = 4 for buf, keys in task.iter_errors(): cnt -= 1 if buf == "faa\nber\n": self.assertEqual(len(keys), 1) self.assert_(keys[0].startswith("faaber")) elif buf == "foo\nfuu\n": self.assertEqual(len(keys), 2) self.assert_(keys[0].startswith("foofuu")) elif buf == "foo\nbar\n": self.assertEqual(len(keys), 3) self.assert_(keys[0].startswith("foobar")) elif buf == "foo\nbar\nxxx\n": self.assertEqual(len(keys), 1) self.assert_(keys[0].startswith("foobarX")) self.assertEqual(cnt, 0) def testTaskPrintDebug(self): """test task default print_debug""" task = task_self() self.assert_(task != None) # simple test, just 
run a task with debug on to improve test # code coverage task.set_info("debug", True) worker = task.shell("/bin/echo test") self.assert_(worker != None) task.resume() task.set_info("debug", False) def testTaskAbortSelf(self): """test task abort self (outside handler)""" task = task_self() self.assert_(task != None) # abort(False) keeps current task_self() object task.abort() self.assert_(task == task_self()) # abort(True) unbinds current task_self() object task.abort(True) self.assert_(task != task_self()) # retry task = task_self() self.assert_(task != None) worker = task.shell("/bin/echo shouldnt see that") task.abort() self.assert_(task == task_self()) def testTaskAbortHandler(self): """test task abort self (inside handler)""" class AbortOnReadTestHandler(EventHandler): def ev_read(self, worker): self.has_ev_read = True worker.task.abort() assert False, "Shouldn't reach this line" task = task_self() self.assert_(task != None) eh = AbortOnReadTestHandler() eh.has_ev_read = False task.shell("/bin/echo test", handler=eh) task.resume() self.assert_(eh.has_ev_read) def testWorkerSetKey(self): """test worker set_key()""" task = task_self() self.assert_(task != None) task.shell("/bin/echo foo", key="foo") worker = task.shell("/bin/echo foobar") worker.set_key("bar") task.resume() self.assert_(task.key_buffer("bar") == "foobar") def testWorkerSimpleStdin(self): """test WorkerSimple (stdin)""" task = task_self() self.assert_(task != None) file_reader = sys.stdin worker = WorkerSimple(file_reader, None, None, "stdin", None, 0, True) self.assert_(worker != None) task.schedule(worker) task.resume() # FIXME: reconsider this kind of test (which now must fail) especially # when using epoll engine, as soon as testsuite is improved (#95). 
#def testWorkerSimpleFile(self): # """test WorkerSimple (file)""" # task = task_self() # self.assert_(task != None) # # use tempfile # tmpfile = tempfile.TemporaryFile() # tmpfile.write("one line without EOL") # tmpfile.seek(0) # worker = WorkerSimple(tmpfile, None, None, "file", None, 0, True) # self.assert_(worker != None) # task.schedule(worker) # task.resume() # self.assertEqual(worker.read(), "one line without EOL") def testInterruptEngine(self): """test Engine signal interruption""" class KillerThread(threading.Thread): def run(self): time.sleep(1) os.kill(self.pidkill, signal.SIGUSR1) task_wait() kth = KillerThread() kth.pidkill = os.getpid() task = task_self() self.assert_(task != None) signal.signal(signal.SIGUSR1, lambda x, y: None) task.shell("/bin/sleep 2", timeout=5) kth.start() task.resume() def testShellDelayedIO(self): """test delayed io in event handler""" class TestDelayedHandler(EventHandler): def __init__(self, target_worker=None): self.target_worker = target_worker self.counter = 0 def ev_read(self, worker): self.counter += 1 if self.counter == 100: worker.write("another thing to read\n") worker.set_write_eof() def ev_timer(self, timer): self.target_worker.write("something to read\n" * 300) task = task_self() hdlr = TestDelayedHandler() reader = task.shell("cat", handler=hdlr) timer = task.timer(0.6, handler=TestDelayedHandler(reader)) task.resume() self.assertEqual(hdlr.counter, 301) def testSimpleCommandReadNoEOL(self): """test simple command read without EOL""" task = task_self() self.assert_(task != None) # init worker worker = task.shell("/bin/echo -n okay") self.assert_(worker != None) # run task task.resume() self.assertEqual(worker.read(), "okay") def testLocalFanout(self): """test local task fanout""" task = task_self() self.assert_(task != None) fanout = task.info("fanout") try: task.set_info("fanout", 3) # Test #1: simple for i in range(0, 10): worker = task.shell("/bin/echo test %d" % i) self.assert_(worker != None) task.resume() # 
Test #2: fanout change during run class TestFanoutChanger(EventHandler): def ev_timer(self, timer): task_self().set_info("fanout", 1) timer = task.timer(2.0, handler=TestFanoutChanger()) for i in range(0, 10): worker = task.shell("/bin/echo sleep 1") self.assert_(worker != None) task.resume() finally: # restore original fanout value task.set_info("fanout", fanout) def testPopenBadArgumentOption(self): """test WorkerPopen constructor bad argument""" # Check code < 1.4 compatibility self.assertRaises(WorkerBadArgumentError, WorkerPopen, None, None) # As of 1.4, ValueError is raised for missing parameter self.assertRaises(ValueError, WorkerPopen, None, None) # 1.4+ def testWorkerAbort(self): """test local Worker abort() on timer""" task = task_self() self.assert_(task != None) class AbortOnTimer(EventHandler): def __init__(self, worker): EventHandler.__init__(self) self.ext_worker = worker self.testtimer = False def ev_timer(self, timer): self.ext_worker.abort() self.testtimer = True aot = AbortOnTimer(task.shell("sleep 10")) self.assertEqual(aot.testtimer, False) task.timer(1.0, handler=aot) task.resume() self.assertEqual(aot.testtimer, True) def testWorkerAbortSanity(self): """test local Worker abort() (sanity)""" task = task_self() worker = task.shell("sleep 1") worker.abort() # test noop abort() on unscheduled worker worker = WorkerPopen("sleep 1") worker.abort() if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(TaskLocalTest) unittest.TextTestRunner(verbosity=2).run(suite) clustershell-1.6/tests/NodeSetErrorTest.py0000644000130500135250000000531111741571247020352 0ustar thiellgpocre#!/usr/bin/env python # ClusterShell.NodeSet.NodeSet error handling test suite # Written by S. 
Thiell 2008-09-28 """Unit test for RangeSet errors""" import copy import sys import unittest sys.path.insert(0, '../lib') from ClusterShell.NodeSet import NodeSet from ClusterShell.NodeSet import NodeSetParseError from ClusterShell.NodeSet import NodeSetParseRangeError class NodeSetErrorTest(unittest.TestCase): def _testNS(self, pattern, expected_exc): try: nodeset = NodeSet(pattern) print nodeset except NodeSetParseError, e: self.assertEqual(e.__class__, expected_exc) return except: raise self.assert_(0, "error not detected/no exception raised [pattern=%s]" % pattern) def testBadRangeUsages(self): """test NodeSet parse errors in range""" self._testNS("", NodeSetParseError) self._testNS("nova[]", NodeSetParseRangeError) self._testNS("nova[-]", NodeSetParseRangeError) self._testNS("nova[A]", NodeSetParseRangeError) self._testNS("nova[2-5/a]", NodeSetParseRangeError) self._testNS("nova[3/2]", NodeSetParseRangeError) self._testNS("nova[3-/2]", NodeSetParseRangeError) self._testNS("nova[-3/2]", NodeSetParseRangeError) self._testNS("nova[-/2]", NodeSetParseRangeError) self._testNS("nova[4-a/2]", NodeSetParseRangeError) self._testNS("nova[4-3/2]", NodeSetParseRangeError) self._testNS("nova[4-5/-2]", NodeSetParseRangeError) self._testNS("nova[4-2/-2]", NodeSetParseRangeError) self._testNS("nova[004-002]", NodeSetParseRangeError) self._testNS("nova[3-59/2,102a]", NodeSetParseRangeError) self._testNS("nova[3-59/2,,102]", NodeSetParseRangeError) self._testNS("nova%s" % ("3" * 101), NodeSetParseRangeError) def testBadUsages(self): """test NodeSet other parse errors""" self._testNS("nova[3-59/2,102", NodeSetParseError) self._testNS("nova3,nova4,,nova6", NodeSetParseError) self._testNS("nova3,nova4,5,nova6", NodeSetParseError) self._testNS("nova3,nova4,[5-8],nova6", NodeSetParseError) self._testNS("nova6,", NodeSetParseError) self._testNS("nova6[", NodeSetParseError) #self._testNS("nova6]", NodeSetParseError) #self._testNS("nova%s", NodeSetParseError) def 
testTypeSanityCheck(self): """test NodeSet input type sanity check""" self.assertRaises(TypeError, NodeSet, dict()) self.assertRaises(TypeError, NodeSet, list()) if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(NodeSetErrorTest) unittest.TextTestRunner(verbosity=2).run(suite) clustershell-1.6/tests/TaskThreadJoinTest.py0000644000130500135250000000737311741571247020663 0ustar thiellgpocre#!/usr/bin/env python # ClusterShell test suite # Written by S. Thiell 2010-01-16 """Unit test for ClusterShell task's join feature in multithreaded environments""" import sys import time import unittest sys.path.insert(0, '../lib') from ClusterShell.Task import * from ClusterShell.Event import EventHandler class TaskThreadJoinTest(unittest.TestCase): def tearDown(self): task_cleanup() def testThreadTaskWaitWhenRunning(self): """test task_wait() when workers are running""" for i in range(1, 5): task = Task() task.shell("sleep %d" % i) task.resume() task_wait() def testThreadTaskWaitWhenSomeFinished(self): """test task_wait() when some workers finished""" for i in range(1, 5): task = Task() task.shell("sleep %d" % i) task.resume() time.sleep(2) task_wait() def testThreadTaskWaitWhenAllFinished(self): """test task_wait() when all workers finished""" for i in range(1, 3): task = Task() task.shell("sleep %d" % i) task.resume() time.sleep(4) task_wait() def testThreadSimpleTaskSupervisor(self): """test task methods from another thread""" #print "PASS 1" task = Task() task.shell("sleep 3") task.shell("echo testing", key=1) task.resume() task.join() self.assertEqual(task.key_buffer(1), "testing") #print "PASS 2" task.shell("echo ok", key=2) task.resume() task.join() #print "PASS 3" self.assertEqual(task.key_buffer(2), "ok") task.shell("sleep 1 && echo done", key=3) task.resume() task.join() #print "PASS 4" self.assertEqual(task.key_buffer(3), "done") task.abort() def testThreadTaskBuffers(self): """test task data access methods after join()""" task = Task() # 
test data access from main thread # test stderr separated task.set_default("stderr", True) task.shell("echo foobar", key="OUT") task.shell("echo raboof 1>&2", key="ERR") task.resume() task.join() self.assertEqual(task.key_buffer("OUT"), "foobar") self.assertEqual(task.key_error("OUT"), "") self.assertEqual(task.key_buffer("ERR"), "") self.assertEqual(task.key_error("ERR"), "raboof") # test stderr merged task.set_default("stderr", False) task.shell("echo foobar", key="OUT") task.shell("echo raboof 1>&2", key="ERR") task.resume() task.join() self.assertEqual(task.key_buffer("OUT"), "foobar") self.assertEqual(task.key_error("OUT"), "") self.assertEqual(task.key_buffer("ERR"), "raboof") self.assertEqual(task.key_error("ERR"), "") def testThreadTaskUnhandledException(self): """test task unhandled exception in thread""" class TestUnhandledException(Exception): """test exception""" class RaiseOnRead(EventHandler): def ev_read(self, worker): raise TestUnhandledException("you should see this exception") task = Task() # test data access from main thread task.shell("echo raisefoobar", key=1, handler=RaiseOnRead()) task.resume() task.join() self.assertEqual(task.key_buffer(1), "raisefoobar") time.sleep(1) # for pretty display, because unhandled exception # traceback may be sent to stderr after the join() self.assert_(not task.running()) if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(TaskThreadJoinTest) unittest.TextTestRunner(verbosity=2).run(suite) clustershell-1.6/tests/TaskEventTest.py0000644000130500135250000001177511741571247017716 0ustar thiellgpocre#!/usr/bin/env python # ClusterShell (local) test suite # Written by S. 
Thiell 2008-04-09 """Unit test for ClusterShell Task (event-based mode)""" import copy import sys import unittest sys.path.insert(0, '../lib') import ClusterShell from ClusterShell.NodeSet import NodeSet from ClusterShell.Task import * from ClusterShell.Event import EventHandler import socket import thread class TestHandler(EventHandler): def __init__(self): self.reset_asserts() def do_asserts_read_notimeout(self): assert self.did_start, "ev_start not called" assert self.did_read, "ev_read not called" assert not self.did_readerr, "ev_error called" assert self.did_close, "ev_close not called" assert not self.did_timeout, "ev_timeout called" def do_asserts_timeout(self): assert self.did_start, "ev_start not called" assert not self.did_read, "ev_read called" assert not self.did_readerr, "ev_error called" assert self.did_close, "ev_close not called" assert self.did_timeout, "ev_timeout not called" def reset_asserts(self): self.did_start = False self.did_open = False self.did_read = False self.did_readerr = False self.did_close = False self.did_timeout = False def ev_start(self, worker): self.did_start = True def ev_read(self, worker): self.did_read = True assert worker.last_read() == "abcdefghijklmnopqrstuvwxyz" assert worker.last_error() != "abcdefghijklmnopqrstuvwxyz" def ev_error(self, worker): self.did_readerr = True assert worker.last_error() == "errerrerrerrerrerrerrerr" assert worker.last_read() != "errerrerrerrerrerrerrerr" def ev_close(self, worker): self.did_close = True if worker.read(): assert worker.read().startswith("abcdefghijklmnopqrstuvwxyz") def ev_timeout(self, worker): self.did_timeout = True class AbortOnReadHandler(EventHandler): def ev_read(self, worker): worker.abort() class TaskEventTest(unittest.TestCase): def testSimpleEventHandler(self): """test simple event handler""" task = task_self() self.assert_(task != None) eh = TestHandler() # init worker worker = task.shell("./test_command.py --test=cmp_out", handler=eh) self.assert_(worker != None) 
# run task task.resume() eh.do_asserts_read_notimeout() eh.reset_asserts() # re-test # init worker worker = task.shell("./test_command.py --test=cmp_out", handler=eh) self.assert_(worker != None) # run task task.resume() eh.do_asserts_read_notimeout() eh.reset_asserts() def testSimpleEventHandlerWithTaskTimeout(self): """test simple event handler with timeout""" task = task_self() self.assert_(task != None) eh = TestHandler() # init worker worker = task.shell("/bin/sleep 3", handler=eh) self.assert_(worker != None) try: task.resume(2) except TimeoutError: pass else: self.fail("did not detect timeout") eh.do_asserts_timeout() class TInFlyAdder(EventHandler): def ev_read(self, worker): assert worker.task.running() # in-fly workers addition other1 = worker.task.shell("/bin/sleep 1") assert other1 != None other2 = worker.task.shell("/bin/sleep 1") assert other2 != None def testEngineInFlyAdd(self): """test client add while running (in-fly add)""" task = task_self() self.assert_(task != None) eh = self.__class__.TInFlyAdder() worker = task.shell("/bin/uname", handler=eh) self.assert_(worker != None) task.resume() class TWriteOnStart(EventHandler): def ev_start(self, worker): assert worker.task.running() worker.write("foo bar\n") def ev_read(self, worker): assert worker.current_msg == "foo bar" worker.abort() def testWriteOnStartEvent(self): """test write on ev_start""" task = task_self() self.assert_(task != None) task.shell("cat", handler=self.__class__.TWriteOnStart()) task.resume() def testEngineMayReuseFD(self): """test write + worker.abort() on read to reuse FDs""" task = task_self() fanout = task.info("fanout") try: task.set_info("fanout", 1) eh = AbortOnReadHandler() for i in range(10): worker = task.shell("echo ok; sleep 1", handler=eh) worker.write("OK\n") self.assert_(worker is not None) task.resume() finally: task.set_info("fanout", fanout) if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(TaskEventTest) 
unittest.TextTestRunner(verbosity=2).run(suite) clustershell-1.6/tests/MisusageTest.py0000644000130500135250000000361611741571247017562 0ustar thiellgpocre#!/usr/bin/env python # ClusterShell test suite # Written by S. Thiell 2010-02-19 """Unit test for ClusterShell common library misusages""" import sys import unittest sys.path.insert(0, '../lib') from ClusterShell.Event import EventHandler from ClusterShell.Worker.Popen import WorkerPopen from ClusterShell.Worker.Ssh import WorkerSsh from ClusterShell.Worker.Worker import WorkerError from ClusterShell.Task import Task, task_self, AlreadyRunningError class MisusageTest(unittest.TestCase): def testTaskResumedTwice(self): """test library misusage (task_self resumed twice)""" class ResumeAgainHandler(EventHandler): def ev_read(self, worker): worker.task.resume() task = task_self() task.shell("/bin/echo OK", handler=ResumeAgainHandler()) self.assertRaises(AlreadyRunningError, task.resume) def testWorkerNotScheduledLocal(self): """test library misusage (local worker not scheduled)""" task = task_self() worker = WorkerPopen(command="/bin/hostname") task.resume() self.assertRaises(WorkerError, worker.read) def testWorkerNotScheduledDistant(self): """test library misusage (distant worker not scheduled)""" task = task_self() worker = WorkerSsh("localhost", command="/bin/hostname", handler=None, timeout=0) self.assert_(worker != None) task.resume() self.assertRaises(WorkerError, worker.node_buffer, "localhost") def testTaskScheduleTwice(self): """test task worker schedule twice error""" task = task_self() self.assert_(task != None) worker = task.shell("/bin/echo itsme") self.assertRaises(WorkerError, task.schedule, worker) task.abort() if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(MisusageTest) unittest.TextTestRunner(verbosity=2).run(suite) clustershell-1.6/tests/TaskMsgTreeTest.py0000644000130500135250000000663511741571247020202 0ustar thiellgpocre#!/usr/bin/env python # ClusterShell test 
suite # Written by S. Thiell 2010-02-18 """Unit test for ClusterShell TaskMsgTree variants""" import sys import unittest sys.path.insert(0, '../lib') from ClusterShell.Task import Task, TaskMsgTreeError from ClusterShell.Task import task_cleanup, task_self class TaskMsgTreeTest(unittest.TestCase): def tearDown(self): # cleanup task_self between tests to restore defaults task_cleanup() def testEnabledMsgTree(self): """test TaskMsgTree enabled""" task = task_self() self.assert_(task != None) # init worker worker = task.shell("echo foo bar") self.assert_(worker != None) task.set_default('stdout_msgtree', True) # run task task.resume() # should not raise for buf, keys in task.iter_buffers(): pass def testDisabledMsgTree(self): """test TaskMsgTree disabled""" task = task_self() self.assert_(task != None) # init worker worker = task.shell("echo foo bar2") self.assert_(worker != None) task.set_default('stdout_msgtree', False) # run task task.resume() self.assertRaises(TaskMsgTreeError, task.iter_buffers) def testEnabledMsgTreeStdErr(self): """test TaskMsgTree enabled for stderr""" task = task_self() self.assert_(task != None) # init worker worker = task.shell("echo foo bar 1>&2", stderr=True) worker = task.shell("echo just foo bar", stderr=True) self.assert_(worker != None) task.set_default('stderr_msgtree', True) # run task task.resume() # should not raise: for buf, keys in task.iter_errors(): pass # this neither: for buf, keys in task.iter_buffers(): pass def testDisabledMsgTreeStdErr(self): """test TaskMsgTree disabled for stderr""" task = task_self() self.assert_(task != None) # init worker worker = task.shell("echo foo bar2 1>&2", stderr=True) worker = task.shell("echo just foo bar2", stderr=True) self.assert_(worker != None) task.set_default('stderr_msgtree', False) # run task task.resume() # should not raise: for buf, keys in task.iter_buffers(): pass # but this should: self.assertRaises(TaskMsgTreeError, task.iter_errors) def testTaskFlushBuffers(self): """test 
Task.flush_buffers""" task = task_self() self.assert_(task != None) # init worker worker = task.shell("echo foo bar") self.assert_(worker != None) task.set_default('stdout_msgtree', True) # run task task.resume() task.flush_buffers() self.assertEqual(len(list(task.iter_buffers())), 0) def testTaskFlushErrors(self): """test Task.flush_errors""" task = task_self() self.assert_(task != None) # init worker worker = task.shell("echo foo bar 1>&2") self.assert_(worker != None) task.set_default('stderr_msgtree', True) # run task task.resume() task.flush_errors() self.assertEqual(len(list(task.iter_errors())), 0) if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(TaskMsgTreeTest) unittest.TextTestRunner(verbosity=2).run(suite) clustershell-1.6/tests/TaskTimeoutTest.py0000644000130500135250000000216511741571247020254 0ustar thiellgpocre#!/usr/bin/env python # ClusterShell (local) test suite # Written by S. Thiell 2009-02-09 """Unit test for ClusterShell Task/Worker timeout support""" import copy import sys import unittest sys.path.insert(0, '../lib') import ClusterShell from ClusterShell.NodeSet import NodeSet from ClusterShell.Task import * import socket import thread class TaskTimeoutTest(unittest.TestCase): def testWorkersTimeoutBuffers(self): """test worker buffers with timeout""" task = task_self() self.assert_(task != None) worker = task.shell("python test_command.py --timeout=10", timeout=4) self.assert_(worker != None) task.resume() self.assertEqual(worker.read(), """some buffer here...""") test = 1 for buf, keys in task.iter_buffers(): test = 0 self.assertEqual(buf, """some buffer here...""") self.assertEqual(test, 0, "task.iter_buffers() did not work") if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(TaskTimeoutTest) unittest.TextTestRunner(verbosity=2).run(suite) clustershell-1.6/tests/CLIClubakTest.py0000644000130500135250000001650411741571247017536 0ustar thiellgpocre#!/usr/bin/env python # 
scripts/clubak.py tool test suite # Written by S. Thiell 2012-03-22 """Unit test for CLI/Clubak.py""" import sys import unittest from TLib import * from ClusterShell.CLI.Clubak import main class CLIClubakTest(unittest.TestCase): """Unit test class for testing CLI/Clubak.py""" def _clubak_t(self, args, input, expected_stdout, expected_rc=0, expected_stderr=None): CLI_main(self, main, [ 'clubak' ] + args, input, expected_stdout, expected_rc, expected_stderr) def test_000_noargs(self): """test clubak (no argument)""" outfmt = "---------------\n%s\n---------------\n bar\n" self._clubak_t([], "foo: bar\n", outfmt % "foo") self._clubak_t([], "foo space: bar\n", outfmt % "foo space") self._clubak_t([], "foo space1: bar\n", outfmt % "foo space1") self._clubak_t([], "foo space1: bar\nfoo space2: bar", outfmt % "foo space1" + outfmt % "foo space2") self._clubak_t([], ": bar\n", "", 1, "clubak: no node found (\": bar\")\n") self._clubak_t([], "foo[: bar\n", outfmt % "foo[") self._clubak_t([], "]o[o]: bar\n", outfmt % "]o[o]") self._clubak_t([], "foo:\n", "---------------\nfoo\n---------------\n\n") self._clubak_t([], "foo: \n", "---------------\nfoo\n---------------\n \n") def test_001_verbosity(self): """test clubak (-q/-v/-d)""" outfmt = "INPUT foo: bar\n---------------\n%s\n---------------\n bar\n" self._clubak_t(["-d"], "foo: bar\n", outfmt % "foo", 0, "line_mode=False gather=False tree_depth=1\n") self._clubak_t(["-d", "-b"], "foo: bar\n", outfmt % "foo", 0, "line_mode=False gather=True tree_depth=1\n") self._clubak_t(["-d", "-L"], "foo: bar\n", "INPUT foo: bar\nfoo: bar\n", 0, "line_mode=True gather=False tree_depth=1\n") self._clubak_t(["-v"], "foo: bar\n", outfmt % "foo", 0) self._clubak_t(["-v", "-b"], "foo: bar\n", outfmt % "foo", 0) outfmt = "---------------\n%s\n---------------\n bar\n" # no node count with -q self._clubak_t(["-q", "-b"], "foo[1-5]: bar\n", outfmt % "foo[1-5]", 0) def test_002_b(self): """test clubak (gather -b)""" outfmt = 
"---------------\n%s\n---------------\n bar\n" self._clubak_t(["-b"], "foo: bar\n", outfmt % "foo") self._clubak_t(["-b"], "foo space: bar\n", outfmt % "foo space") self._clubak_t(["-b"], "foo space1: bar\n", outfmt % "foo space1") self._clubak_t(["-b"], "foo space1: bar\nfoo space2: bar", outfmt % "foo space[1-2] (2)") self._clubak_t(["-b"], "foo space1: bar\nfoo space2: foo", "---------------\nfoo space1\n---------------\n bar\n---------------\nfoo space2\n---------------\n foo\n") self._clubak_t(["-b"], ": bar\n", "", 1, "clubak: no node found (\": bar\")\n") self._clubak_t(["-b"], "foo[: bar\n", outfmt % "foo[") self._clubak_t(["-b"], "]o[o]: bar\n", outfmt % "]o[o]") self._clubak_t(["-b"], "foo:\n", "---------------\nfoo\n---------------\n\n") self._clubak_t(["-b"], "foo: \n", "---------------\nfoo\n---------------\n \n") def test_003_L(self): """test clubak (line mode -L)""" self._clubak_t(["-L"], "foo: bar\n", "foo: bar\n") self._clubak_t(["-L", "-S", ": "], "foo: bar\n", "foo: bar\n") self._clubak_t(["-bL"], "foo: bar\n", "foo: bar\n") self._clubak_t(["-bL", "-S", ": "], "foo: bar\n", "foo: bar\n") def test_004_N(self): """test clubak (no header -N)""" self._clubak_t(["-N"], "foo: bar\n", "\n bar\n") self._clubak_t(["-NL"], "foo: bar\n", " bar\n") self._clubak_t(["-N", "-S", ": "], "foo: bar\n", "\nbar\n") self._clubak_t(["-bN"], "foo: bar\n", "\n bar\n") self._clubak_t(["-bN", "-S", ": "], "foo: bar\n", "\nbar\n") def test_005_fast(self): """test clubak (fast mode --fast)""" outfmt = "---------------\n%s\n---------------\n bar\n" self._clubak_t(["--fast"], "foo: bar\n", outfmt % "foo") self._clubak_t(["-b", "--fast"], "foo: bar\n", outfmt % "foo") self._clubak_t(["-b", "--fast"], "foo2: bar\nfoo1: bar\nfoo4: bar", outfmt % "foo[1-2,4] (3)") # check conflicting options self._clubak_t(["-L", "--fast"], "foo2: bar\nfoo1: bar\nfoo4: bar", '', 2, "error: incompatible tree options\n") def test_006_tree(self): """test clubak (tree mode --tree)""" outfmt = 
"---------------\n%s\n---------------\n bar\n" self._clubak_t(["--tree"], "foo: bar\n", outfmt % "foo") self._clubak_t(["--tree", "-L"], "foo: bar\n", "foo:\n bar\n") input = """foo1:bar foo2:bar foo1:moo foo1:bla foo2:m00 foo2:bla foo1:abc """ self._clubak_t(["--tree", "-L"], input, "foo[1-2]:\nbar\nfoo2:\n m00\n bla\nfoo1:\n moo\n bla\n abc\n") # check conflicting options self._clubak_t(["--tree", "--fast"], input, '', 2, "error: incompatible tree options\n") def test_007_interpret_keys(self): """test clubak (--interpret-keys)""" outfmt = "---------------\n%s\n---------------\n bar\n" self._clubak_t(["--interpret-keys=auto"], "foo: bar\n", outfmt % "foo") self._clubak_t(["-b", "--interpret-keys=auto"], "foo: bar\n", outfmt % "foo") self._clubak_t(["-b", "--interpret-keys=never"], "foo: bar\n", outfmt % "foo") self._clubak_t(["-b", "--interpret-keys=always"], "foo: bar\n", outfmt % "foo") self._clubak_t(["-b", "--interpret-keys=always"], "foo[1-3]: bar\n", outfmt % "foo[1-3] (3)") self._clubak_t(["-b", "--interpret-keys=auto"], "[]: bar\n", outfmt % "[]") self._clubak_t(["-b", "--interpret-keys=never"], "[]: bar\n", outfmt % "[]") self._clubak_t(["-b", "--interpret-keys=always"], "[]: bar\n", '', 1, "Parse error: empty node name\n") def test_008_color(self): """test clubak (--color)""" outfmt = "---------------\n%s\n---------------\n bar\n" self._clubak_t(["-b"], "foo: bar\n", outfmt % "foo") self._clubak_t(["-b", "--color=never"], "foo: bar\n", outfmt % "foo") self._clubak_t(["-b", "--color=auto"], "foo: bar\n", outfmt % "foo") self._clubak_t(["-L", "--color=always"], "foo: bar\n", "\x1b[34mfoo: \x1b[0m bar\n") self._clubak_t(["-b", "--color=always"], "foo: bar\n", "\x1b[34m---------------\nfoo\n---------------\x1b[0m\n bar\n") def test_009_diff(self): """test clubak (--diff)""" self._clubak_t(["--diff"], "foo1: bar\nfoo2: bar", "") self._clubak_t(["--diff"], "foo1: bar\nfoo2: BAR", "--- foo1\n+++ foo2\n@@ -1,1 +1,1 @@\n- bar\n+ BAR\n") self._clubak_t(["--diff"], 
"foo1: bar\nfoo2: BAR\nfoo3: bar\n", "--- foo[1,3] (2)\n+++ foo2\n@@ -1,1 +1,1 @@\n- bar\n+ BAR\n") self._clubak_t(["--diff", "--color=always"], "foo1: bar\nfoo2: BAR\nfoo3: bar\n", "\x1b[1m--- foo[1,3] (2)\x1b[0m\n\x1b[1m+++ foo2\x1b[0m\n\x1b[36m@@ -1,1 +1,1 @@\x1b[0m\n\x1b[31m- bar\x1b[0m\n\x1b[32m+ BAR\x1b[0m\n") self._clubak_t(["--diff", "-d"], "foo: bar\n", "INPUT foo: bar\n", 0, "line_mode=False gather=True tree_depth=1\n") self._clubak_t(["--diff", "-L"], "foo1: bar\nfoo2: bar", "", 2, "clubak: error: option mismatch (diff not supported in line_mode)\n") if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(CLIClubakTest) unittest.TextTestRunner(verbosity=2).run(suite) clustershell-1.6/tests/CLIDisplayTest.py0000644000130500135250000001003011741571247017726 0ustar thiellgpocre#!/usr/bin/env python # ClusterShell.CLI.Display test suite # Written by S. Thiell 2010-09-25 """Unit test for CLI.Display""" import os import sys import tempfile import unittest sys.path.insert(0, '../lib') from ClusterShell.CLI.Display import Display, WHENCOLOR_CHOICES, VERB_STD from ClusterShell.CLI.OptionParser import OptionParser from ClusterShell.MsgTree import MsgTree from ClusterShell.NodeSet import NodeSet from ClusterShell.NodeUtils import GroupResolverConfig def makeTestFile(text): """Create a temporary file with the provided text.""" f = tempfile.NamedTemporaryFile() f.write(text) f.flush() return f class CLIDisplayTest(unittest.TestCase): """This test case performs a complete CLI.Display verification. Also CLI.OptionParser is used and some parts are verified btw. 
""" def testDisplay(self): """test CLI.Display""" parser = OptionParser("dummy") parser.install_display_options(verbose_options=True) options, _ = parser.parse_args([]) ns = NodeSet("localhost") mtree = MsgTree() mtree.add("localhost", "message0") mtree.add("localhost", "message1") for whencolor in WHENCOLOR_CHOICES: # test whencolor switch for label in [True, False]: # test no-label switch options.label = label options.whencolor = whencolor disp = Display(options) # inhibit output disp.out = open("/dev/null", "w") disp.err = open("/dev/null", "w") self.assert_(disp != None) # test print_* methods... disp.print_line(ns, "foo bar") disp.print_line_error(ns, "foo bar") disp.print_gather(ns, list(mtree.walk())[0][0]) # test also string nodeset as parameter disp.print_gather("localhost", list(mtree.walk())[0][0]) # test line_mode property self.assertEqual(disp.line_mode, False) disp.line_mode = True self.assertEqual(disp.line_mode, True) disp.print_gather("localhost", list(mtree.walk())[0][0]) disp.line_mode = False self.assertEqual(disp.line_mode, False) def testDisplayRegroup(self): """test CLI.Display (regroup)""" parser = OptionParser("dummy") parser.install_display_options(verbose_options=True) options, _ = parser.parse_args(["-r"]) mtree = MsgTree() mtree.add("localhost", "message0") mtree.add("localhost", "message1") disp = Display(options) self.assertEqual(disp.regroup, True) disp.out = open("/dev/null", "w") disp.err = open("/dev/null", "w") self.assert_(disp != None) self.assertEqual(disp.line_mode, False) f = makeTestFile(""" # A comment [Main] default: local [local] map: echo localhost #all: list: echo all #reverse: """) res = GroupResolverConfig(f.name) ns = NodeSet("localhost", resolver=res) # nodeset.regroup() is performed by print_gather() disp.print_gather(ns, list(mtree.walk())[0][0]) def testDisplayClubak(self): """test CLI.Display for clubak""" parser = OptionParser("dummy") parser.install_display_options(separator_option=True, dshbak_compat=True) 
options, _ = parser.parse_args([]) disp = Display(options) self.assertEqual(bool(disp.gather), False) self.assertEqual(disp.line_mode, False) self.assertEqual(disp.label, True) self.assertEqual(disp.regroup, False) self.assertEqual(bool(disp.groupsource), False) self.assertEqual(disp.noprefix, False) self.assertEqual(disp.maxrc, False) self.assertEqual(disp.node_count, True) self.assertEqual(disp.verbosity, VERB_STD) if __name__ == '__main__': suites = [unittest.TestLoader().loadTestsFromTestCase(CLIDisplayTest)] unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite(suites)) clustershell-1.6/tests/TLib.py0000644000130500135250000000420311741571247015770 0ustar thiellgpocre """Unit test small library""" import os import socket import sys import tempfile import time from ConfigParser import ConfigParser from StringIO import StringIO def my_node(): """Helper to get local short hostname.""" return socket.gethostname().split('.')[0] def load_cfg(name): """Load test configuration file as a new ConfigParser""" cfgparser = ConfigParser() cfgparser.read([ \ os.path.expanduser('~/.clustershell/tests/%s' % name), '/etc/clustershell/tests/%s' % name]) return cfgparser def chrono(func): """chrono decorator""" def timing(*args): start = time.time() res = func(*args) print "execution time: %f s" % (time.time() - start) return res return timing def make_temp_file(text, suffix='', dir=None): """Create a temporary file with the provided text.""" f = tempfile.NamedTemporaryFile(suffix=suffix, dir=dir) f.write(text) f.flush() return f def make_temp_dir(): """Create a temporary directory.""" dname = tempfile.mkdtemp() return dname def CLI_main(test, main, args, stdin, expected_stdout, expected_rc=0, expected_stderr=None): """Generic CLI main() direct calling function that allows code coverage checks.""" rc = -1 saved_stdin = sys.stdin saved_stdout = sys.stdout saved_stderr = sys.stderr if stdin is not None: sys.stdin = StringIO(stdin) sys.stdout = out = StringIO() sys.stderr = 
err = StringIO() sys.argv = args try: try: main() except SystemExit, exc: rc = int(str(exc)) finally: sys.stdout = saved_stdout sys.stderr = saved_stderr sys.stdin = saved_stdin if expected_stdout is not None: test.assertEqual(out.getvalue(), expected_stdout) out.close() if expected_stderr is not None: # check the end as stderr messages are often prefixed with argv[0] test.assertTrue(err.getvalue().endswith(expected_stderr), err.getvalue()) if expected_rc is not None: test.assertEqual(rc, expected_rc, "rc=%d err=%s" % (rc, err.getvalue())) err.close() clustershell-1.6/tests/TaskAdvancedTest.py0000644000130500135250000000750011741571247020331 0ustar thiellgpocre#!/usr/bin/env python # ClusterShell (multi-tasks) test suite # Written by S. Thiell 2009-10-26 """Unit test for ClusterShell Task (multi)""" import copy import sys import unittest sys.path.insert(0, '../lib') from ClusterShell.Task import * class TaskAdvancedTest(unittest.TestCase): def tearDown(self): task_cleanup() def testTaskRun(self): """test task.run() behaving like task.resume()""" wrk = task_self().shell("true") task_self().run() def testTaskRunTimeout(self): """test task.run() behaving like task.resume(timeout)""" wrk = task_self().shell("sleep 1") self.assertRaises(TimeoutError, task_self().run, 0.3) wrk = task_self().shell("sleep 1") self.assertRaises(TimeoutError, task_self().run, timeout=0.3) def testTaskShellRunLocal(self): """test task.run() used as a synchronous task.shell() (local)""" wrk = task_self().run("false") self.assertTrue(wrk) self.assertEqual(task_self().max_retcode(), 1) # Timeout in shell() fashion way. 
wrk = task_self().run("sleep 1", timeout=0.3) self.assertTrue(wrk) self.assertEqual(task_self().num_timeout(), 1) def testTaskShellRunDistant(self): """test task.run() used as a synchronous task.shell() (distant)""" wrk = task_self().run("false", nodes="localhost") self.assertTrue(wrk) self.assertEqual(wrk.node_retcode("localhost"), 1) def testTaskEngineUserSelection(self): """test task engine user selection hack""" task_terminate() # Uh ho! It's a test case, not an example! Task._std_default['engine'] = 'select' self.assertEqual(task_self().info('engine'), 'select') task_terminate() def testTaskEngineWrongUserSelection(self): """test task engine wrong user selection hack""" try: task_terminate() # Uh ho! It's a test case, not an example! Task._std_default['engine'] = 'foobar' # Check for KeyError in case of wrong engine request self.assertRaises(KeyError, task_self) finally: Task._std_default['engine'] = 'auto' task_terminate() def testTaskNewThread1(self): """test task in new thread 1""" # create a task in a new thread task = Task() self.assert_(task != None) match = "test" # schedule a command in that task worker = task.shell("/bin/echo %s" % match) # run this task task.resume() # wait for the task to complete task_wait() # verify that the worker has completed self.assertEqual(worker.read(), match) def testTaskInNewThread2(self): """test task in new thread 2""" # create a task in a new thread task = Task() self.assert_(task != None) match = "again" # schedule a command in that task worker = task.shell("/bin/echo %s" % match) # run this task task.resume() # wait for the task to complete task_wait() # verify that the worker has completed self.assertEqual(worker.read(), match) def testTaskInNewThread3(self): """test task in new thread 3""" # create a task in a new thread task = Task() self.assert_(task != None) match = "once again" # schedule a command in that task worker = task.shell("/bin/echo %s" % match) # run this task task.resume() # wait for the task to 
complete task_wait() # verify that the worker has completed self.assertEqual(worker.read(), match) if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(TaskAdvancedTest) unittest.TextTestRunner(verbosity=2).run(suite) clustershell-1.6/tests/RangeSetErrorTest.py0000644000130500135250000000307511741571247020526 0ustar thiellgpocre#!/usr/bin/env python # ClusterShell.NodeSet.RangeSet error handling test suite # Written by S. Thiell 2008-09-28 """Unit test for RangeSet errors""" import copy import sys import unittest sys.path.insert(0, '../lib') from ClusterShell.NodeSet import RangeSet from ClusterShell.NodeSet import RangeSetParseError class RangeSetErrorTest(unittest.TestCase): def _testRS(self, r, exc): try: rset = RangeSet(r) print rset except RangeSetParseError, e: self.assertEqual(RangeSetParseError, exc) return except: raise self.assert_(0, "error not detected/no exception raised") def testBadUsages(self): """test parse errors""" self._testRS("", RangeSetParseError) self._testRS("-", RangeSetParseError) self._testRS("A", RangeSetParseError) self._testRS("2-5/a", RangeSetParseError) self._testRS("3/2", RangeSetParseError) self._testRS("3-/2", RangeSetParseError) self._testRS("-3/2", RangeSetParseError) self._testRS("-/2", RangeSetParseError) self._testRS("4-a/2", RangeSetParseError) self._testRS("4-3/2", RangeSetParseError) self._testRS("4-5/-2", RangeSetParseError) self._testRS("4-2/-2", RangeSetParseError) self._testRS("004-002", RangeSetParseError) self._testRS("3-59/2,102a", RangeSetParseError) if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(RangeSetErrorTest) unittest.TextTestRunner(verbosity=2).run(suite) clustershell-1.6/tests/binary_to_hex.py0000644000130500135250000000023211735120354017756 0ustar thiellgpocre#!/usr/bin/python import sys f = open("foo.gz", "rb") data = f.read(1) while data: sys.stdout.write("%02x" % ord(data)) data = f.read(1) print 
clustershell-1.6/tests/NodeSetGroupTest.py0000644000130500135250000005535411741571247020371 0ustar thiellgpocre#!/usr/bin/env python # ClusterShell.Node* test suite # Written by S. Thiell 2010-03-18 """Unit test for NodeSet with Group support""" import copy import shutil import sys import unittest sys.path.insert(0, '../lib') from TLib import * # Wildcard import for testing purpose import ClusterShell.NodeSet from ClusterShell.NodeSet import * from ClusterShell.NodeUtils import * def makeTestG1(): """Create a temporary group file 1""" f1 = make_temp_file(""" # oss: montana5,montana4 mds: montana6 io: montana[4-6] #42: montana3 compute: montana[32-163] chassis1: montana[32-33] chassis2: montana[34-35] chassis3: montana[36-37] chassis4: montana[38-39] chassis5: montana[40-41] chassis6: montana[42-43] chassis7: montana[44-45] chassis8: montana[46-47] chassis9: montana[48-49] chassis10: montana[50-51] chassis11: montana[52-53] chassis12: montana[54-55] Uppercase: montana[1-2] gpuchassis: @chassis[4-5] gpu: montana[38-41] all: montana[1-6,32-163] """) # /!\ Need to return file object and not f1.name, otherwise the temporary # file might be immediately unlinked. 
return f1 def makeTestG2(): """Create a temporary group file 2""" f2 = make_temp_file(""" # # para: montana[32-37,42-55] gpu: montana[38-41] """) return f2 def makeTestG3(): """Create a temporary group file 3""" f3 = make_temp_file(""" # # all: montana[32-55] para: montana[32-37,42-55] gpu: montana[38-41] login: montana[32-33] overclock: montana[41-42] chassis1: montana[32-33] chassis2: montana[34-35] chassis3: montana[36-37] single: idaho """) return f3 def makeTestR3(): """Create a temporary reverse group file 3""" r3 = make_temp_file(""" # # montana32: all,para,login,chassis1 montana33: all,para,login,chassis1 montana34: all,para,chassis2 montana35: all,para,chassis2 montana36: all,para,chassis3 montana37: all,para,chassis3 montana38: all,gpu montana39: all,gpu montana40: all,gpu montana41: all,gpu,overclock montana42: all,para,overclock montana43: all,para montana44: all,para montana45: all,para montana46: all,para montana47: all,para montana48: all,para montana49: all,para montana50: all,para montana51: all,para montana52: all,para montana53: all,para montana54: all,para montana55: all,para idaho: single """) return r3 class NodeSetGroupTest(unittest.TestCase): def testGroupResolverSimple(self): """test NodeSet with simple custom GroupResolver""" test_groups1 = makeTestG1() source = GroupSource("simple", "sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % test_groups1.name, "sed -n 's/^all:\(.*\)/\\1/p' %s" % test_groups1.name, "sed -n 's/^\([0-9A-Za-z_-]*\):.*/\\1/p' %s" % test_groups1.name, None) # create custom resolver with default source res = GroupResolver(source) self.assertFalse(res.has_node_groups()) self.assertFalse(res.has_node_groups("dummy_namespace")) nodeset = NodeSet("@gpu", resolver=res) self.assertEqual(nodeset, NodeSet("montana[38-41]")) self.assertEqual(str(nodeset), "montana[38-41]") nodeset = NodeSet("@chassis3", resolver=res) self.assertEqual(str(nodeset), "montana[36-37]") nodeset = NodeSet("@chassis[3-4]", resolver=res) 
self.assertEqual(str(nodeset), "montana[36-39]") nodeset = NodeSet("@chassis[1,3,5]", resolver=res) self.assertEqual(str(nodeset), "montana[32-33,36-37,40-41]") nodeset = NodeSet("@chassis[2-12/2]", resolver=res) self.assertEqual(str(nodeset), "montana[34-35,38-39,42-43,46-47,50-51,54-55]") nodeset = NodeSet("@chassis[1,3-4,5-11/3]", resolver=res) self.assertEqual(str(nodeset), "montana[32-33,36-41,46-47,52-53]") # test recursive group gpuchassis nodeset1 = NodeSet("@chassis[4-5]", resolver=res) nodeset2 = NodeSet("@gpu", resolver=res) nodeset3 = NodeSet("@gpuchassis", resolver=res) self.assertEqual(nodeset1, nodeset2) self.assertEqual(nodeset2, nodeset3) # test also with some inline operations nodeset = NodeSet("montana3,@gpuchassis!montana39,montana77^montana38", resolver=res) self.assertEqual(str(nodeset), "montana[3,40-41,77]") def testAllNoResolver(self): """test NodeSet.fromall() with no resolver""" self.assertRaises(NodeSetExternalError, NodeSet.fromall, resolver=RESOLVER_NOGROUP) def testGroupsNoResolver(self): """test NodeSet.groups() with no resolver""" nodeset = NodeSet("foo", resolver=RESOLVER_NOGROUP) self.assertRaises(NodeSetExternalError, nodeset.groups) def testGroupResolverAddSourceError(self): """test GroupResolver.add_source() error""" test_groups1 = makeTestG1() source = GroupSource("simple", "sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % test_groups1.name, "sed -n 's/^all:\(.*\)/\\1/p' %s" % test_groups1.name, "sed -n 's/^\([0-9A-Za-z_-]*\):.*/\\1/p' %s" % test_groups1.name, None) res = GroupResolver(source) # adding the same source again should raise ValueError self.assertRaises(ValueError, res.add_source, source) def testGroupResolverMinimal(self): """test NodeSet with minimal GroupResolver""" test_groups1 = makeTestG1() source = GroupSource("minimal", "sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % test_groups1.name, None, None, None) # create custom resolver with default source res = GroupResolver(source) nodeset = NodeSet("@gpu", resolver=res) 
self.assertEqual(nodeset, NodeSet("montana[38-41]")) self.assertEqual(str(nodeset), "montana[38-41]") self.assertRaises(NodeSetExternalError, NodeSet.fromall, resolver=res) def testConfigEmpty(self): """test groups with an empty configuration file""" f = make_temp_file("") res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertRaises(GroupResolverSourceError, nodeset.regroup) # non existant group self.assertRaises(GroupResolverSourceError, NodeSet, "@bar", resolver=res) def testConfigBasicLocal(self): """test groups with a basic local config file""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example[1-100] #all: list: echo foo #reverse: """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@foo") self.assertEqual(nodeset.groups().keys(), ["@foo"]) self.assertEqual(str(NodeSet("@foo", resolver=res)), "example[1-100]") # No 'all' defined: all_nodes() should raise an error self.assertRaises(GroupSourceNoUpcall, res.all_nodes) # No 'reverse' defined: node_groups() should raise an error self.assertRaises(GroupSourceNoUpcall, res.node_groups, "example1") # regroup with rest nodeset = NodeSet("example[1-101]", resolver=res) self.assertEqual(nodeset.regroup(), "@foo,example101") # regroup incomplete nodeset = NodeSet("example[50-200]", resolver=res) self.assertEqual(nodeset.regroup(), "example[50-200]") # regroup no matching nodeset = NodeSet("example[102-200]", resolver=res) self.assertEqual(nodeset.regroup(), "example[102-200]") def testConfigWrongSyntax(self): """test wrong groups config syntax""" f = make_temp_file(""" # A comment [Main] default: local [local] something: echo example[1-100] """) self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name) def testConfigBasicLocalVerbose(self): """test groups with a 
basic local config file (verbose)""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example[1-100] #all: list: echo foo #reverse: """) res = GroupResolverConfig(f.name) res.set_verbosity(1) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@foo") self.assertEqual(str(NodeSet("@foo", resolver=res)), "example[1-100]") def testConfigBasicLocalAlternative(self): """test groups with a basic local config file (= alternative)""" f = make_temp_file(""" # A comment [Main] default=local [local] map=echo example[1-100] #all= list=echo foo #reverse= """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@foo") self.assertEqual(str(NodeSet("@foo", resolver=res)), "example[1-100]") # @truc? def testConfigBasicEmptyDefault(self): """test groups with a empty default namespace""" f = make_temp_file(""" # A comment [Main] default: [local] map: echo example[1-100] #all: list: echo foo #reverse: """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@foo") self.assertEqual(str(NodeSet("@foo", resolver=res)), "example[1-100]") def testConfigBasicNoMain(self): """test groups with a local config without main section""" f = make_temp_file(""" # A comment [local] map: echo example[1-100] #all: list: echo foo #reverse: """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@foo") self.assertEqual(str(NodeSet("@foo", resolver=res)), "example[1-100]") def testConfigBasicWrongDefault(self): """test groups with a wrong default namespace""" f = make_temp_file(""" # A comment [Main] default: pointless [local] map: echo 
example[1-100] #all: list: echo foo #reverse: """) self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name) def testConfigQueryFailed(self): """test groups with config and failed query""" f = make_temp_file(""" # A comment [Main] default: local [local] map: false #all: list: echo foo #reverse: """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertRaises(NodeSetExternalError, nodeset.regroup) def testConfigRegroupWrongNamespace(self): """test groups by calling regroup(wrong_namespace)""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example[1-100] #all: list: echo foo #reverse: """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertRaises(GroupResolverSourceError, nodeset.regroup, "unknown") def testConfigNoListButReverseQuery(self): """test groups with no list but reverse upcall""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example[1-100] #all: #list: echo foo reverse: echo foo """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@foo") def testConfigWithEmptyList(self): """test groups with list upcall returning nothing""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example[1-100] #all: list: : reverse: echo foo """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@foo") def testConfigResolverSources(self): """test sources() with groups config of 2 sources""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example[1-100] [other] map: echo example[1-10] """) res = GroupResolverConfig(f.name) self.assertEqual(len(res.sources()), 2) self.assert_('local' in 
res.sources()) self.assert_('other' in res.sources()) def testConfigCrossRefs(self): """test groups config with cross references""" f = make_temp_file(""" # A comment [Main] default: other [local] map: echo example[1-100] [other] map: echo "foo: @local:foo" | sed -n 's/^$GROUP:\(.*\)/\\1/p' """) res = GroupResolverConfig(f.name) nodeset = NodeSet("@other:foo", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") def testConfigGroupsDirDummy(self): """test groups with groupsdir defined (dummy)""" f = make_temp_file(""" [Main] default: local groupsdir: /path/to/nowhere [local] map: echo example[1-100] #all: list: echo foo #reverse: """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@foo") self.assertEqual(str(NodeSet("@foo", resolver=res)), "example[1-100]") def testConfigGroupsDirExists(self): """test groups with groupsdir defined (real, other)""" dname = make_temp_dir() f = make_temp_file(""" [Main] default: new_local groupsdir: %s [local] map: echo example[1-100] #all: list: echo foo #reverse: """ % dname) f2 = make_temp_file(""" [new_local] map: echo example[1-100] #all: list: echo bar #reverse: """, suffix=".conf", dir=dname) try: res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@bar") self.assertEqual(str(NodeSet("@bar", resolver=res)), "example[1-100]") finally: f2.close() f.close() shutil.rmtree(dname, ignore_errors=True) def testConfigGroupsDirExistsNoOther(self): """test groups with groupsdir defined (real, no other)""" dname1 = make_temp_dir() dname2 = make_temp_dir() f = make_temp_file(""" [Main] default: new_local groupsdir: %s %s """ % (dname1, dname2)) f2 = make_temp_file(""" [new_local] map: echo example[1-100] #all: list: echo bar #reverse: """, suffix=".conf", dir=dname2) try: res = 
GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@bar") self.assertEqual(str(NodeSet("@bar", resolver=res)), "example[1-100]") finally: f2.close() f.close() shutil.rmtree(dname1, ignore_errors=True) shutil.rmtree(dname2, ignore_errors=True) def testConfigGroupsDirNotADirectory(self): """test groups with groupsdir defined (not a directory)""" dname = make_temp_dir() fdummy = make_temp_file("wrong") f = make_temp_file(""" [Main] default: new_local groupsdir: %s """ % fdummy.name) try: self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name) finally: fdummy.close() f.close() shutil.rmtree(dname, ignore_errors=True) class NodeSetGroup2GSTest(unittest.TestCase): def setUp(self): """configure simple RESOLVER_STD_GROUP""" # create temporary groups file and keep a reference to avoid file closing self.test_groups1 = makeTestG1() self.test_groups2 = makeTestG2() # create 2 GroupSource objects default = GroupSource("default", "sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % self.test_groups1.name, "sed -n 's/^all:\(.*\)/\\1/p' %s" % self.test_groups1.name, "sed -n 's/^\([0-9A-Za-z_-]*\):.*/\\1/p' %s" % self.test_groups1.name, None) source2 = GroupSource("source2", "sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % self.test_groups2.name, "sed -n 's/^all:\(.*\)/\\1/p' %s" % self.test_groups2.name, "sed -n 's/^\([0-9A-Za-z_-]*\):.*/\\1/p' %s" % self.test_groups2.name, None) ClusterShell.NodeSet.RESOLVER_STD_GROUP = GroupResolver(default) ClusterShell.NodeSet.RESOLVER_STD_GROUP.add_source(source2) def tearDown(self): """restore default RESOLVER_STD_GROUP""" ClusterShell.NodeSet.RESOLVER_STD_GROUP = ClusterShell.NodeSet.DEF_RESOLVER_STD_GROUP del self.test_groups1 del self.test_groups2 def testGroupSyntaxes(self): """test NodeSet group operation syntaxes""" nodeset = NodeSet("@gpu") self.assertEqual(str(nodeset), "montana[38-41]") nodeset = 
NodeSet("@chassis[1-3,5]&@chassis[2-3]") self.assertEqual(str(nodeset), "montana[34-37]") nodeset1 = NodeSet("@io!@mds") nodeset2 = NodeSet("@oss") self.assertEqual(str(nodeset1), str(nodeset2)) self.assertEqual(str(nodeset1), "montana[4-5]") def testGroupListDefault(self): """test NodeSet group listing GroupResolver.grouplist()""" groups = ClusterShell.NodeSet.RESOLVER_STD_GROUP.grouplist() self.assertEqual(len(groups), 20) helper_groups = grouplist() self.assertEqual(len(helper_groups), 20) total = 0 nodes = NodeSet() for group in groups: ns = NodeSet("@%s" % group) total += len(ns) nodes.update(ns) self.assertEqual(total, 310) all_nodes = NodeSet.fromall() self.assertEqual(len(all_nodes), len(nodes)) self.assertEqual(all_nodes, nodes) def testGroupListSource2(self): """test NodeSet group listing GroupResolver.grouplist(source)""" groups = ClusterShell.NodeSet.RESOLVER_STD_GROUP.grouplist("source2") self.assertEqual(len(groups), 2) total = 0 for group in groups: total += len(NodeSet("@source2:%s" % group)) self.assertEqual(total, 24) def testGroupNoPrefix(self): """test NodeSet group noprefix option""" nodeset = NodeSet("montana[32-37,42-55]") self.assertEqual(nodeset.regroup("source2"), "@source2:para") self.assertEqual(nodeset.regroup("source2", noprefix=True), "@para") def testGroupGroups(self): """test NodeSet.groups()""" nodeset = NodeSet("montana[32-37,42-55]") self.assertEqual(sorted(nodeset.groups().keys()), ['@all', '@chassis1', '@chassis10', '@chassis11', '@chassis12', '@chassis2', '@chassis3', '@chassis6', '@chassis7', '@chassis8', '@chassis9', '@compute']) testns = NodeSet() for gnodes, inodes in nodeset.groups().itervalues(): testns.update(inodes) self.assertEqual(testns, nodeset) class NodeSetRegroupTest(unittest.TestCase): def testGroupResolverReverse(self): """test NodeSet GroupResolver with reverse upcall""" test_groups3 = makeTestG3() test_reverse3 = makeTestR3() source = GroupSource("test", "sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % 
test_groups3.name, "sed -n 's/^all:\(.*\)/\\1/p' %s" % test_groups3.name, "sed -n 's/^\([0-9A-Za-z_-]*\):.*/\\1/p' %s" % test_groups3.name, "awk -F: '/^$NODE:/ { gsub(\",\",\"\\n\",$2); print $2 }' %s" % test_reverse3.name) # create custom resolver with default source res = GroupResolver(source) nodeset = NodeSet("@all", resolver=res) self.assertEqual(nodeset, NodeSet("montana[32-55]")) self.assertEqual(str(nodeset), "montana[32-55]") self.assertEqual(nodeset.regroup(), "@all") self.assertEqual(nodeset.regroup(), "@all") nodeset = NodeSet("@overclock", resolver=res) self.assertEqual(nodeset, NodeSet("montana[41-42]")) self.assertEqual(str(nodeset), "montana[41-42]") self.assertEqual(nodeset.regroup(), "@overclock") self.assertEqual(nodeset.regroup(), "@overclock") nodeset = NodeSet("@gpu,@overclock", resolver=res) self.assertEqual(nodeset, NodeSet("montana[38-42]")) self.assertEqual(str(nodeset), "montana[38-42]") # un-overlap :) self.assertEqual(nodeset.regroup(), "@gpu,montana42") self.assertEqual(nodeset.regroup(), "@gpu,montana42") self.assertEqual(nodeset.regroup(overlap=True), "@gpu,@overclock") nodeset = NodeSet("montana41", resolver=res) self.assertEqual(nodeset.regroup(), "montana41") self.assertEqual(nodeset.regroup(), "montana41") # test regroup code when using unindexed node nodeset = NodeSet("idaho", resolver=res) self.assertEqual(nodeset.regroup(), "@single") self.assertEqual(nodeset.regroup(), "@single") nodeset = NodeSet("@single", resolver=res) self.assertEqual(str(nodeset), "idaho") # unresolved unindexed: nodeset = NodeSet("utah", resolver=res) self.assertEqual(nodeset.regroup(), "utah") self.assertEqual(nodeset.regroup(), "utah") nodeset = NodeSet("@all!montana38", resolver=res) self.assertEqual(nodeset, NodeSet("montana[32-37,39-55]")) self.assertEqual(str(nodeset), "montana[32-37,39-55]") self.assertEqual(nodeset.regroup(), "@para,montana[39-41]") self.assertEqual(nodeset.regroup(), "@para,montana[39-41]") 
self.assertEqual(nodeset.regroup(overlap=True), "@chassis[1-3],@login,@overclock,@para,montana[39-40]") self.assertEqual(nodeset.regroup(overlap=True), "@chassis[1-3],@login,@overclock,@para,montana[39-40]") nodeset = NodeSet("montana[32-37]", resolver=res) self.assertEqual(nodeset.regroup(), "@chassis[1-3]") self.assertEqual(nodeset.regroup(), "@chassis[1-3]") if __name__ == '__main__': suites = [unittest.TestLoader().loadTestsFromTestCase(NodeSetGroupTest)] suites.append(unittest.TestLoader().loadTestsFromTestCase(NodeSetGroup2GSTest)) suites.append(unittest.TestLoader().loadTestsFromTestCase(NodeSetRegroupTest)) unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite(suites)) clustershell-1.6/tests/NodeSetTest.py0000644000130500135250000017672111741571247017356 0ustar thiellgpocre#!/usr/bin/env python # ClusterShell.NodeSet test suite # Written by S. Thiell 2007-12-05 """Unit test for NodeSet""" import binascii import copy import pickle import sys import unittest sys.path.insert(0, '../lib') from ClusterShell.NodeSet import RangeSet, NodeSet, fold, expand from ClusterShell.NodeSet import NodeGroupBase, NodeSetBase class NodeSetTest(unittest.TestCase): def _assertNode(self, nodeset, nodename): self.assertEqual(str(nodeset), nodename) self.assertEqual(list(nodeset), [ nodename ]) self.assertEqual(len(nodeset), 1) def testUnnumberedNode(self): """test NodeSet with unnumbered node""" nodeset = NodeSet("cws-machin") self._assertNode(nodeset, "cws-machin") def testNodeZero(self): """test NodeSet with node0""" nodeset = NodeSet("supercluster0") self._assertNode(nodeset, "supercluster0") def testNoPrefix(self): """test NodeSet with node without prefix""" nodeset = NodeSet("0cluster") self._assertNode(nodeset, "0cluster") nodeset = NodeSet("[0]cluster") self._assertNode(nodeset, "0cluster") def testWhitespacePrefix(self): """test NodeSet parsing ignoring whitespace""" nodeset = NodeSet(" tigrou2 , tigrou7 , tigrou[5,9-11] ") self.assertEqual(str(nodeset), 
"tigrou[2,5,7,9-11]") nodeset = NodeSet(" tigrou2 , tigrou5,tigrou7 , tigrou[ 9 - 11 ] ") self.assertEqual(str(nodeset), "tigrou[2,5,7,9-11]") def testFromListConstructor(self): """test NodeSet.fromlist() constructor""" nodeset = NodeSet.fromlist([ "cluster33" ]) self._assertNode(nodeset, "cluster33") nodeset = NodeSet.fromlist([ "cluster0", "cluster1", "cluster2", "cluster5", "cluster8", "cluster4", "cluster3" ]) self.assertEqual(str(nodeset), "cluster[0-5,8]") self.assertEqual(len(nodeset), 7) # updaten() test nodeset.updaten(["cluster10", "cluster9"]) self.assertEqual(str(nodeset), "cluster[0-5,8-10]") self.assertEqual(len(nodeset), 9) # single nodes test nodeset = NodeSet.fromlist([ "cluster0", "cluster1", "cluster", "wool", "cluster3" ]) self.assertEqual(str(nodeset), "cluster,cluster[0-1,3],wool") self.assertEqual(len(nodeset), 5) def testDigitInPrefix(self): """test NodeSet digit in prefix""" nodeset = NodeSet("clu-0-3") self._assertNode(nodeset, "clu-0-3") nodeset = NodeSet("clu-0-[3-23]") self.assertEqual(str(nodeset), "clu-0-[3-23]") def testNodeWithPercent(self): """test NodeSet on nodename with % character""" nodeset = NodeSet("cluster%s3") self._assertNode(nodeset, "cluster%s3") nodeset = NodeSet("clust%ser[3-30]") self.assertEqual(str(nodeset), "clust%ser[3-30]") def testNodeEightPad(self): """test NodeSet padding feature""" nodeset = NodeSet("cluster008") self._assertNode(nodeset, "cluster008") def testNodeRangeIncludingZero(self): """test NodeSet with node range including zero""" nodeset = NodeSet("cluster[0-10]") self.assertEqual(str(nodeset), "cluster[0-10]") self.assertEqual(list(nodeset), [ "cluster0", "cluster1", "cluster2", "cluster3", "cluster4", "cluster5", "cluster6", "cluster7", "cluster8", "cluster9", "cluster10" ]) self.assertEqual(len(nodeset), 11) def testSingle(self): """test NodeSet single cluster node""" nodeset = NodeSet("cluster115") self._assertNode(nodeset, "cluster115") def testSingleNodeInRange(self): """test NodeSet single 
cluster node in range""" nodeset = NodeSet("cluster[115]") self._assertNode(nodeset, "cluster115") def testRange(self): """test NodeSet with simple range""" nodeset = NodeSet("cluster[1-100]") self.assertEqual(str(nodeset), "cluster[1-100]") self.assertEqual(len(nodeset), 100) i = 1 for n in nodeset: self.assertEqual(n, "cluster%d" % i) i += 1 self.assertEqual(i, 101) lst = copy.deepcopy(list(nodeset)) i = 1 for n in lst: self.assertEqual(n, "cluster%d" % i) i += 1 self.assertEqual(i, 101) def testRangeWithPadding1(self): """test NodeSet with range with padding (1)""" nodeset = NodeSet("cluster[0001-0100]") self.assertEqual(str(nodeset), "cluster[0001-0100]") self.assertEqual(len(nodeset), 100) i = 1 for n in nodeset: self.assertEqual(n, "cluster%04d" % i) i += 1 self.assertEqual(i, 101) def testRangeWithPadding2(self): """test NodeSet with range with padding (2)""" nodeset = NodeSet("cluster[0034-8127]") self.assertEqual(str(nodeset), "cluster[0034-8127]") self.assertEqual(len(nodeset), 8094) i = 34 for n in nodeset: self.assertEqual(n, "cluster%04d" % i) i += 1 self.assertEqual(i, 8128) def testRangeWithSuffix(self): """test NodeSet with simple range with suffix""" nodeset = NodeSet("cluster[50-99]-ipmi") self.assertEqual(str(nodeset), "cluster[50-99]-ipmi") i = 50 for n in nodeset: self.assertEqual(n, "cluster%d-ipmi" % i) i += 1 self.assertEqual(i, 100) def testCommaSeparatedAndRangeWithPadding(self): """test NodeSet comma separated, range and padding""" nodeset = NodeSet("cluster[0001,0002,1555-1559]") self.assertEqual(str(nodeset), "cluster[0001-0002,1555-1559]") self.assertEqual(list(nodeset), [ "cluster0001", "cluster0002", "cluster1555", "cluster1556", "cluster1557", "cluster1558", "cluster1559" ]) def testCommaSeparatedAndRangeWithPaddingWithSuffix(self): """test NodeSet comma separated, range and padding with suffix""" nodeset = NodeSet("cluster[0001,0002,1555-1559]-ipmi") self.assertEqual(str(nodeset), "cluster[0001-0002,1555-1559]-ipmi") 
self.assertEqual(list(nodeset), [ "cluster0001-ipmi", "cluster0002-ipmi", "cluster1555-ipmi", "cluster1556-ipmi", "cluster1557-ipmi", "cluster1558-ipmi", "cluster1559-ipmi" ]) def testVeryBigRange(self): """test NodeSet iterations with big range size""" nodeset = NodeSet("bigcluster[1-1000000]") self.assertEqual(str(nodeset), "bigcluster[1-1000000]") self.assertEqual(len(nodeset), 1000000) i = 1 for n in nodeset: assert n == "bigcluster%d" % i i += 1 def testCommaSeparated(self): """test NodeSet comma separated to ranges (folding)""" nodeset = NodeSet("cluster115,cluster116,cluster117,cluster130,cluster166") self.assertEqual(str(nodeset), "cluster[115-117,130,166]") self.assertEqual(len(nodeset), 5) def testCommaSeparatedAndRange(self): """test NodeSet comma separated and range to ranges (folding)""" nodeset = NodeSet("cluster115,cluster116,cluster117,cluster130,cluster[166-169],cluster170") self.assertEqual(str(nodeset), "cluster[115-117,130,166-170]") def testCommaSeparatedAndRanges(self): """test NodeSet comma separated and ranges to ranges (folding)""" nodeset = NodeSet("cluster[115-117],cluster130,cluster[166-169],cluster170") self.assertEqual(str(nodeset), "cluster[115-117,130,166-170]") def testSimpleStringUpdates(self): """test NodeSet simple string-based update()""" nodeset = NodeSet("cluster[115-117,130,166-170]") self.assertEqual(str(nodeset), "cluster[115-117,130,166-170]") nodeset.update("cluster171") self.assertEqual(str(nodeset), "cluster[115-117,130,166-171]") nodeset.update("cluster172") self.assertEqual(str(nodeset), "cluster[115-117,130,166-172]") nodeset.update("cluster174") self.assertEqual(str(nodeset), "cluster[115-117,130,166-172,174]") nodeset.update("cluster113") self.assertEqual(str(nodeset), "cluster[113,115-117,130,166-172,174]") nodeset.update("cluster173") self.assertEqual(str(nodeset), "cluster[113,115-117,130,166-174]") nodeset.update("cluster114") self.assertEqual(str(nodeset), "cluster[113-117,130,166-174]") def 
testSimpleNodeSetUpdates(self): """test NodeSet simple nodeset-based update()""" nodeset = NodeSet("cluster[115-117,130,166-170]") self.assertEqual(str(nodeset), "cluster[115-117,130,166-170]") nodeset.update(NodeSet("cluster171")) self.assertEqual(str(nodeset), "cluster[115-117,130,166-171]") nodeset.update(NodeSet("cluster172")) self.assertEqual(str(nodeset), "cluster[115-117,130,166-172]") nodeset.update(NodeSet("cluster174")) self.assertEqual(str(nodeset), "cluster[115-117,130,166-172,174]") nodeset.update(NodeSet("cluster113")) self.assertEqual(str(nodeset), "cluster[113,115-117,130,166-172,174]") nodeset.update(NodeSet("cluster173")) self.assertEqual(str(nodeset), "cluster[113,115-117,130,166-174]") nodeset.update(NodeSet("cluster114")) self.assertEqual(str(nodeset), "cluster[113-117,130,166-174]") def testStringUpdatesFromEmptyNodeSet(self): """test NodeSet string-based NodeSet.update() from empty nodeset""" nodeset = NodeSet() self.assertEqual(str(nodeset), "") nodeset.update("cluster115") self.assertEqual(str(nodeset), "cluster115") nodeset.update("cluster118") self.assertEqual(str(nodeset), "cluster[115,118]") nodeset.update("cluster[116-117]") self.assertEqual(str(nodeset), "cluster[115-118]") def testNodeSetUpdatesFromEmptyNodeSet(self): """test NodeSet-based update() method from empty nodeset""" nodeset = NodeSet() self.assertEqual(str(nodeset), "") nodeset.update(NodeSet("cluster115")) self.assertEqual(str(nodeset), "cluster115") nodeset.update(NodeSet("cluster118")) self.assertEqual(str(nodeset), "cluster[115,118]") nodeset.update(NodeSet("cluster[116-117]")) self.assertEqual(str(nodeset), "cluster[115-118]") def testUpdatesWithSeveralPrefixes(self): """test NodeSet.update() using several prefixes""" nodeset = NodeSet("cluster3") self.assertEqual(str(nodeset), "cluster3") nodeset.update("cluster5") self.assertEqual(str(nodeset), "cluster[3,5]") nodeset.update("tiger5") self.assertEqual(str(nodeset), "cluster[3,5],tiger5") nodeset.update("tiger7") 
        # (tail of testUpdatesWithSeveralPrefixes: keep adding nodes from a
        # second prefix and check the folded string form after each update)
        self.assertEqual(str(nodeset), "cluster[3,5],tiger[5,7]")
        nodeset.update("tiger6")
        self.assertEqual(str(nodeset), "cluster[3,5],tiger[5-7]")
        nodeset.update("cluster4")
        self.assertEqual(str(nodeset), "cluster[3-5],tiger[5-7]")

    def testOperatorUnion(self):
        """test NodeSet union | operator"""
        # Both the binary form (a | b -> new set) and the in-place form
        # (a |= b) are exercised; after each step we verify the original
        # operand was left untouched.
        nodeset = NodeSet("cluster[115-117,130,166-170]")
        self.assertEqual(str(nodeset), "cluster[115-117,130,166-170]")
        # 1
        n_test1 = nodeset | NodeSet("cluster171")
        self.assertEqual(str(n_test1), "cluster[115-117,130,166-171]")
        nodeset2 = nodeset.copy()
        self.assertEqual(str(nodeset), "cluster[115-117,130,166-170]")
        nodeset2 |= NodeSet("cluster171")
        self.assertEqual(str(nodeset2), "cluster[115-117,130,166-171]")
        # btw validate modifying a copy did not change original
        self.assertEqual(str(nodeset), "cluster[115-117,130,166-170]")
        # 2
        n_test2 = n_test1 | NodeSet("cluster172")
        self.assertEqual(str(n_test2), "cluster[115-117,130,166-172]")
        nodeset2 |= NodeSet("cluster172")
        self.assertEqual(str(nodeset2), "cluster[115-117,130,166-172]")
        self.assertEqual(str(nodeset), "cluster[115-117,130,166-170]")
        # 3
        n_test1 = n_test2 | NodeSet("cluster113")
        self.assertEqual(str(n_test1), "cluster[113,115-117,130,166-172]")
        nodeset2 |= NodeSet("cluster113")
        self.assertEqual(str(nodeset2), "cluster[113,115-117,130,166-172]")
        self.assertEqual(str(nodeset), "cluster[115-117,130,166-170]")
        # 4
        n_test2 = n_test1 | NodeSet("cluster114")
        self.assertEqual(str(n_test2), "cluster[113-117,130,166-172]")
        nodeset2 |= NodeSet("cluster114")
        self.assertEqual(str(nodeset2), "cluster[113-117,130,166-172]")
        self.assertEqual(nodeset2, NodeSet("cluster[113-117,130,166-172]"))
        self.assertEqual(str(nodeset), "cluster[115-117,130,166-170]")
        # more
        # Scalability check: build a 3000-node set one union at a time,
        # first with the binary operator, then with the in-place operator,
        # and verify the seed set is never mutated.
        original = NodeSet("cluster0")
        nodeset = original.copy()
        for i in xrange(1, 3000):
            nodeset = nodeset | NodeSet("cluster%d" % i)
        self.assertEqual(len(nodeset), 3000)
        self.assertEqual(str(nodeset), "cluster[0-2999]")
        self.assertEqual(len(original), 1)
        self.assertEqual(str(original), "cluster0")
        nodeset2 = original.copy()
        for i in xrange(1, 3000):
            nodeset2 |= NodeSet("cluster%d" % i)
        self.assertEqual(nodeset, nodeset2)
        for i in xrange(3000, 5000):
            nodeset2 |= NodeSet("cluster%d" % i)
        self.assertEqual(len(nodeset2), 5000)
        self.assertEqual(str(nodeset2), "cluster[0-4999]")
        self.assertEqual(len(nodeset), 3000)
        self.assertEqual(str(nodeset), "cluster[0-2999]")
        self.assertEqual(len(original), 1)
        self.assertEqual(str(original), "cluster0")

    def testOperatorUnionFromEmptyNodeSet(self):
        """test NodeSet union | operator from empty nodeset"""
        nodeset = NodeSet()
        self.assertEqual(str(nodeset), "")
        n_test1 = nodeset | NodeSet("cluster115")
        self.assertEqual(str(n_test1), "cluster115")
        n_test2 = n_test1 | NodeSet("cluster118")
        self.assertEqual(str(n_test2), "cluster[115,118]")
        n_test1 = n_test2 | NodeSet("cluster[116,117]")
        self.assertEqual(str(n_test1), "cluster[115-118]")

    def testOperatorUnionWithSeveralPrefixes(self):
        """test NodeSet union | operator using several prefixes"""
        # Unions across distinct node-name prefixes must fold each prefix
        # independently in the string representation.
        nodeset = NodeSet("cluster3")
        self.assertEqual(str(nodeset), "cluster3")
        n_test1 = nodeset | NodeSet("cluster5")
        self.assertEqual(str(n_test1), "cluster[3,5]")
        n_test2 = n_test1 | NodeSet("tiger5")
        self.assertEqual(str(n_test2), "cluster[3,5],tiger5")
        n_test1 = n_test2 | NodeSet("tiger7")
        self.assertEqual(str(n_test1), "cluster[3,5],tiger[5,7]")
        n_test2 = n_test1 | NodeSet("tiger6")
        self.assertEqual(str(n_test2), "cluster[3,5],tiger[5-7]")
        n_test1 = n_test2 | NodeSet("cluster4")
        self.assertEqual(str(n_test1), "cluster[3-5],tiger[5-7]")

    def testOperatorSub(self):
        """test NodeSet difference/sub - operator"""
        nodeset = NodeSet("cluster[115-117,130,166-170]")
        self.assertEqual(str(nodeset), "cluster[115-117,130,166-170]")
        # __sub__
        n_test1 = nodeset - NodeSet("cluster[115,130]")
        self.assertEqual(str(n_test1), "cluster[116-117,166-170]")
        nodeset2 = copy.copy(nodeset)
        nodeset2 -= NodeSet("cluster[115,130]")
        self.assertEqual(str(nodeset2), "cluster[116-117,166-170]")
        # (tail of testOperatorSub: in-place difference result also compares
        # equal to an equivalent freshly-parsed NodeSet, not just str-equal)
        self.assertEqual(nodeset2, NodeSet("cluster[116-117,166-170]"))

    def testOperatorAnd(self):
        """test NodeSet intersection/and & operator"""
        nodeset = NodeSet("cluster[115-117,130,166-170]")
        self.assertEqual(str(nodeset), "cluster[115-117,130,166-170]")
        # __and__
        n_test1 = nodeset & NodeSet("cluster[115-167]")
        self.assertEqual(str(n_test1), "cluster[115-117,130,166-167]")
        nodeset2 = copy.copy(nodeset)
        nodeset2 &= NodeSet("cluster[115-167]")
        self.assertEqual(str(nodeset2), "cluster[115-117,130,166-167]")
        self.assertEqual(nodeset2, NodeSet("cluster[115-117,130,166-167]"))

    def testOperatorXor(self):
        """test NodeSet symmetric_difference/xor & operator"""
        nodeset = NodeSet("cluster[115-117,130,166-170]")
        self.assertEqual(str(nodeset), "cluster[115-117,130,166-170]")
        # __xor__
        n_test1 = nodeset ^ NodeSet("cluster[115-167]")
        self.assertEqual(str(n_test1), "cluster[118-129,131-165,168-170]")
        nodeset2 = copy.copy(nodeset)
        nodeset2 ^= NodeSet("cluster[115-167]")
        self.assertEqual(str(nodeset2), "cluster[118-129,131-165,168-170]")
        self.assertEqual(nodeset2, NodeSet("cluster[118-129,131-165,168-170]"))

    def testLen(self):
        """test NodeSet len() results"""
        nodeset = NodeSet()
        self.assertEqual(len(nodeset), 0)
        nodeset.update("cluster[116-120]")
        self.assertEqual(len(nodeset), 5)
        # mixed prefixes, a suffix pattern and unindexed nodes
        nodeset = NodeSet("roma[50-99]-ipmi,cors[113,115-117,130,166-172],cws-tigrou,tigrou3")
        self.assertEqual(len(nodeset), 50+12+1+1)
        # duplicates must not inflate the length
        nodeset = NodeSet("roma[50-99]-ipmi,cors[113,115-117,130,166-172],cws-tigrou,tigrou3,tigrou3,tigrou3,cors116")
        self.assertEqual(len(nodeset), 50+12+1+1)

    def testIntersection(self):
        """test NodeSet.intersection()"""
        nsstr = "red[34-55,76-249,300-403],blue,green"
        nodeset = NodeSet(nsstr)
        self.assertEqual(len(nodeset), 302)
        nsstr2 = "red[32-57,72-249,300-341],blue,yellow"
        nodeset2 = NodeSet(nsstr2)
        self.assertEqual(len(nodeset2), 248)
        inodeset = nodeset.intersection(nodeset2)
        # originals should not change
        self.assertEqual(len(nodeset), 302)
        self.assertEqual(len(nodeset2), 248)
self.assertEqual(str(nodeset), "blue,green,red[34-55,76-249,300-403]") self.assertEqual(str(nodeset2), "blue,red[32-57,72-249,300-341],yellow") # result self.assertEqual(len(inodeset), 239) self.assertEqual(str(inodeset), "blue,red[34-55,76-249,300-341]") def testIntersectUpdate(self): """test NodeSet.intersection_update()""" nsstr = "red[34-55,76-249,300-403]" nodeset = NodeSet(nsstr) self.assertEqual(len(nodeset), 300) nodeset = NodeSet(nsstr) nodeset.intersection_update("red[78-80]") self.assertEqual(str(nodeset), "red[78-80]") nodeset = NodeSet(nsstr) nodeset.intersection_update("red[54-249]") self.assertEqual(str(nodeset), "red[54-55,76-249]") nodeset = NodeSet(nsstr) nodeset.intersection_update("red[55-249]") self.assertEqual(str(nodeset), "red[55,76-249]") nodeset = NodeSet(nsstr) nodeset.intersection_update("red[55-100]") self.assertEqual(str(nodeset), "red[55,76-100]") nodeset = NodeSet(nsstr) nodeset.intersection_update("red[55-76]") self.assertEqual(str(nodeset), "red[55,76]") nodeset = NodeSet(nsstr) nodeset.intersection_update("red[55,76]") self.assertEqual(str(nodeset), "red[55,76]") nodeset = NodeSet(nsstr) nodeset.intersection_update("red55,red76") self.assertEqual(str(nodeset), "red[55,76]") # same with intersect(NodeSet) nodeset = NodeSet(nsstr) nodeset.intersection_update(NodeSet("red[78-80]")) self.assertEqual(str(nodeset), "red[78-80]") nodeset = NodeSet(nsstr) nodeset.intersection_update(NodeSet("red[54-249]")) self.assertEqual(str(nodeset), "red[54-55,76-249]") nodeset = NodeSet(nsstr) nodeset.intersection_update(NodeSet("red[55-249]")) self.assertEqual(str(nodeset), "red[55,76-249]") nodeset = NodeSet(nsstr) nodeset.intersection_update(NodeSet("red[55-100]")) self.assertEqual(str(nodeset), "red[55,76-100]") nodeset = NodeSet(nsstr) nodeset.intersection_update(NodeSet("red[55-76]")) self.assertEqual(str(nodeset), "red[55,76]") nodeset = NodeSet(nsstr) nodeset.intersection_update(NodeSet("red[55,76]")) self.assertEqual(str(nodeset), 
"red[55,76]") nodeset = NodeSet(nsstr) nodeset.intersection_update(NodeSet("red55,red76")) self.assertEqual(str(nodeset), "red[55,76]") # single nodes test nodeset = NodeSet("red,blue,yellow") nodeset.intersection_update("blue,green,yellow") self.assertEqual(len(nodeset), 2) self.assertEqual(str(nodeset), "blue,yellow") def testIntersectSelf(self): """test Nodeset.intersection_update(self)""" nodeset = NodeSet("red4955") self.assertEqual(len(nodeset), 1) nodeset.intersection_update(nodeset) self.assertEqual(len(nodeset), 1) self.assertEqual(str(nodeset), "red4955") nodeset = NodeSet("red") self.assertEqual(len(nodeset), 1) nodeset.intersection_update(nodeset) self.assertEqual(len(nodeset), 1) self.assertEqual(str(nodeset), "red") nodeset = NodeSet("red") self.assertEqual(len(nodeset), 1) nodeset.intersection_update("red") self.assertEqual(len(nodeset), 1) self.assertEqual(str(nodeset), "red") nodeset = NodeSet("red") self.assertEqual(len(nodeset), 1) nodeset.intersection_update("blue") self.assertEqual(len(nodeset), 0) nodeset = NodeSet("red[78-149]") self.assertEqual(len(nodeset), 72) nodeset.intersection_update(nodeset) self.assertEqual(len(nodeset), 72) self.assertEqual(str(nodeset), "red[78-149]") def testIntersectReturnNothing(self): """test NodeSet intersect that returns empty NodeSet""" nodeset = NodeSet("blue43") self.assertEqual(len(nodeset), 1) nodeset.intersection_update("blue42") self.assertEqual(len(nodeset), 0) def testDifference(self): """test NodeSet.difference()""" nsstr = "red[34-55,76-249,300-403],blue,green" nodeset = NodeSet(nsstr) self.assertEqual(len(nodeset), 302) nsstr2 = "red[32-57,72-249,300-341],blue,yellow" nodeset2 = NodeSet(nsstr2) self.assertEqual(len(nodeset2), 248) inodeset = nodeset.difference(nodeset2) # originals should not change self.assertEqual(len(nodeset), 302) self.assertEqual(len(nodeset2), 248) self.assertEqual(str(nodeset), "blue,green,red[34-55,76-249,300-403]") self.assertEqual(str(nodeset2), 
"blue,red[32-57,72-249,300-341],yellow") # result self.assertEqual(len(inodeset), 63) self.assertEqual(str(inodeset), "green,red[342-403]") def testDifferenceUpdate(self): """test NodeSet.difference_update()""" # nodeset-based subs nodeset = NodeSet("yellow120") self.assertEqual(len(nodeset), 1) nodeset.difference_update(NodeSet("yellow120")) self.assertEqual(len(nodeset), 0) nodeset = NodeSet("yellow") self.assertEqual(len(nodeset), 1) nodeset.difference_update(NodeSet("yellow")) self.assertEqual(len(nodeset), 0) nodeset = NodeSet("yellow") self.assertEqual(len(nodeset), 1) nodeset.difference_update(NodeSet("blue")) self.assertEqual(len(nodeset), 1) self.assertEqual(str(nodeset), "yellow") nodeset = NodeSet("yellow[45-240,570-764,800]") self.assertEqual(len(nodeset), 392) nodeset.difference_update(NodeSet("yellow[45-240,570-764,800]")) self.assertEqual(len(nodeset), 0) # same with string-based subs nodeset = NodeSet("yellow120") self.assertEqual(len(nodeset), 1) nodeset.difference_update("yellow120") self.assertEqual(len(nodeset), 0) nodeset = NodeSet("yellow") self.assertEqual(len(nodeset), 1) nodeset.difference_update("yellow") self.assertEqual(len(nodeset), 0) nodeset = NodeSet("yellow") self.assertEqual(len(nodeset), 1) nodeset.difference_update("blue") self.assertEqual(len(nodeset), 1) self.assertEqual(str(nodeset), "yellow") nodeset = NodeSet("yellow[45-240,570-764,800]") self.assertEqual(len(nodeset), 392) nodeset.difference_update("yellow[45-240,570-764,800]") self.assertEqual(len(nodeset), 0) def testSubSelf(self): """test NodeSet.difference_update() method (self)""" nodeset = NodeSet("yellow[120-148,167]") nodeset.difference_update(nodeset) self.assertEqual(len(nodeset), 0) def testSubMore(self): """test NodeSet.difference_update() method (more)""" nodeset = NodeSet("yellow[120-160]") self.assertEqual(len(nodeset), 41) for i in range(120, 161): nodeset.difference_update(NodeSet("yellow%d" % i)) self.assertEqual(len(nodeset), 0) def testSubsAndAdds(self): 
"""test NodeSet.update() and difference_update() together""" nodeset = NodeSet("yellow[120-160]") self.assertEqual(len(nodeset), 41) for i in range(120, 131): nodeset.difference_update(NodeSet("yellow%d" % i)) self.assertEqual(len(nodeset), 30) for i in range(1940, 2040): nodeset.update(NodeSet("yellow%d" % i)) self.assertEqual(len(nodeset), 130) def testSubsAndAddsMore(self): """test NodeSet.update() and difference_update() together (more)""" nodeset = NodeSet("yellow[120-160]") self.assertEqual(len(nodeset), 41) for i in range(120, 131): nodeset.difference_update(NodeSet("yellow%d" % i)) nodeset.update(NodeSet("yellow%d" % (i + 1000))) self.assertEqual(len(nodeset), 41) for i in range(1120, 1131): nodeset.difference_update(NodeSet("yellow%d" % i)) nodeset.difference_update(NodeSet("yellow[131-160]")) self.assertEqual(len(nodeset), 0) def testSubsAndAddsMoreDigit(self): """test NodeSet.update() and difference_update() together (with other digit in prefix)""" nodeset = NodeSet("clu-3-[120-160]") self.assertEqual(len(nodeset), 41) for i in range(120, 131): nodeset.difference_update(NodeSet("clu-3-[%d]" % i)) nodeset.update(NodeSet("clu-3-[%d]" % (i + 1000))) self.assertEqual(len(nodeset), 41) for i in range(1120, 1131): nodeset.difference_update(NodeSet("clu-3-[%d]" % i)) nodeset.difference_update(NodeSet("clu-3-[131-160]")) self.assertEqual(len(nodeset), 0) def testSubUnknownNodes(self): """test NodeSet.difference_update() with unknown nodes""" nodeset = NodeSet("yellow[120-160]") self.assertEqual(len(nodeset), 41) nodeset.difference_update("red[35-49]") self.assertEqual(len(nodeset), 41) self.assertEqual(str(nodeset), "yellow[120-160]") def testSubMultiplePrefix(self): """test NodeSet.difference_update() with multiple prefixes""" nodeset = NodeSet("yellow[120-160],red[32-147],blue3,green,white[2-3940],blue4,blue303") self.assertEqual(len(nodeset), 4100) for i in range(120, 131): nodeset.difference_update(NodeSet("red%d" % i)) nodeset.update(NodeSet("red%d" % (i + 
1000))) nodeset.update(NodeSet("yellow%d" % (i + 1000))) self.assertEqual(len(nodeset), 4111) for i in range(1120, 1131): nodeset.difference_update(NodeSet("red%d" % i)) nodeset.difference_update(NodeSet("white%d" %i)) nodeset.difference_update(NodeSet("yellow[131-160]")) self.assertEqual(len(nodeset), 4059) nodeset.difference_update(NodeSet("green")) self.assertEqual(len(nodeset), 4058) def testGetItem(self): """test NodeSet getitem()""" nodeset = NodeSet("yeti[30,34-51,59-60]") self.assertEqual(len(nodeset), 21) self.assertEqual(nodeset[0], "yeti30") self.assertEqual(nodeset[1], "yeti34") self.assertEqual(nodeset[2], "yeti35") self.assertEqual(nodeset[3], "yeti36") self.assertEqual(nodeset[18], "yeti51") self.assertEqual(nodeset[19], "yeti59") self.assertEqual(nodeset[20], "yeti60") self.assertRaises(IndexError, nodeset.__getitem__, 21) # negative indices self.assertEqual(nodeset[-1], "yeti60") for n in range(1, len(nodeset)): self.assertEqual(nodeset[-n], nodeset[len(nodeset)-n]) # test getitem with some nodes without range nodeset = NodeSet("abc,cde[3-9,11],fgh") self.assertEqual(len(nodeset), 10) self.assertEqual(nodeset[0], "abc") self.assertEqual(nodeset[1], "cde3") self.assertEqual(nodeset[2], "cde4") self.assertEqual(nodeset[3], "cde5") self.assertEqual(nodeset[7], "cde9") self.assertEqual(nodeset[8], "cde11") self.assertEqual(nodeset[9], "fgh") self.assertRaises(IndexError, nodeset.__getitem__, 10) # test getitem with rangeset padding nodeset = NodeSet("prune[003-034,349-353/2]") self.assertEqual(len(nodeset), 35) self.assertEqual(nodeset[0], "prune003") self.assertEqual(nodeset[1], "prune004") self.assertEqual(nodeset[31], "prune034") self.assertEqual(nodeset[32], "prune349") self.assertEqual(nodeset[33], "prune351") self.assertEqual(nodeset[34], "prune353") self.assertRaises(IndexError, nodeset.__getitem__, 35) def testGetSlice(self): """test NodeSet getitem() with slice""" nodeset = NodeSet("yeti[30,34-51,59-60]") self.assertEqual(len(nodeset), 21) 
self.assertEqual(len(nodeset[0:2]), 2) self.assertEqual(str(nodeset[0:2]), "yeti[30,34]") self.assertEqual(len(nodeset[1:3]), 2) self.assertEqual(str(nodeset[1:3]), "yeti[34-35]") self.assertEqual(len(nodeset[19:21]), 2) self.assertEqual(str(nodeset[19:21]), "yeti[59-60]") self.assertEqual(len(nodeset[20:22]), 1) self.assertEqual(str(nodeset[20:22]), "yeti60") self.assertEqual(len(nodeset[21:24]), 0) self.assertEqual(str(nodeset[21:24]), "") # negative indices self.assertEqual(str(nodeset[:-1]), "yeti[30,34-51,59]") self.assertEqual(str(nodeset[:-2]), "yeti[30,34-51]") self.assertEqual(str(nodeset[1:-2]), "yeti[34-51]") self.assertEqual(str(nodeset[2:-2]), "yeti[35-51]") self.assertEqual(str(nodeset[9:-3]), "yeti[42-50]") self.assertEqual(str(nodeset[10:-9]), "yeti[43-44]") self.assertEqual(str(nodeset[10:-10]), "yeti43") self.assertEqual(str(nodeset[11:-10]), "") self.assertEqual(str(nodeset[11:-11]), "") self.assertEqual(str(nodeset[::-2]), "yeti[30,35,37,39,41,43,45,47,49,51,60]") self.assertEqual(str(nodeset[::-3]), "yeti[35,38,41,44,47,50,60]") # advanced self.assertEqual(str(nodeset[0:10:2]), "yeti[30,35,37,39,41]") self.assertEqual(str(nodeset[1:11:2]), "yeti[34,36,38,40,42]") self.assertEqual(str(nodeset[:11:3]), "yeti[30,36,39,42]") self.assertEqual(str(nodeset[11::4]), "yeti[44,48,59]") self.assertEqual(str(nodeset[14:]), "yeti[47-51,59-60]") self.assertEqual(str(nodeset[:]), "yeti[30,34-51,59-60]") self.assertEqual(str(nodeset[::5]), "yeti[30,38,43,48,60]") # with unindexed nodes nodeset = NodeSet("foo,bar,bur") self.assertEqual(len(nodeset), 3) self.assertEqual(len(nodeset[0:2]), 2) self.assertEqual(str(nodeset[0:2]), "bar,bur") self.assertEqual(str(nodeset[1:2]), "bur") self.assertEqual(str(nodeset[1:3]), "bur,foo") self.assertEqual(str(nodeset[2:4]), "foo") nodeset = NodeSet("foo,bar,bur3,bur1") self.assertEqual(len(nodeset), 4) self.assertEqual(len(nodeset[0:2]), 2) self.assertEqual(len(nodeset[1:3]), 2) self.assertEqual(len(nodeset[2:4]), 2) 
        self.assertEqual(len(nodeset[3:5]), 1)
        self.assertEqual(str(nodeset[2:3]), "bur3")
        self.assertEqual(str(nodeset[3:4]), "foo")
        self.assertEqual(str(nodeset[0:2]), "bar,bur1")
        self.assertEqual(str(nodeset[1:3]), "bur[1,3]")
        # using range step
        nodeset = NodeSet("yeti[10-98/2]")
        self.assertEqual(str(nodeset[1:9:3]), "yeti[12,18,24]")
        self.assertEqual(str(nodeset[::17]), "yeti[10,44,78]")
        # autostep=2 makes the result fold back into a stepped range
        nodeset = NodeSet("yeti[10-98/2]", autostep=2)
        self.assertEqual(str(nodeset[22:29]), "yeti[54-66/2]")
        # stepping scalability
        nodeset = NodeSet("yeti[10-9800/2]", autostep=2)
        self.assertEqual(str(nodeset[22:2900]), "yeti[54-5808/2]")
        self.assertEqual(str(nodeset[22:2900:3]), "yeti[54-5808/6]")
        # slice spanning several disjoint ranges of the same pattern
        nodeset = NodeSet("yeti[10-14,20-26,30-33]")
        self.assertEqual(str(nodeset[2:6]), "yeti[12-14,20]")
        # multiple patterns
        nodeset = NodeSet("stone[1-9],wood[1-9]")
        self.assertEqual(str(nodeset[:]), "stone[1-9],wood[1-9]")
        self.assertEqual(str(nodeset[1:2]), "stone2")
        self.assertEqual(str(nodeset[8:9]), "stone9")
        # slice crossing the pattern boundary
        self.assertEqual(str(nodeset[8:10]), "stone9,wood1")
        self.assertEqual(str(nodeset[9:10]), "wood1")
        self.assertEqual(str(nodeset[9:]), "wood[1-9]")
        nodeset = NodeSet("stone[1-9],water[10-12],wood[1-9]")
        self.assertEqual(str(nodeset[8:10]), "stone9,water10")
        self.assertEqual(str(nodeset[11:15]), "water12,wood[1-3]")
        # mix of indexed and non-indexed node names
        nodeset = NodeSet("stone[1-9],water,wood[1-9]")
        self.assertEqual(str(nodeset[8:10]), "stone9,water")
        self.assertEqual(str(nodeset[8:11]), "stone9,water,wood1")
        self.assertEqual(str(nodeset[9:11]), "water,wood1")
        self.assertEqual(str(nodeset[9:12]), "water,wood[1-2]")

    def testSplit(self):
        """test NodeSet split()"""
        # Empty nodeset
        nodeset = NodeSet()
        self.assertEqual((), tuple(nodeset.split(2)))
        # Not enough element
        nodeset = NodeSet("foo[1]")
        self.assertEqual((NodeSet("foo[1]"),), \
                         tuple(nodeset.split(2)))
        # Exact number of elements
        nodeset = NodeSet("foo[1-6]")
        self.assertEqual((NodeSet("foo[1-2]"), NodeSet("foo[3-4]"), \
                          NodeSet("foo[5-6]")), tuple(nodeset.split(3)))
        # Check
limit results nodeset = NodeSet("bar[2-4]") for i in (3, 4): self.assertEqual((NodeSet("bar2"), NodeSet("bar3"), \ NodeSet("bar4")), tuple(nodeset.split(i))) def testAdd(self): """test NodeSet add()""" nodeset = NodeSet() nodeset.add("green") self.assertEqual(len(nodeset), 1) self.assertEqual(str(nodeset), "green") self.assertEqual(nodeset[0], "green") nodeset = NodeSet() nodeset.add("green35") self.assertEqual(len(nodeset), 1) self.assertEqual(str(nodeset), "green35") self.assertEqual(nodeset[0], "green35") nodeset = NodeSet() nodeset.add("green[3,5-46]") self.assertEqual(len(nodeset), 43) self.assertEqual(nodeset[0], "green3") nodeset = NodeSet() nodeset.add("green[3,5-46],black64,orange[045-148]") self.assertEqual(len(nodeset), 148) self.assert_("green5" in nodeset) self.assert_("black64" in nodeset) self.assert_("orange046" in nodeset) def testAddAdjust(self): """test NodeSet adjusting add()""" # autostep OFF nodeset = NodeSet() nodeset.add("green[1-8/2]") self.assertEqual(str(nodeset), "green[1,3,5,7]") self.assertEqual(len(nodeset), 4) nodeset.add("green[6-17/2]") self.assertEqual(str(nodeset), "green[1,3,5-8,10,12,14,16]") self.assertEqual(len(nodeset), 10) # autostep ON nodeset = NodeSet(autostep=2) nodeset.add("green[1-8/2]") self.assertEqual(str(nodeset), "green[1-7/2]") self.assertEqual(len(nodeset), 4) nodeset.add("green[6-17/2]") self.assertEqual(str(nodeset), "green[1-5/2,6-7,8-16/2]") self.assertEqual(len(nodeset), 10) def testRemove(self): """test NodeSet remove()""" # from empty nodeset nodeset = NodeSet() self.assertEqual(len(nodeset), 0) self.assertRaises(KeyError, nodeset.remove, "tintin23") self.assertRaises(KeyError, nodeset.remove, "tintin[35-36]") nodeset.update("milou36") self.assertEqual(len(nodeset), 1) self.assertRaises(KeyError, nodeset.remove, "tintin23") self.assert_("milou36" in nodeset) nodeset.remove("milou36") self.assertEqual(len(nodeset), 0) nodeset.update("milou[36-60,76,95],haddock[1-12],tournesol") 
self.assertEqual(len(nodeset), 40) nodeset.remove("milou76") self.assertEqual(len(nodeset), 39) nodeset.remove("milou[36-39]") self.assertEqual(len(nodeset), 35) self.assertRaises(KeyError, nodeset.remove, "haddock13") self.assertEqual(len(nodeset), 35) self.assertRaises(KeyError, nodeset.remove, "haddock[1-15]") self.assertEqual(len(nodeset), 35) self.assertRaises(KeyError, nodeset.remove, "tutu") self.assertEqual(len(nodeset), 35) nodeset.remove("tournesol") self.assertEqual(len(nodeset), 34) nodeset.remove("haddock[1-12]") self.assertEqual(len(nodeset), 22) nodeset.remove("milou[40-60,95]") self.assertEqual(len(nodeset), 0) self.assertRaises(KeyError, nodeset.remove, "tournesol") self.assertRaises(KeyError, nodeset.remove, "milou40") # from non-empty nodeset nodeset = NodeSet("haddock[16-3045]") self.assertEqual(len(nodeset), 3030) self.assertRaises(KeyError, nodeset.remove, "haddock15") self.assert_("haddock16" in nodeset) self.assertEqual(len(nodeset), 3030) nodeset.remove("haddock[16,18-3044]") self.assertEqual(len(nodeset), 2) self.assertRaises(KeyError, nodeset.remove, "haddock3046") self.assertRaises(KeyError, nodeset.remove, "haddock[16,3060]") self.assertRaises(KeyError, nodeset.remove, "haddock[3045-3046]") self.assertRaises(KeyError, nodeset.remove, "haddock[3045,3049-3051/2]") nodeset.remove("haddock3045") self.assertEqual(len(nodeset), 1) self.assertRaises(KeyError, nodeset.remove, "haddock[3045]") self.assertEqual(len(nodeset), 1) nodeset.remove("haddock17") self.assertEqual(len(nodeset), 0) def testClear(self): """test NodeSet clear()""" nodeset = NodeSet("purple[35-39]") self.assertEqual(len(nodeset), 5) nodeset.clear() self.assertEqual(len(nodeset), 0) def testContains(self): """test NodeSet contains()""" nodeset = NodeSet() self.assertEqual(len(nodeset), 0) self.assert_("foo" not in nodeset) nodeset.update("bar") self.assertEqual(len(nodeset), 1) self.assertEqual(str(nodeset), "bar") self.assert_("bar" in nodeset) nodeset.update("foo[20-40]") 
self.assert_("foo" not in nodeset) self.assert_("foo39" in nodeset) for node in nodeset: self.assert_(node in nodeset) nodeset.update("dark[2000-4000/4]") self.assert_("dark3000" in nodeset) self.assert_("dark3002" not in nodeset) for node in nodeset: self.assert_(node in nodeset) nodeset = NodeSet("scale[0-10000]") self.assert_("black64" not in nodeset) self.assert_("scale9346" in nodeset) nodeset = NodeSet("scale[0-10000]", autostep=2) self.assert_("scale9346" in nodeset[::2]) self.assert_("scale9347" not in nodeset[::2]) def testContainsUsingPadding(self): """test NodeSet contains() when using padding""" nodeset = NodeSet("white[001,030]") nodeset.add("white113") self.assertTrue(NodeSet("white30") in nodeset) self.assertTrue(NodeSet("white030") in nodeset) # case: nodeset without padding info is compared to a # padding-initialized range self.assert_(NodeSet("white113") in nodeset) self.assert_(NodeSet("white[001,113]") in nodeset) self.assert_(NodeSet("gene0113") in NodeSet("gene[001,030,113]")) self.assert_(NodeSet("gene0113") in NodeSet("gene[0001,0030,0113]")) self.assert_(NodeSet("gene0113") in NodeSet("gene[098-113]")) self.assert_(NodeSet("gene0113") in NodeSet("gene[0098-0113]")) # case: len(str(ielem)) >= rgpad nodeset = NodeSet("white[001,099]") nodeset.add("white100") nodeset.add("white1000") self.assert_(NodeSet("white1000") in nodeset) def testIsSuperSet(self): """test NodeSet issuperset()""" nodeset = NodeSet("tronic[0036-1630]") self.assertEqual(len(nodeset), 1595) self.assert_(nodeset.issuperset("tronic[0036-1630]")) self.assert_(nodeset.issuperset("tronic[0140-0200]")) self.assert_(nodeset.issuperset(NodeSet("tronic[0140-0200]"))) self.assert_(nodeset.issuperset("tronic0070")) self.assert_(not nodeset.issuperset("tronic0034")) # check padding issue - since 1.6 padding is ignored in this case self.assert_(nodeset.issuperset("tronic36")) self.assert_(nodeset.issuperset("tronic[36-40]")) self.assert_(nodeset.issuperset(NodeSet("tronic[36-40]"))) # 
check gt self.assert_(nodeset > NodeSet("tronic[0100-0200]")) self.assert_(not nodeset > NodeSet("tronic[0036-1630]")) self.assert_(not nodeset > NodeSet("tronic[0036-1631]")) self.assert_(nodeset >= NodeSet("tronic[0100-0200]")) self.assert_(nodeset >= NodeSet("tronic[0036-1630]")) self.assert_(not nodeset >= NodeSet("tronic[0036-1631]")) # multiple patterns case nodeset = NodeSet("tronic[0036-1630],lounge[20-660/2]") self.assert_(nodeset > NodeSet("tronic[0100-0200]")) self.assert_(nodeset > NodeSet("lounge[36-400/2]")) self.assert_(nodeset.issuperset(NodeSet("lounge[36-400/2],tronic[0100-660]"))) self.assert_(nodeset > NodeSet("lounge[36-400/2],tronic[0100-660]")) def testIsSubSet(self): """test NodeSet issubset()""" nodeset = NodeSet("artcore[3-999]") self.assertEqual(len(nodeset), 997) self.assert_(nodeset.issubset("artcore[3-999]")) self.assert_(nodeset.issubset("artcore[1-1000]")) self.assert_(not nodeset.issubset("artcore[350-427]")) # check lt self.assert_(nodeset < NodeSet("artcore[2-32000]")) self.assert_(nodeset < NodeSet("artcore[2-32000],lounge[35-65/2]")) self.assert_(not nodeset < NodeSet("artcore[3-999]")) self.assert_(not nodeset < NodeSet("artcore[3-980]")) self.assert_(not nodeset < NodeSet("artcore[2-998]")) self.assert_(nodeset <= NodeSet("artcore[2-32000]")) self.assert_(nodeset <= NodeSet("artcore[2-32000],lounge[35-65/2]")) self.assert_(nodeset <= NodeSet("artcore[3-999]")) self.assert_(not nodeset <= NodeSet("artcore[3-980]")) self.assert_(not nodeset <= NodeSet("artcore[2-998]")) self.assertEqual(len(nodeset), 997) # check padding issue - since 1.6 padding is ignored in this case self.assert_(nodeset.issubset("artcore[0001-1000]")) self.assert_(not nodeset.issubset("artcore030")) # multiple patterns case nodeset = NodeSet("tronic[0036-1630],lounge[20-660/2]") self.assert_(nodeset < NodeSet("tronic[0036-1630],lounge[20-662/2]")) self.assert_(nodeset < NodeSet("tronic[0035-1630],lounge[20-660/2]")) self.assert_(not nodeset < 
NodeSet("tronic[0035-1630],lounge[22-660/2]")) self.assert_(nodeset < NodeSet("tronic[0036-1630],lounge[20-660/2],artcore[034-070]")) self.assert_(nodeset < NodeSet("tronic[0032-1880],lounge[2-700/2],artcore[039-040]")) self.assert_(nodeset.issubset("tronic[0032-1880],lounge[2-700/2],artcore[039-040]")) self.assert_(nodeset.issubset(NodeSet("tronic[0032-1880],lounge[2-700/2],artcore[039-040]"))) def testSymmetricDifference(self): """test NodeSet symmetric_difference()""" nsstr = "red[34-55,76-249,300-403],blue,green" nodeset = NodeSet(nsstr) self.assertEqual(len(nodeset), 302) nsstr2 = "red[32-57,72-249,300-341],blue,yellow" nodeset2 = NodeSet(nsstr2) self.assertEqual(len(nodeset2), 248) inodeset = nodeset.symmetric_difference(nodeset2) # originals should not change self.assertEqual(len(nodeset), 302) self.assertEqual(len(nodeset2), 248) self.assertEqual(str(nodeset), "blue,green,red[34-55,76-249,300-403]") self.assertEqual(str(nodeset2), "blue,red[32-57,72-249,300-341],yellow") # result self.assertEqual(len(inodeset), 72) self.assertEqual(str(inodeset), \ "green,red[32-33,56-57,72-75,342-403],yellow") def testSymmetricDifferenceUpdate(self): """test NodeSet symmetric_difference_update()""" nodeset = NodeSet("artcore[3-999]") self.assertEqual(len(nodeset), 997) nodeset.symmetric_difference_update("artcore[1-2000]") self.assertEqual(len(nodeset), 1003) self.assertEqual(str(nodeset), "artcore[1-2,1000-2000]") nodeset = NodeSet("artcore[3-999],lounge") self.assertEqual(len(nodeset), 998) nodeset.symmetric_difference_update("artcore[1-2000]") self.assertEqual(len(nodeset), 1004) self.assertEqual(str(nodeset), "artcore[1-2,1000-2000],lounge") nodeset = NodeSet("artcore[3-999],lounge") self.assertEqual(len(nodeset), 998) nodeset.symmetric_difference_update("artcore[1-2000],lounge") self.assertEqual(len(nodeset), 1003) self.assertEqual(str(nodeset), "artcore[1-2,1000-2000]") nodeset = NodeSet("artcore[3-999],lounge") self.assertEqual(len(nodeset), 998) nodeset2 = 
NodeSet("artcore[1-2000],lounge") nodeset.symmetric_difference_update(nodeset2) self.assertEqual(len(nodeset), 1003) self.assertEqual(str(nodeset), "artcore[1-2,1000-2000]") self.assertEqual(len(nodeset2), 2001) # check const argument nodeset.symmetric_difference_update("artcore[1-2000],lounge") self.assertEqual(len(nodeset), 998) self.assertEqual(str(nodeset), "artcore[3-999],lounge") def testOperatorSymmetricDifference(self): """test NodeSet symmetric_difference() and ^ operator""" nodeset = NodeSet("artcore[3-999]") self.assertEqual(len(nodeset), 997) result = nodeset.symmetric_difference("artcore[1-2000]") self.assertEqual(len(result), 1003) self.assertEqual(str(result), "artcore[1-2,1000-2000]") self.assertEqual(len(nodeset), 997) # test ^ operator nodeset = NodeSet("artcore[3-999]") self.assertEqual(len(nodeset), 997) nodeset2 = NodeSet("artcore[1-2000]") result = nodeset ^ nodeset2 self.assertEqual(len(result), 1003) self.assertEqual(str(result), "artcore[1-2,1000-2000]") self.assertEqual(len(nodeset), 997) self.assertEqual(len(nodeset2), 2000) # check that n ^ n returns empty NodeSet nodeset = NodeSet("lounge[3-999]") self.assertEqual(len(nodeset), 997) result = nodeset ^ nodeset self.assertEqual(len(result), 0) def testBinarySanityCheck(self): """test NodeSet binary sanity check""" ns1 = NodeSet("1-5") ns2 = "4-6" self.assertRaises(TypeError, ns1.__gt__, ns2) self.assertRaises(TypeError, ns1.__lt__, ns2) def testBinarySanityCheckNotImplementedSubtle(self): """test NodeSet binary sanity check (NotImplemented subtle)""" ns1 = NodeSet("1-5") ns2 = "4-6" self.assertEqual(ns1.__and__(ns2), NotImplemented) self.assertEqual(ns1.__or__(ns2), NotImplemented) self.assertEqual(ns1.__sub__(ns2), NotImplemented) self.assertEqual(ns1.__xor__(ns2), NotImplemented) # Should implicitely raises TypeError if the real operator # version is invoked. 
To test that, we perform a manual check # as an additional function would be needed to check with # assertRaises(): good_error = False try: ns3 = ns1 & ns2 except TypeError: good_error = True self.assert_(good_error, "TypeError not raised for &") good_error = False try: ns3 = ns1 | ns2 except TypeError: good_error = True self.assert_(good_error, "TypeError not raised for |") good_error = False try: ns3 = ns1 - ns2 except TypeError: good_error = True self.assert_(good_error, "TypeError not raised for -") good_error = False try: ns3 = ns1 ^ ns2 except TypeError: good_error = True self.assert_(good_error, "TypeError not raised for ^") def testIsSubSetError(self): """test NodeSet issubset type error""" ns1 = NodeSet("1-5") ns2 = 4 self.assertRaises(TypeError, ns1.issubset, ns2) def testExpandFunction(self): """test NodeSet expand() utility function""" self.assertEqual(expand("purple[1-3]"), [ "purple1", "purple2", "purple3" ]) def testFoldFunction(self): """test NodeSet fold() utility function""" self.assertEqual(fold("purple1,purple2,purple3"), "purple[1-3]") def testEquality(self): """test NodeSet equality""" ns0_1 = NodeSet() ns0_2 = NodeSet() self.assertEqual(ns0_1, ns0_2) ns1 = NodeSet("roma[50-99]-ipmi,cors[113,115-117,130,166-172],cws-tigrou,tigrou3") ns2 = NodeSet("roma[50-99]-ipmi,cors[113,115-117,130,166-172],cws-tigrou,tigrou3") self.assertEqual(ns1, ns2) ns3 = NodeSet("cws-tigrou,tigrou3,cors[113,115-117,166-172],roma[50-99]-ipmi,cors130") self.assertEqual(ns1, ns3) ns4 = NodeSet("roma[50-99]-ipmi,cors[113,115-117,130,166-171],cws-tigrou,tigrou[3-4]") self.assertNotEqual(ns1, ns4) def testIterOrder(self): """test NodeSet nodes name order in iter and str""" ns_b = NodeSet("bcluster25") ns_c = NodeSet("ccluster12") ns_a1 = NodeSet("acluster4") ns_a2 = NodeSet("acluster39") ns_a3 = NodeSet("acluster41") ns = ns_c | ns_a1 | ns_b | ns_a2 | ns_a3 self.assertEqual(str(ns), "acluster[4,39,41],bcluster25,ccluster12") nodelist = list(iter(ns)) 
self.assertEqual(nodelist, ['acluster4', 'acluster39', 'acluster41', \ 'bcluster25', 'ccluster12']) def test_nsiter(self): """test NodeSet.nsiter() iterator""" ns1 = NodeSet("roma[50-61]-ipmi,cors[113,115-117,130,166-169],cws-tigrou,tigrou3") self.assertEqual(list(ns1), ['cors113', 'cors115', 'cors116', 'cors117', 'cors130', 'cors166', 'cors167', 'cors168', 'cors169', 'cws-tigrou', 'roma50-ipmi', 'roma51-ipmi', 'roma52-ipmi', 'roma53-ipmi', 'roma54-ipmi', 'roma55-ipmi', 'roma56-ipmi', 'roma57-ipmi', 'roma58-ipmi', 'roma59-ipmi', 'roma60-ipmi', 'roma61-ipmi', 'tigrou3']) self.assertEqual(list(ns1), [str(ns) for ns in ns1.nsiter()]) def test_contiguous(self): """test NodeSet.contiguous() iterator""" ns1 = NodeSet("cors,roma[50-61]-ipmi,cors[113,115-117,130,166-169],cws-tigrou,tigrou3") self.assertEqual(['cors', 'cors113', 'cors[115-117]', 'cors130', 'cors[166-169]', 'cws-tigrou', 'roma[50-61]-ipmi', 'tigrou3'], [str(ns) for ns in ns1.contiguous()]) def testEqualityMore(self): """test NodeSet equality (more)""" self.assertEqual(NodeSet(), NodeSet()) ns1 = NodeSet("nodealone") ns2 = NodeSet("nodealone") self.assertEqual(ns1, ns2) ns1 = NodeSet("clu3,clu[4-9],clu11") ns2 = NodeSet("clu[3-9,11]") self.assertEqual(ns1, ns2) if ns1 == None: self.fail("ns1 == None succeeded") if ns1 != None: pass else: self.fail("ns1 != None failed") def testNodeSetNone(self): """test NodeSet methods behavior with None argument""" nodeset = NodeSet(None) self.assertEqual(len(nodeset), 0) self.assertEqual(list(nodeset), []) nodeset.update(None) self.assertEqual(list(nodeset), []) nodeset.intersection_update(None) self.assertEqual(list(nodeset), []) nodeset.difference_update(None) self.assertEqual(list(nodeset), []) nodeset.symmetric_difference_update(None) self.assertEqual(list(nodeset), []) n = nodeset.union(None) self.assertEqual(list(n), []) self.assertEqual(len(n), 0) n = nodeset.intersection(None) self.assertEqual(list(n), []) n = nodeset.difference(None) self.assertEqual(list(n), []) n 
= nodeset.symmetric_difference(None) self.assertEqual(list(n), []) nodeset = NodeSet("abc[3,6-89],def[3-98,104,128-133]") self.assertEqual(len(nodeset), 188) nodeset.update(None) self.assertEqual(len(nodeset), 188) nodeset.intersection_update(None) self.assertEqual(len(nodeset), 0) self.assertEqual(list(nodeset), []) nodeset = NodeSet("abc[3,6-89],def[3-98,104,128-133]") self.assertEqual(len(nodeset), 188) nodeset.difference_update(None) self.assertEqual(len(nodeset), 188) nodeset.symmetric_difference_update(None) self.assertEqual(len(nodeset), 188) n = nodeset.union(None) self.assertEqual(len(nodeset), 188) n = nodeset.intersection(None) self.assertEqual(list(n), []) self.assertEqual(len(n), 0) n = nodeset.difference(None) self.assertEqual(len(n), 188) n = nodeset.symmetric_difference(None) self.assertEqual(len(n), 188) self.assertFalse(n.issubset(None)) self.assertTrue(n.issuperset(None)) n = NodeSet(None) n.clear() self.assertEqual(len(n), 0) def testCopy(self): """test NodeSet.copy()""" nodeset = NodeSet("zclu[115-117,130,166-170],glycine[68,4780-4999]") self.assertEqual(str(nodeset), \ "glycine[68,4780-4999],zclu[115-117,130,166-170]") nodeset2 = nodeset.copy() nodeset3 = nodeset.copy() self.assertEqual(nodeset, nodeset2) # content equality self.assertTrue(isinstance(nodeset, NodeSet)) self.assertTrue(isinstance(nodeset2, NodeSet)) self.assertTrue(isinstance(nodeset3, NodeSet)) nodeset2.remove("glycine68") self.assertEqual(len(nodeset), len(nodeset2) + 1) self.assertNotEqual(nodeset, nodeset2) self.assertEqual(str(nodeset2), \ "glycine[4780-4999],zclu[115-117,130,166-170]") self.assertEqual(str(nodeset), \ "glycine[68,4780-4999],zclu[115-117,130,166-170]") nodeset2.add("glycine68") self.assertEqual(str(nodeset2), \ "glycine[68,4780-4999],zclu[115-117,130,166-170]") self.assertEqual(nodeset, nodeset3) nodeset3.update(NodeSet("zclu118")) self.assertNotEqual(nodeset, nodeset3) self.assertEqual(len(nodeset) + 1, len(nodeset3)) self.assertEqual(str(nodeset), \ 
"glycine[68,4780-4999],zclu[115-117,130,166-170]") self.assertEqual(str(nodeset3), \ "glycine[68,4780-4999],zclu[115-118,130,166-170]") def test_unpickle_v1_3_py24(self): """test NodeSet unpickling (against v1.3/py24)""" nodeset = pickle.loads(binascii.a2b_base64("gAJjQ2x1c3RlclNoZWxsLk5vZGVTZXQKTm9kZVNldApxACmBcQF9cQIoVQdfbGVuZ3RocQNLAFUJX3BhdHRlcm5zcQR9cQUoVQh5ZWxsb3clc3EGKGNDbHVzdGVyU2hlbGwuTm9kZVNldApSYW5nZVNldApxB29xCH1xCShoA0sBVQlfYXV0b3N0ZXBxCkdUskmtJZTDfVUHX3Jhbmdlc3ELXXEMKEsESwRLAUsAdHENYXViVQZibHVlJXNxDihoB29xD31xEChoA0sIaApHVLJJrSWUw31oC11xESgoSwZLCksBSwB0cRIoSw1LDUsBSwB0cRMoSw9LD0sBSwB0cRQoSxFLEUsBSwB0cRVldWJVB2dyZWVuJXNxFihoB29xF31xGChoA0tlaApHVLJJrSWUw31oC11xGShLAEtkSwFLAHRxGmF1YlUDcmVkcRtOdWgKTnViLg==")) self.assertEqual(nodeset, NodeSet("blue[6-10,13,15,17],green[0-100],red,yellow4")) self.assertEqual(str(nodeset), "blue[6-10,13,15,17],green[0-100],red,yellow4") self.assertEqual(len(nodeset), 111) self.assertEqual(nodeset[0], "blue6") self.assertEqual(nodeset[1], "blue7") self.assertEqual(nodeset[-1], "yellow4") # unpickle_v1_4_py24 : unpickling fails as v1.4 does not have slice pickling workaround def test_unpickle_v1_3_py26(self): """test NodeSet unpickling (against v1.3/py26)""" nodeset = pickle.loads(binascii.a2b_base64("gAJjQ2x1c3RlclNoZWxsLk5vZGVTZXQKTm9kZVNldApxACmBcQF9cQIoVQdfbGVuZ3RocQNLAFUJX3BhdHRlcm5zcQR9cQUoVQh5ZWxsb3clc3EGKGNDbHVzdGVyU2hlbGwuTm9kZVNldApSYW5nZVNldApxB29xCH1xCShoA0sBVQlfYXV0b3N0ZXBxCkdUskmtJZTDfVUHX3Jhbmdlc3ELXXEMKEsESwRLAUsAdHENYXViVQZibHVlJXNxDihoB29xD31xEChoA0sIaApHVLJJrSWUw31oC11xESgoSwZLCksBSwB0cRIoSw1LDUsBSwB0cRMoSw9LD0sBSwB0cRQoSxFLEUsBSwB0cRVldWJVB2dyZWVuJXNxFihoB29xF31xGChoA0tlaApHVLJJrSWUw31oC11xGShLAEtkSwFLAHRxGmF1YlUDcmVkcRtOdWgKTnViLg==")) self.assertEqual(nodeset, NodeSet("blue[6-10,13,15,17],green[0-100],red,yellow4")) self.assertEqual(str(nodeset), "blue[6-10,13,15,17],green[0-100],red,yellow4") self.assertEqual(len(nodeset), 111) self.assertEqual(nodeset[0], "blue6") self.assertEqual(nodeset[1], "blue7") 
self.assertEqual(nodeset[-1], "yellow4") # unpickle_v1_4_py24 : unpickling fails as v1.4 does not have slice pickling workaround def test_unpickle_v1_4_py26(self): """test NodeSet unpickling (against v1.4/py26)""" nodeset = pickle.loads(binascii.a2b_base64("gAJjQ2x1c3RlclNoZWxsLk5vZGVTZXQKTm9kZVNldApxACmBcQF9cQIoVQdfbGVuZ3RocQNLAFUJX3BhdHRlcm5zcQR9cQUoVQh5ZWxsb3clc3EGKGNDbHVzdGVyU2hlbGwuTm9kZVNldApSYW5nZVNldApxB29xCH1xCihoA0sBVQlfYXV0b3N0ZXBxC0dUskmtJZTDfVUHX3Jhbmdlc3EMXXENY19fYnVpbHRpbl9fCnNsaWNlCnEOSwRLBUsBh3EPUnEQSwCGcRFhVQhfdmVyc2lvbnESSwJ1YlUGYmx1ZSVzcRMoaAdvcRR9cRUoaANLCGgLR1SySa0llMN9aAxdcRYoaA5LBksLSwGHcRdScRhLAIZxGWgOSw1LDksBh3EaUnEbSwCGcRxoDksPSxBLAYdxHVJxHksAhnEfaA5LEUsSSwGHcSBScSFLAIZxImVoEksCdWJVB2dyZWVuJXNxIyhoB29xJH1xJShoA0tlaAtHVLJJrSWUw31oDF1xJmgOSwBLZUsBh3EnUnEoSwCGcSlhaBJLAnViVQNyZWRxKk51aAtOdWIu")) self.assertEqual(nodeset, NodeSet("blue[6-10,13,15,17],green[0-100],red,yellow4")) self.assertEqual(str(nodeset), "blue[6-10,13,15,17],green[0-100],red,yellow4") self.assertEqual(len(nodeset), 111) self.assertEqual(nodeset[0], "blue6") self.assertEqual(nodeset[1], "blue7") self.assertEqual(nodeset[-1], "yellow4") def test_unpickle_v1_5_py24(self): """test NodeSet unpickling (against v1.5/py24)""" nodeset = pickle.loads(binascii.a2b_base64("gAJjQ2x1c3RlclNoZWxsLk5vZGVTZXQKTm9kZVNldApxACmBcQF9cQIoVQdfbGVuZ3RocQNLAFUJX3BhdHRlcm5zcQR9cQUoVQh5ZWxsb3clc3EGKGNDbHVzdGVyU2hlbGwuTm9kZVNldApSYW5nZVNldApxB29xCH1xCihoA0sBVQlfYXV0b3N0ZXBxC0dUskmtJZTDfVUHX3Jhbmdlc3EMXXENSwRLBUsBh3EOSwCGcQ9hVQhfdmVyc2lvbnEQSwJ1YlUGYmx1ZSVzcREoaAdvcRJ9cRMoaANLCGgLR1SySa0llMN9aAxdcRQoSwZLC0sBh3EVSwCGcRZLDUsOSwGHcRdLAIZxGEsPSxBLAYdxGUsAhnEaSxFLEksBh3EbSwCGcRxlaBBLAnViVQdncmVlbiVzcR0oaAdvcR59cR8oaANLZWgLR1SySa0llMN9aAxdcSBLAEtlSwGHcSFLAIZxImFoEEsCdWJVA3JlZHEjTnVoC051Yi4=")) self.assertEqual(nodeset, NodeSet("blue[6-10,13,15,17],green[0-100],red,yellow4")) self.assertEqual(str(nodeset), "blue[6-10,13,15,17],green[0-100],red,yellow4") self.assertEqual(len(nodeset), 111) 
self.assertEqual(nodeset[0], "blue6") self.assertEqual(nodeset[1], "blue7") self.assertEqual(nodeset[-1], "yellow4") def test_unpickle_v1_5_py26(self): """test NodeSet unpickling (against v1.5/py26)""" nodeset = pickle.loads(binascii.a2b_base64("gAJjQ2x1c3RlclNoZWxsLk5vZGVTZXQKTm9kZVNldApxACmBcQF9cQIoVQdfbGVuZ3RocQNLAFUJX3BhdHRlcm5zcQR9cQUoVQh5ZWxsb3clc3EGKGNDbHVzdGVyU2hlbGwuTm9kZVNldApSYW5nZVNldApxB29xCH1xCihoA0sBVQlfYXV0b3N0ZXBxC0dUskmtJZTDfVUHX3Jhbmdlc3EMXXENY19fYnVpbHRpbl9fCnNsaWNlCnEOSwRLBUsBh3EPUnEQSwCGcRFhVQhfdmVyc2lvbnESSwJ1YlUGYmx1ZSVzcRMoaAdvcRR9cRUoaANLCGgLR1SySa0llMN9aAxdcRYoaA5LBksLSwGHcRdScRhLAIZxGWgOSw1LDksBh3EaUnEbSwCGcRxoDksPSxBLAYdxHVJxHksAhnEfaA5LEUsSSwGHcSBScSFLAIZxImVoEksCdWJVB2dyZWVuJXNxIyhoB29xJH1xJShoA0tlaAtHVLJJrSWUw31oDF1xJmgOSwBLZUsBh3EnUnEoSwCGcSlhaBJLAnViVQNyZWRxKk51aAtOdWIu")) self.assertEqual(nodeset, NodeSet("blue[6-10,13,15,17],green[0-100],red,yellow4")) self.assertEqual(str(nodeset), "blue[6-10,13,15,17],green[0-100],red,yellow4") self.assertEqual(len(nodeset), 111) self.assertEqual(nodeset[0], "blue6") self.assertEqual(nodeset[1], "blue7") self.assertEqual(nodeset[-1], "yellow4") def test_unpickle_v1_6_py24(self): """test NodeSet unpickling (against v1.6/py24)""" nodeset = pickle.loads(binascii.a2b_base64("gAJjQ2x1c3RlclNoZWxsLk5vZGVTZXQKTm9kZVNldApxACmBcQF9cQIoVQdfbGVuZ3RocQNLAFUJX3BhdHRlcm5zcQR9cQUoVQh5ZWxsb3clc3EGY0NsdXN0ZXJTaGVsbC5SYW5nZVNldApSYW5nZVNldApxB1UBNHEIhXEJUnEKfXELKFUHcGFkZGluZ3EMTlUJX2F1dG9zdGVwcQ1HVLJJrSWUw31VCF92ZXJzaW9ucQ5LA3ViVQZibHVlJXNxD2gHVQ02LTEwLDEzLDE1LDE3cRCFcRFScRJ9cRMoaAxOaA1HVLJJrSWUw31oDksDdWJVB2dyZWVuJXNxFGgHVQUwLTEwMHEVhXEWUnEXfXEYKGgMTmgNR1SySa0llMN9aA5LA3ViVQNyZWRxGU51aA1OdWIu")) self.assertEqual(nodeset, NodeSet("blue[6-10,13,15,17],green[0-100],red,yellow4")) self.assertEqual(str(nodeset), "blue[6-10,13,15,17],green[0-100],red,yellow4") self.assertEqual(len(nodeset), 111) self.assertEqual(nodeset[0], "blue6") self.assertEqual(nodeset[1], "blue7") self.assertEqual(nodeset[-1], "yellow4") def 
test_unpickle_v1_6_py26(self): """test NodeSet unpickling (against v1.6/py26)""" nodeset = pickle.loads(binascii.a2b_base64("gAJjQ2x1c3RlclNoZWxsLk5vZGVTZXQKTm9kZVNldApxACmBcQF9cQIoVQdfbGVuZ3RocQNLAFUJX3BhdHRlcm5zcQR9cQUoVQh5ZWxsb3clc3EGY0NsdXN0ZXJTaGVsbC5SYW5nZVNldApSYW5nZVNldApxB1UBNHEIhXEJUnEKfXELKFUHcGFkZGluZ3EMTlUJX2F1dG9zdGVwcQ1HVLJJrSWUw31VCF92ZXJzaW9ucQ5LA3ViVQZibHVlJXNxD2gHVQ02LTEwLDEzLDE1LDE3cRCFcRFScRJ9cRMoaAxOaA1HVLJJrSWUw31oDksDdWJVB2dyZWVuJXNxFGgHVQUwLTEwMHEVhXEWUnEXfXEYKGgMTmgNR1SySa0llMN9aA5LA3ViVQNyZWRxGU51aA1OdWIu")) self.assertEqual(nodeset, NodeSet("blue[6-10,13,15,17],green[0-100],red,yellow4")) self.assertEqual(str(nodeset), "blue[6-10,13,15,17],green[0-100],red,yellow4") self.assertEqual(len(nodeset), 111) self.assertEqual(nodeset[0], "blue6") self.assertEqual(nodeset[1], "blue7") self.assertEqual(nodeset[-1], "yellow4") def test_pickle_current(self): """test NodeSet pickling (current version)""" dump = pickle.dumps(NodeSet("foo[1-100]")) self.assertNotEqual(dump, None) nodeset = pickle.loads(dump) self.assertEqual(nodeset, NodeSet("foo[1-100]")) self.assertEqual(str(nodeset), "foo[1-100]") self.assertEqual(nodeset[0], "foo1") self.assertEqual(nodeset[1], "foo2") self.assertEqual(nodeset[-1], "foo100") def testNodeSetBase(self): """test underlying NodeSetBase class""" rset = RangeSet("1-100,200") self.assertEqual(len(rset), 101) nsb = NodeSetBase("foo%sbar", rset) self.assertEqual(len(nsb), len(rset)) self.assertEqual(str(nsb), "foo[1-100,200]bar") nsbcpy = nsb.copy() self.assertEqual(len(nsbcpy), 101) self.assertEqual(str(nsbcpy), "foo[1-100,200]bar") other = NodeSetBase("foo%sbar", RangeSet("201")) nsbcpy.add(other) self.assertEqual(len(nsb), 101) self.assertEqual(str(nsb), "foo[1-100,200]bar") self.assertEqual(len(nsbcpy), 102) self.assertEqual(str(nsbcpy), "foo[1-100,200-201]bar") def testNodeGroupBase(self): """test underlying NodeGroupBase class""" ngb = NodeGroupBase("@group") self.assertEqual(len(ngb), 1) self.assertEqual(str(ngb), 
"@group") self.assertRaises(ValueError, NodeGroupBase, "badgroup") if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(NodeSetTest) unittest.TextTestRunner(verbosity=2).run(suite) clustershell-1.6/tests/TaskRLimitsTest.py0000644000130500135250000000556311741571247020216 0ustar thiellgpocre#!/usr/bin/env python # ClusterShell task resource consumption/limits test suite # Written by S. Thiell 2010-10-19 """Unit test for ClusterShell Task (resource limits)""" import resource import subprocess import sys import unittest sys.path.insert(0, '../lib') from ClusterShell.Task import * from ClusterShell.Worker.Pdsh import WorkerPdsh class TaskRLimitsTest(unittest.TestCase): def setUp(self): """set soft nofile resource limit to 100""" subprocess.call(["ls", "-x", "/proc/self/fd"], stdout=sys.stdout) self.soft, self.hard = resource.getrlimit(resource.RLIMIT_NOFILE) resource.setrlimit(resource.RLIMIT_NOFILE, (100, self.hard)) def tearDown(self): """restore original resource limits""" resource.setrlimit(resource.RLIMIT_NOFILE, (self.soft, self.hard)) def _testPopen(self, stderr): task = task_self() self.assert_(task != None) task.set_info("fanout", 10) for i in xrange(2000): worker = task.shell("/bin/hostname", stderr=stderr) self.assert_(worker != None) # run task task.resume() def testPopen(self): """test resource usage with local task.shell(stderr=False)""" self._testPopen(False) def testPopenStderr(self): """test resource usage with local task.shell(stderr=True)""" self._testPopen(True) def _testRemote(self, stderr): task = task_self() self.assert_(task != None) task.set_info("fanout", 10) for i in xrange(400): worker = task.shell("/bin/hostname", nodes="localhost", stderr=stderr) self.assert_(worker != None) # run task task.resume() def testRemote(self): """test resource usage with remote task.shell(stderr=False)""" self._testRemote(False) def testRemoteStderr(self): """test resource usage with remote task.shell(stderr=True)""" 
self._testRemote(True) def _testRemotePdsh(self, stderr): task = task_self() self.assert_(task != None) task.set_info("fanout", 10) for i in xrange(200): worker = WorkerPdsh("localhost", handler=None, timeout=0, command="/bin/hostname", stderr=stderr) self.assert_(worker != None) task.schedule(worker) # run task task.resume() def testRemotePdsh(self): """test resource usage with WorkerPdsh(stderr=False)""" self._testRemotePdsh(False) def testRemotePdshStderr(self): """test resource usage with WorkerPdsh(stderr=True)""" self._testRemotePdsh(True) if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(TaskRLimitsTest) unittest.TextTestRunner(verbosity=2).run(suite) clustershell-1.6/tests/test_command.py0000644000130500135250000000223711741571247017620 0ustar thiellgpocre#!/usr/bin/env python # ClusterShell test command """ test_command.py [--help] [--test=test] [--rc=retcode] [--timeout=timeout] """ import getopt import sys import time import unittest def testHuge(): for i in range(0, 100000): print "huge! ", def testCmpOut(): print "abcdefghijklmnopqrstuvwxyz" def testTimeout(howlong): print "some buffer" print "here..." sys.stdout.flush() time.sleep(howlong) if __name__ == '__main__': rc = 0 test = None try: opts, args = getopt.getopt(sys.argv[1:], "ht:r:m:", ["help", "test=", "rc=", "timeout="]) except getopt.error, msg: print msg print "Try `python %s -h' for more information." % sys.argv[0] sys.exit(2) for k, v in opts: if k in ("-t", "--test"): if v == "huge": test = testHuge elif v == "cmp_out": test = testCmpOut elif k in ("-r", "--rc"): rc = int(v) elif k in ("-m", "--timeout"): testTimeout(int(v)) elif k in ("-h", "--help"): print __doc__ sys.exit(0) if test: test() sys.exit(rc) clustershell-1.6/tests/CLINodesetTest.py0000644000130500135250000006356211741571247017744 0ustar thiellgpocre#!/usr/bin/env python # scripts/nodeset.py tool test suite # Written by S. 
class CLINodesetTest(unittest.TestCase):
    """Unit test class for testing CLI/Nodeset.py"""
    # Each test drives the real `nodeset' CLI entry point (main) through
    # CLI_main (from TLib) and compares captured stdout/stderr and exit
    # code against exact expected strings.  Most tests exist in two
    # flavors: node sets passed as command-line arguments, and node sets
    # fed on stdin (input argument of _nodeset_t).

    def _nodeset_t(self, args, input, expected_stdout, expected_rc=0,
                   expected_stderr=None):
        # Run `nodeset <args>' with optional stdin text and assert on its
        # output/exit status.
        # NOTE(review): parameter `input' shadows the builtin of the same
        # name; harmless here but worth renaming in a behavior change.
        CLI_main(self, main, [ 'nodeset' ] + args, input, expected_stdout,
                 expected_rc, expected_stderr)

    def _battery_count(self, args):
        # Shared battery of --count checks, run with extra leading options
        # in `args' (e.g. --autostep variants) by test_001_count.
        self._nodeset_t(args + ["--count", "foo"], None, "1\n")
        self._nodeset_t(args + ["--count", "foo", "bar"], None, "2\n")
        self._nodeset_t(args + ["--count", "foo", "foo"], None, "1\n")
        self._nodeset_t(args + ["--count", "foo", "foo", "bar"], None, "2\n")
        self._nodeset_t(args + ["--count", "foo[0]"], None, "1\n")
        self._nodeset_t(args + ["--count", "foo[2]"], None, "1\n")
        self._nodeset_t(args + ["--count", "foo[1,2]"], None, "2\n")
        self._nodeset_t(args + ["--count", "foo[1-2]"], None, "2\n")
        self._nodeset_t(args + ["--count", "foo[1,2]", "foo[1-2]"], None, "2\n")
        self._nodeset_t(args + ["--count", "foo[1-200,245-394]"], None, "350\n")
        self._nodeset_t(args + ["--count", "foo[395-442]", "foo[1-200,245-394]"], None, "398\n")
        self._nodeset_t(args + ["--count", "foo[395-442]", "foo", "foo[1-200,245-394]"], None, "399\n")
        self._nodeset_t(args + ["--count", "foo[395-442]", "foo", "foo[0-200,245-394]"], None, "400\n")
        self._nodeset_t(args + ["--count", "foo[395-442]", "bar3,bar24", "foo[1-200,245-394]"], None, "400\n")
        # from stdin
        self._nodeset_t(args + ["--count"], "foo\n", "1\n")
        self._nodeset_t(args + ["--count"], "foo\nbar\n", "2\n")
        self._nodeset_t(args + ["--count"], "foo\nfoo\n", "1\n")
        self._nodeset_t(args + ["--count"], "foo\nfoo\nbar\n", "2\n")
        self._nodeset_t(args + ["--count"], "foo[0]\n", "1\n")
        self._nodeset_t(args + ["--count"], "foo[2]\n", "1\n")
        self._nodeset_t(args + ["--count"], "foo[1,2]\n", "2\n")
        self._nodeset_t(args + ["--count"], "foo[1-2]\n", "2\n")
        self._nodeset_t(args + ["--count"], "foo[1,2]\nfoo[1-2]\n", "2\n")
        self._nodeset_t(args + ["--count"], "foo[1-200,245-394]\n", "350\n")
        self._nodeset_t(args + ["--count"], "foo[395-442]\nfoo[1-200,245-394]\n", "398\n")
        self._nodeset_t(args + ["--count"], "foo[395-442]\nfoo\nfoo[1-200,245-394]\n", "399\n")
        self._nodeset_t(args + ["--count"], "foo[395-442]\nfoo\nfoo[0-200,245-394]\n", "400\n")
        self._nodeset_t(args + ["--count"], "foo[395-442]\nbar3,bar24\nfoo[1-200,245-394]\n", "400\n")

    def test_001_count(self):
        """test nodeset --count"""
        # Counting must not be affected by the --autostep threshold.
        self._battery_count([])
        self._battery_count(["--autostep=1"])
        self._battery_count(["--autostep=2"])
        self._battery_count(["--autostep=5"])

    def test_002_count_intersection(self):
        """test nodeset --count --intersection"""
        self._nodeset_t(["--count", "foo", "--intersection", "bar"], None, "0\n")
        self._nodeset_t(["--count", "foo", "--intersection", "foo"], None, "1\n")
        self._nodeset_t(["--count", "foo", "--intersection", "foo", "-i", "bar"], None, "0\n")
        self._nodeset_t(["--count", "foo[0]", "--intersection", "foo0"], None, "1\n")
        self._nodeset_t(["--count", "foo[2]", "--intersection", "foo"], None, "0\n")
        self._nodeset_t(["--count", "foo[1,2]", "--intersection", "foo[1-2]"], None, "2\n")
        self._nodeset_t(["--count", "foo[395-442]", "--intersection", "foo[1-200,245-394]"], None, "0\n")
        self._nodeset_t(["--count", "foo[395-442]", "--intersection", "foo", "-i", "foo[1-200,245-394]"], None, "0\n")
        self._nodeset_t(["--count", "foo[395-442]", "-i", "foo", "-i", "foo[0-200,245-394]"], None, "0\n")
        self._nodeset_t(["--count", "foo[395-442]", "--intersection", "bar3,bar24", "-i", "foo[1-200,245-394]"], None, "0\n")

    def test_003_count_intersection_stdin(self):
        """test nodeset --count --intersection (stdin)"""
        self._nodeset_t(["--count", "--intersection", "bar"], "foo\n", "0\n")
        self._nodeset_t(["--count", "--intersection", "foo"], "foo\n", "1\n")
        self._nodeset_t(["--count", "--intersection", "foo", "-i", "bar"], "foo\n", "0\n")
        self._nodeset_t(["--count", "--intersection", "foo0"], "foo[0]\n", "1\n")
        self._nodeset_t(["--count", "--intersection", "foo"], "foo[2]\n", "0\n")
        self._nodeset_t(["--count", "--intersection", "foo[1-2]"], "foo[1,2]\n", "2\n")
        self._nodeset_t(["--count", "--intersection", "foo[1-200,245-394]"], "foo[395-442]\n", "0\n")
        self._nodeset_t(["--count", "--intersection", "foo", "-i", "foo[1-200,245-394]"], "foo[395-442]\n", "0\n")
        self._nodeset_t(["--count", "-i", "foo", "-i", "foo[0-200,245-394]"], "foo[395-442]\n", "0\n")
        self._nodeset_t(["--count", "--intersection", "bar3,bar24", "-i", "foo[1-200,245-394]"], "foo[395-442]\n", "0\n")

    def _battery_fold(self, args):
        # Shared battery of --fold checks; folded output is sorted and
        # merged into bracketed ranges.
        self._nodeset_t(args + ["--fold", "foo"], None, "foo\n")
        self._nodeset_t(args + ["--fold", "foo", "bar"], None, "bar,foo\n")
        self._nodeset_t(args + ["--fold", "foo", "foo"], None, "foo\n")
        self._nodeset_t(args + ["--fold", "foo", "foo", "bar"], None, "bar,foo\n")
        self._nodeset_t(args + ["--fold", "foo[0]"], None, "foo0\n")
        self._nodeset_t(args + ["--fold", "foo[2]"], None, "foo2\n")
        self._nodeset_t(args + ["--fold", "foo[1,2]"], None, "foo[1-2]\n")
        self._nodeset_t(args + ["--fold", "foo[1-2]"], None, "foo[1-2]\n")
        self._nodeset_t(args + ["--fold", "foo[1,2]", "foo[1-2]"], None, "foo[1-2]\n")
        self._nodeset_t(args + ["--fold", "foo[1-200,245-394]"], None, "foo[1-200,245-394]\n")
        self._nodeset_t(args + ["--fold", "foo[395-442]", "foo[1-200,245-394]"], None, "foo[1-200,245-442]\n")
        self._nodeset_t(args + ["--fold", "foo[395-442]", "foo", "foo[1-200,245-394]"], None, "foo,foo[1-200,245-442]\n")
        self._nodeset_t(args + ["--fold", "foo[395-442]", "foo", "foo[0-200,245-394]"], None, "foo,foo[0-200,245-442]\n")
        self._nodeset_t(args + ["--fold", "foo[395-442]", "bar3,bar24", "foo[1-200,245-394]"], None, "bar[3,24],foo[1-200,245-442]\n")
        # stdin
        self._nodeset_t(args + ["--fold"], "foo\n", "foo\n")
        self._nodeset_t(args + ["--fold"], "foo\nbar\n", "bar,foo\n")
        self._nodeset_t(args + ["--fold"], "foo\nfoo\n", "foo\n")
        self._nodeset_t(args + ["--fold"], "foo\nfoo\nbar\n", "bar,foo\n")
        self._nodeset_t(args + ["--fold"], "foo[0]\n", "foo0\n")
        self._nodeset_t(args + ["--fold"], "foo[2]\n", "foo2\n")
        self._nodeset_t(args + ["--fold"], "foo[1,2]\n", "foo[1-2]\n")
        self._nodeset_t(args + ["--fold"], "foo[1-2]\n", "foo[1-2]\n")
        self._nodeset_t(args + ["--fold"], "foo[1,2]\nfoo[1-2]\n", "foo[1-2]\n")
        self._nodeset_t(args + ["--fold"], "foo[1-200,245-394]\n", "foo[1-200,245-394]\n")
        self._nodeset_t(args + ["--fold"], "foo[395-442]\nfoo[1-200,245-394]\n", "foo[1-200,245-442]\n")
        self._nodeset_t(args + ["--fold"], "foo[395-442]\nfoo\nfoo[1-200,245-394]\n", "foo,foo[1-200,245-442]\n")
        self._nodeset_t(args + ["--fold"], "foo[395-442]\nfoo\nfoo[0-200,245-394]\n", "foo,foo[0-200,245-442]\n")
        self._nodeset_t(args + ["--fold"], "foo[395-442]\nbar3,bar24\nfoo[1-200,245-394]\n", "bar[3,24],foo[1-200,245-442]\n")

    def test_004_fold(self):
        """test nodeset --fold"""
        self._battery_fold([])
        self._battery_fold(["--autostep=3"])

    def test_005_fold_autostep(self):
        """test nodeset --fold --autostep=X"""
        # Step folding (a-b/step) only kicks in when the run length
        # reaches the autostep threshold.
        self._nodeset_t(["--autostep=2", "-f", "foo0", "foo2", "foo4", "foo6"], None, "foo[0-6/2]\n")
        self._nodeset_t(["--autostep=2", "-f", "foo4", "foo2", "foo0", "foo6"], None, "foo[0-6/2]\n")
        self._nodeset_t(["--autostep=3", "-f", "foo0", "foo2", "foo4", "foo6"], None, "foo[0-6/2]\n")
        self._nodeset_t(["--autostep=4", "-f", "foo0", "foo2", "foo4", "foo6"], None, "foo[0-6/2]\n")
        self._nodeset_t(["--autostep=5", "-f", "foo0", "foo2", "foo4", "foo6"], None, "foo[0,2,4,6]\n")

    def test_006_expand(self):
        """test nodeset --expand"""
        self._nodeset_t(["--expand", "foo"], None, "foo\n")
        self._nodeset_t(["--expand", "foo", "bar"], None, "bar foo\n")
        self._nodeset_t(["--expand", "foo", "foo"], None, "foo\n")
        self._nodeset_t(["--expand", "foo[0]"], None, "foo0\n")
        self._nodeset_t(["--expand", "foo[2]"], None, "foo2\n")
        self._nodeset_t(["--expand", "foo[1,2]"], None, "foo1 foo2\n")
        self._nodeset_t(["--expand", "foo[1-2]"], None, "foo1 foo2\n")
        self._nodeset_t(["--expand", "foo[1-2],bar"], None, "bar foo1 foo2\n")

    def test_007_expand_stdin(self):
        """test nodeset --expand (stdin)"""
        self._nodeset_t(["--expand"], "foo\n", "foo\n")
        self._nodeset_t(["--expand"], "foo\nbar\n", "bar foo\n")
        self._nodeset_t(["--expand"], "foo\nfoo\n", "foo\n")
        self._nodeset_t(["--expand"], "foo[0]\n", "foo0\n")
        self._nodeset_t(["--expand"], "foo[2]\n", "foo2\n")
        self._nodeset_t(["--expand"], "foo[1,2]\n", "foo1 foo2\n")
        self._nodeset_t(["--expand"], "foo[1-2]\n", "foo1 foo2\n")
        self._nodeset_t(["--expand"], "foo[1-2],bar\n", "bar foo1 foo2\n")

    def test_008_expand_separator(self):
        """test nodeset --expand -S"""
        self._nodeset_t(["--expand", "-S", ":", "foo"], None, "foo\n")
        self._nodeset_t(["--expand", "-S", ":", "foo", "bar"], None, "bar:foo\n")
        self._nodeset_t(["--expand", "--separator", ":", "foo", "bar"], None, "bar:foo\n")
        self._nodeset_t(["--expand", "--separator=:", "foo", "bar"], None, "bar:foo\n")
        self._nodeset_t(["--expand", "-S", ":", "foo", "foo"], None, "foo\n")
        self._nodeset_t(["--expand", "-S", ":", "foo[0]"], None, "foo0\n")
        self._nodeset_t(["--expand", "-S", ":", "foo[2]"], None, "foo2\n")
        self._nodeset_t(["--expand", "-S", ":", "foo[1,2]"], None, "foo1:foo2\n")
        self._nodeset_t(["--expand", "-S", ":", "foo[1-2]"], None, "foo1:foo2\n")
        self._nodeset_t(["--expand", "-S", " ", "foo[1-2]"], None, "foo1 foo2\n")
        self._nodeset_t(["--expand", "-S", ",", "foo[1-2],bar"], None, "bar,foo1,foo2\n")
        self._nodeset_t(["--expand", "-S", "uuu", "foo[1-2],bar"], None, "baruuufoo1uuufoo2\n")
        # "\\n" reaches the CLI as the two characters backslash-n, which
        # the tool expands to a real newline separator.
        self._nodeset_t(["--expand", "-S", "\\n", "foo[1-2]"], None, "foo1\nfoo2\n")

    def test_009_fold_xor(self):
        """test nodeset --fold --xor"""
        self._nodeset_t(["--fold", "foo", "-X", "bar"], None, "bar,foo\n")
        self._nodeset_t(["--fold", "foo", "-X", "foo"], None, "\n")
        self._nodeset_t(["--fold", "foo[1,2]", "-X", "foo[1-2]"], None, "\n")
        self._nodeset_t(["--fold", "foo[1-10]", "-X", "foo[5-15]"], None, "foo[1-4,11-15]\n")
        self._nodeset_t(["--fold", "foo[395-442]", "-X", "foo[1-200,245-394]"], None, "foo[1-200,245-442]\n")
        self._nodeset_t(["--fold", "foo[395-442]", "-X", "foo", "-X", "foo[1-200,245-394]"], None, "foo,foo[1-200,245-442]\n")
        self._nodeset_t(["--fold", "foo[395-442]", "-X", "foo", "-X", "foo[0-200,245-394]"], None, "foo,foo[0-200,245-442]\n")
        self._nodeset_t(["--fold", "foo[395-442]", "-X", "bar3,bar24", "-X", "foo[1-200,245-394]"], None, "bar[3,24],foo[1-200,245-442]\n")

    def test_010_fold_xor_stdin(self):
        """test nodeset --fold --xor (stdin)"""
        self._nodeset_t(["--fold", "-X", "bar"], "foo\n", "bar,foo\n")
        self._nodeset_t(["--fold", "-X", "foo"], "foo\n", "\n")
        self._nodeset_t(["--fold", "-X", "foo[1-2]"], "foo[1,2]\n", "\n")
        self._nodeset_t(["--fold", "-X", "foo[5-15]"], "foo[1-10]\n", "foo[1-4,11-15]\n")
        self._nodeset_t(["--fold", "-X", "foo[1-200,245-394]"], "foo[395-442]\n", "foo[1-200,245-442]\n")
        self._nodeset_t(["--fold", "-X", "foo", "-X", "foo[1-200,245-394]"], "foo[395-442]\n", "foo,foo[1-200,245-442]\n")
        self._nodeset_t(["--fold", "-X", "foo", "-X", "foo[0-200,245-394]"], "foo[395-442]\n", "foo,foo[0-200,245-442]\n")
        self._nodeset_t(["--fold", "-X", "bar3,bar24", "-X", "foo[1-200,245-394]"], "foo[395-442]\n", "bar[3,24],foo[1-200,245-442]\n")
        # using stdin for -X
        self._nodeset_t(["-f","foo[2-4]","-X","-"], "foo4 foo5 foo6\n", "foo[2-3,5-6]\n")
        self._nodeset_t(["-f","-X","-","foo[1-6]"], "foo4 foo5 foo6\n", "foo[1-6]\n")

    def test_011_fold_exclude(self):
        """test nodeset --fold --exclude"""
        # Empty result
        self._nodeset_t(["--fold", "foo", "-x", "foo"], None, "\n")
        # With no range
        self._nodeset_t(["--fold", "foo,bar", "-x", "foo"], None, "bar\n")
        # Normal with range
        self._nodeset_t(["--fold", "foo[0-5]", "-x", "foo[0-10]"], None, "\n")
        self._nodeset_t(["--fold", "foo[0-10]", "-x", "foo[0-5]"], None, "foo[6-10]\n")
        # Do no change
        self._nodeset_t(["--fold", "foo[6-10]", "-x", "bar[0-5]"], None, "foo[6-10]\n")
        self._nodeset_t(["--fold", "foo[0-10]", "foo[13-18]", "--exclude", "foo[5-10,15]"], None, "foo[0-4,13-14,16-18]\n")

    def test_012_fold_exclude_stdin(self):
        """test nodeset --fold --exclude (stdin)"""
        # Empty result
        self._nodeset_t(["--fold", "-x", "foo"], "", "\n")
        self._nodeset_t(["--fold", "-x", "foo"], "\n", "\n")
        self._nodeset_t(["--fold", "-x", "foo"], "foo\n", "\n")
        # With no range
        self._nodeset_t(["--fold", "-x", "foo"], "foo,bar\n", "bar\n")
        # Normal with range
        self._nodeset_t(["--fold", "-x", "foo[0-10]"], "foo[0-5]\n", "\n")
        self._nodeset_t(["--fold", "-x", "foo[0-5]"], "foo[0-10]\n", "foo[6-10]\n")
        # Do no change
        self._nodeset_t(["--fold", "-x", "bar[0-5]"], "foo[6-10]\n", "foo[6-10]\n")
        self._nodeset_t(["--fold", "--exclude", "foo[5-10,15]"], "foo[0-10]\nfoo[13-18]\n", "foo[0-4,13-14,16-18]\n")
        # using stdin for -x
        self._nodeset_t(["-f","foo[1-6]","-x","-"], "foo4 foo5 foo6\n", "foo[1-3]\n")
        self._nodeset_t(["-f","-x","-","foo[1-6]"], "foo4 foo5 foo6\n", "foo[1-6]\n")

    def test_013_fold_intersection(self):
        """test nodeset --fold --intersection"""
        # Empty result
        self._nodeset_t(["--fold", "foo", "-i", "foo"], None, "foo\n")
        # With no range
        self._nodeset_t(["--fold", "foo,bar", "--intersection", "foo"], None, "foo\n")
        # Normal with range
        self._nodeset_t(["--fold", "foo[0-5]", "-i", "foo[0-10]"], None, "foo[0-5]\n")
        self._nodeset_t(["--fold", "foo[0-10]", "-i", "foo[0-5]"], None, "foo[0-5]\n")
        self._nodeset_t(["--fold", "foo[6-10]", "-i", "bar[0-5]"], None, "\n")
        self._nodeset_t(["--fold", "foo[0-10]", "foo[13-18]", "-i", "foo[5-10,15]"], None, "foo[5-10,15]\n")

    def test_014_fold_intersection_stdin(self):
        """test nodeset --fold --intersection (stdin)"""
        # Empty result
        self._nodeset_t(["--fold", "--intersection", "foo"], "", "\n")
        self._nodeset_t(["--fold", "--intersection", "foo"], "\n", "\n")
        self._nodeset_t(["--fold", "-i", "foo"], "foo\n", "foo\n")
        # With no range
        self._nodeset_t(["--fold", "-i", "foo"], "foo,bar\n", "foo\n")
        # Normal with range
        self._nodeset_t(["--fold", "-i", "foo[0-10]"], "foo[0-5]\n", "foo[0-5]\n")
        self._nodeset_t(["--fold", "-i", "foo[0-5]"], "foo[0-10]\n", "foo[0-5]\n")
        # Do no change
        self._nodeset_t(["--fold", "-i", "bar[0-5]"], "foo[6-10]\n", "\n")
        self._nodeset_t(["--fold", "-i", "foo[5-10,15]"], "foo[0-10]\nfoo[13-18]\n", "foo[5-10,15]\n")
        # using stdin for -i
        self._nodeset_t(["-f","foo[1-6]","-i","-"], "foo4 foo5 foo6\n", "foo[4-6]\n")
        self._nodeset_t(["-f","-i","-","foo[1-6]"], "foo4 foo5 foo6\n", "foo[1-6]\n")

    def test_015_rangeset(self):
        """test nodeset --rangeset"""
        self._nodeset_t(["--fold","--rangeset","1,2"], None, "1-2\n")
        self._nodeset_t(["--expand","-R","1-2"], None, "1 2\n")
        self._nodeset_t(["--fold","-R","1-2","-X","2-3"], None, "1,3\n")

    def test_016_rangeset_stdin(self):
        """test nodeset --rangeset (stdin)"""
        self._nodeset_t(["--fold","--rangeset"], "1,2\n", "1-2\n")
        self._nodeset_t(["--expand","-R"], "1-2\n", "1 2\n")
        self._nodeset_t(["--fold","-R","-X","2-3"], "1-2\n", "1,3\n")

    def test_017_stdin(self):
        """test nodeset - (stdin)"""
        self._nodeset_t(["-f","-"], "foo\n", "foo\n")
        self._nodeset_t(["-f","-"], "foo1 foo2 foo3\n", "foo[1-3]\n")
        self._nodeset_t(["--autostep=2", "-f"], "foo0 foo2 foo4 foo6\n", "foo[0-6/2]\n")

    def test_018_split(self):
        """test nodeset --split"""
        self._nodeset_t(["--split=2","-f", "bar"], None, "bar\n")
        self._nodeset_t(["--split", "2","-f", "foo,bar"], None, "bar\nfoo\n")
        self._nodeset_t(["--split", "2","-e", "foo", "bar", "bur", "oof", "gcc"], None, "bar bur foo\ngcc oof\n")
        self._nodeset_t(["--split=2","-f", "foo[2-9]"], None, "foo[2-5]\nfoo[6-9]\n")
        self._nodeset_t(["--split=2","-f", "foo[2-3,7]", "bar9"], None, "bar9,foo2\nfoo[3,7]\n")
        self._nodeset_t(["--split=3","-f", "foo[2-9]"], None, "foo[2-4]\nfoo[5-7]\nfoo[8-9]\n")
        self._nodeset_t(["--split=1","-f", "foo2", "foo3"], None, "foo[2-3]\n")
        # more parts requested than nodes: one node per line
        self._nodeset_t(["--split=4","-f", "foo[2-3]"], None, "foo2\nfoo3\n")
        self._nodeset_t(["--split=4","-f", "foo3", "foo2"], None, "foo2\nfoo3\n")
        self._nodeset_t(["--split=2","-e", "foo[2-9]"], None, "foo2 foo3 foo4 foo5\nfoo6 foo7 foo8 foo9\n")
        self._nodeset_t(["--split=3","-e", "foo[2-9]"], None, "foo2 foo3 foo4\nfoo5 foo6 foo7\nfoo8 foo9\n")
        self._nodeset_t(["--split=1","-e", "foo3", "foo2"], None, "foo2 foo3\n")
        self._nodeset_t(["--split=4","-e", "foo[2-3]"], None, "foo2\nfoo3\n")
        self._nodeset_t(["--split=4","-e", "foo2", "foo3"], None, "foo2\nfoo3\n")
        self._nodeset_t(["--split=2","-c", "foo2", "foo3"], None, "1\n1\n")

    def test_019_contiguous(self):
        """test nodeset --contiguous"""
        self._nodeset_t(["--contiguous", "-f", "bar"], None, "bar\n")
        self._nodeset_t(["--contiguous", "-f", "foo,bar"], None, "bar\nfoo\n")
        self._nodeset_t(["--contiguous", "-f", "foo", "bar", "bur", "oof", "gcc"], None, "bar\nbur\nfoo\ngcc\noof\n")
        self._nodeset_t(["--contiguous", "-e", "foo", "bar", "bur", "oof", "gcc"], None, "bar\nbur\nfoo\ngcc\noof\n")
        self._nodeset_t(["--contiguous", "-f", "foo2"], None, "foo2\n")
        self._nodeset_t(["--contiguous", "-R", "-f", "2"], None, "2\n")
        self._nodeset_t(["--contiguous", "-f", "foo[2-9]"], None, "foo[2-9]\n")
        self._nodeset_t(["--contiguous", "-f", "foo[2-3,7]", "bar9"], None, "bar9\nfoo[2-3]\nfoo7\n")
        self._nodeset_t(["--contiguous", "-R", "-f", "2-3,7", "9"], None, "2-3\n7\n9\n")
        self._nodeset_t(["--contiguous", "-f", "foo2", "foo3"], None, "foo[2-3]\n")
        self._nodeset_t(["--contiguous", "-f", "foo3", "foo2"], None, "foo[2-3]\n")
        self._nodeset_t(["--contiguous", "-f", "foo3", "foo1"], None, "foo1\nfoo3\n")
        self._nodeset_t(["--contiguous", "-f", "foo[1-5/2]", "foo7"], None, "foo1\nfoo3\nfoo5\nfoo7\n")

    def test_020_slice(self):
        """test nodeset -I/--slice"""
        self._nodeset_t(["--slice=0","-f", "bar"], None, "bar\n")
        self._nodeset_t(["--slice=0","-e", "bar"], None, "bar\n")
        self._nodeset_t(["--slice=1","-f", "bar"], None, "\n")
        self._nodeset_t(["--slice=0-1","-f", "bar"], None, "bar\n")
        self._nodeset_t(["-I0","-f", "bar[34-68,89-90]"], None, "bar34\n")
        self._nodeset_t(["-R", "-I0","-f", "34-68,89-90"], None, "34\n")
        # "-I 0" (single argv entry with embedded space) must parse too
        self._nodeset_t(["-I 0","-f", "bar[34-68,89-90]"], None, "bar34\n")
        self._nodeset_t(["-I 0","-e", "bar[34-68,89-90]"], None, "bar34\n")
        self._nodeset_t(["-I 0-3","-f", "bar[34-68,89-90]"], None, "bar[34-37]\n")
        self._nodeset_t(["-I 0-3","-f", "bar[34-68,89-90]", "-x", "bar34"], None, "bar[35-38]\n")
        self._nodeset_t(["-I 0-3","-f", "bar[34-68,89-90]", "-x", "bar35"], None, "bar[34,36-38]\n")
        self._nodeset_t(["-I 0-3","-e", "bar[34-68,89-90]"], None, "bar34 bar35 bar36 bar37\n")
        self._nodeset_t(["-I 3,1,0,2","-f", "bar[34-68,89-90]"], None, "bar[34-37]\n")
        self._nodeset_t(["-I 1,3,7,10,16,20,30,34-35,37","-f", "bar[34-68,89-90]"], None, "bar[35,37,41,44,50,54,64,68,89]\n")
        self._nodeset_t(["-I 8","-f", "bar[34-68,89-90]"], None, "bar42\n")
        self._nodeset_t(["-I 8-100","-f", "bar[34-68,89-90]"], None, "bar[42-68,89-90]\n")
        self._nodeset_t(["-I 0-100","-f", "bar[34-68,89-90]"], None, "bar[34-68,89-90]\n")
        self._nodeset_t(["-I 8-100/2","-f", "bar[34-68,89-90]"], None, "bar[42,44,46,48,50,52,54,56,58,60,62,64,66,68,90]\n")
        self._nodeset_t(["--autostep=2", "-I 8-100/2","-f", "bar[34-68,89-90]"], None, "bar[42-68/2,90]\n")

    def test_021_slice_stdin(self):
        """test nodeset -I/--slice (stdin)"""
        self._nodeset_t(["--slice=0","-f"], "bar\n", "bar\n")
        self._nodeset_t(["--slice=0","-e"], "bar\n", "bar\n")
        self._nodeset_t(["--slice=1","-f"], "bar\n", "\n")
        self._nodeset_t(["--slice=0-1","-f"], "bar\n", "bar\n")
        self._nodeset_t(["-I0","-f"], "bar[34-68,89-90]\n", "bar34\n")
        self._nodeset_t(["-R", "-I0","-f"], "34-68,89-90\n", "34\n")
        self._nodeset_t(["-I 0","-f"], "bar[34-68,89-90]\n", "bar34\n")
        self._nodeset_t(["-I 0","-e"], "bar[34-68,89-90]\n", "bar34\n")
        self._nodeset_t(["-I 0-3","-f"], "bar[34-68,89-90]\n", "bar[34-37]\n")
        self._nodeset_t(["-I 0-3","-f", "-x", "bar34"], "bar[34-68,89-90]\n", "bar[35-38]\n")
        self._nodeset_t(["-I 0-3","-f", "-x", "bar35"], "bar[34-68,89-90]\n", "bar[34,36-38]\n")
        self._nodeset_t(["-I 0-3","-e"], "bar[34-68,89-90]\n", "bar34 bar35 bar36 bar37\n")
        self._nodeset_t(["-I 3,1,0,2","-f"], "bar[34-68,89-90]\n", "bar[34-37]\n")
        self._nodeset_t(["-I 1,3,7,10,16,20,30,34-35,37","-f"], "bar[34-68,89-90]\n", "bar[35,37,41,44,50,54,64,68,89]\n")
        self._nodeset_t(["-I 8","-f"], "bar[34-68,89-90]\n", "bar42\n")
        self._nodeset_t(["-I 8-100","-f"], "bar[34-68,89-90]\n", "bar[42-68,89-90]\n")
        self._nodeset_t(["-I 0-100","-f"], "bar[34-68,89-90]\n", "bar[34-68,89-90]\n")
        self._nodeset_t(["-I 8-100/2","-f"], "bar[34-68,89-90]\n", "bar[42,44,46,48,50,52,54,56,58,60,62,64,66,68,90]\n")
        self._nodeset_t(["--autostep=2", "-I 8-100/2","-f"], "bar[34-68,89-90]\n", "bar[42-68/2,90]\n")

    def test_022_list(self):
        """test nodeset --list"""
        # Install a temporary group source, then restore the default
        # resolver in the finally clause so other tests are unaffected.
        f = make_temp_file("""
[Main]
default: local

[local]
map: echo example[1-100]
all: echo example[1-1000]
list: echo foo bar moo
""")
        ClusterShell.NodeSet.RESOLVER_STD_GROUP = GroupResolverConfig(f.name)
        try:
            self._nodeset_t(["--list"], None, "@foo\n@bar\n@moo\n")
            self._nodeset_t(["-ll"], None, "@foo example[1-100]\n@bar example[1-100]\n@moo example[1-100]\n")
            self._nodeset_t(["-lll"], None, "@foo example[1-100] 100\n@bar example[1-100] 100\n@moo example[1-100] 100\n")
            self._nodeset_t(["-l", "example[4,95]", "example5"], None, "@moo\n@bar\n@foo\n")
            self._nodeset_t(["-ll", "example[4,95]", "example5"], None, "@moo example[4-5,95]\n@bar example[4-5,95]\n@foo example[4-5,95]\n")
            self._nodeset_t(["-lll", "example[4,95]", "example5"], None, "@moo example[4-5,95] 3/100\n@bar example[4-5,95] 3/100\n@foo example[4-5,95] 3/100\n")
            # test empty result
            self._nodeset_t(["-l", "foo[3-70]", "bar6"], None, "")
            # more arg-mixed tests
            self._nodeset_t(["-a", "-l"], None, "@moo\n@bar\n@foo\n")
            self._nodeset_t(["-a", "-l", "-x example[1-100]"], None, "")
            self._nodeset_t(["-a", "-l", "-x example[1-40]"], None, "@moo\n@bar\n@foo\n")
            self._nodeset_t(["-l", "-x example3"], None, "") # no -a, remove from nothing
            self._nodeset_t(["-l", "-i example3"], None, "") # no -a, intersect from nothing
            self._nodeset_t(["-l", "-X example3"], None, "@moo\n@bar\n@foo\n") # no -a, xor from nothing
            self._nodeset_t(["-l", "-", "-i example3"], "example[3,500]\n", "@moo\n@bar\n@foo\n")
        finally:
            ClusterShell.NodeSet.RESOLVER_STD_GROUP = DEF_RESOLVER_STD_GROUP

    def test_023_groups(self):
        """test nodeset with groups"""
        # Special tests that require a default group source set
        f = make_temp_file("""
[Main]
default: test

[test]
map: echo example[1-100]
all: echo @foo,@bar,@moo
list: echo foo bar moo
""")
        ClusterShell.NodeSet.RESOLVER_STD_GROUP = GroupResolverConfig(f.name)
        try:
            self._nodeset_t(["--split=2","-r", "unknown2", "unknown3"], None, \
                "unknown2\nunknown3\n")
            self._nodeset_t(["-f", "-a"], None, "example[1-100]\n")
            self._nodeset_t(["-f", "@moo"], None, "example[1-100]\n")
            self._nodeset_t(["-f", "@moo", "@bar"], None, "example[1-100]\n")
            self._nodeset_t(["-e", "-a"], None,
                ' '.join(["example%d" % i for i in range(1, 101)]) + '\n')
            self._nodeset_t(["-c", "-a"], None, "100\n")
            self._nodeset_t(["-r", "-a"], None, "@bar\n")
            self._nodeset_t(["-s", "test", "-r", "-a"], None, "@test:bar\n")
            self._nodeset_t(["-s", "test", "-G", "-r", "-a"], None, "@bar\n")
            self._nodeset_t(["-f", "-a", "-"], "example101\n", "example[1-101]\n")
            self._nodeset_t(["-f", "-a", "-"], "example102 example101\n", "example[1-102]\n")
        finally:
            ClusterShell.NodeSet.RESOLVER_STD_GROUP = DEF_RESOLVER_STD_GROUP
class MsgTreeTest(unittest.TestCase):
    """Unit tests for the MsgTree message-aggregation structure.

    Keys here are plain strings or (worker, key) tuples; len(tree) counts
    distinct keys, while messages()/walk() iterate over distinct message
    contents (identical messages from different keys are merged).
    """

    def test_001_basics(self):
        """test MsgTree basics"""
        tree = MsgTree()
        self.assertEqual(len(tree), 0)
        tree.add("key", "message")
        self.assertEqual(len(tree), 1)
        # adding to an existing key appends, it does not create a new key
        tree.add("key", "message2")
        self.assertEqual(len(tree), 1)
        tree.add("key2", "message3")
        self.assertEqual(len(tree), 2)

    def test_002_elem(self):
        """test MsgTreeElem"""
        # an empty element has no lines and iterates to nothing
        elem = MsgTreeElem()
        self.assertEqual(len(elem), 0)
        for s in elem:
            self.fail("found line in empty MsgTreeElem!")

    def test_003_iterators(self):
        """test MsgTree iterators"""
        # build tree...
        tree = MsgTree()
        self.assertEqual(len(tree), 0)
        tree.add(("item1", "key"), "message0")
        self.assertEqual(len(tree), 1)
        tree.add(("item2", "key"), "message2")
        self.assertEqual(len(tree), 2)
        tree.add(("item3", "key"), "message3")
        self.assertEqual(len(tree), 3)
        # item4 carries the same message as item3 -> merged in walk()
        tree.add(("item4", "key"), "message3")
        tree.add(("item2", "newkey"), "message4")
        self.assertEqual(len(tree), 5)
        self.assertEqual(tree._depth(), 1)
        # test standard iterator (over keys)
        cnt = 0
        what = set([ ("item1", "key"), ("item2", "key"), ("item3", "key"), \
            ("item4", "key"), ("item2", "newkey") ])
        for key in tree:
            cnt += 1
            what.remove(key)
        self.assertEqual(cnt, 5)
        self.assertEqual(len(what), 0)
        # test keys() iterator
        cnt = 0
        for key in tree.keys(): # keep this test for return value check
            cnt += 1
        self.assertEqual(cnt, 5)
        self.assertEqual(len(list(iter(tree.keys()))), 5)
        # test messages() iterator (iterate over different messages)
        cnt = 0
        for msg in tree.messages():
            cnt += 1
            self.assertEqual(len(msg), len("message0"))
            self.assertEqual(msg[0][:-1], "message")
        self.assertEqual(cnt, 4)
        self.assertEqual(len(list(iter(tree.messages()))), 4)
        # test items() iterator (iterate over all key, msg pairs)
        cnt = 0
        for key, msg in tree.items():
            cnt += 1
        self.assertEqual(cnt, 5)
        self.assertEqual(len(list(iter(tree.items()))), 5)
        # test walk() iterator (iterate by msg and give the list of
        # associated keys)
        cnt = 0
        cnt_2 = 0
        for msg, keys in tree.walk():
            cnt += 1
            if len(keys) == 2:
                self.assertEqual(msg, "message3")
                cnt_2 += 1
        self.assertEqual(cnt, 4)
        self.assertEqual(cnt_2, 1)
        self.assertEqual(len(list(iter(tree.walk()))), 4)
        # test walk() with provided key-filter
        cnt = 0
        for msg, keys in tree.walk(match=lambda s: s[1] == "newkey"):
            cnt += 1
        self.assertEqual(cnt, 1)
        # test walk() with provided key-mapper
        cnt = 0
        cnt_2 = 0
        for msg, keys in tree.walk(mapper=itemgetter(0)):
            cnt += 1
            if len(keys) == 2:
                for k in keys:
                    self.assertEqual(type(k), str)
                cnt_2 += 1
        self.assertEqual(cnt, 4)
        self.assertEqual(cnt_2, 1)
        # test walk with full options: key-filter and key-mapper
        cnt = 0
        for msg, keys in tree.walk(match=lambda k: k[1] == "newkey",
                                   mapper=itemgetter(0)):
            cnt += 1
            self.assertEqual(msg, "message4")
            self.assertEqual(keys[0], "item2")
        self.assertEqual(cnt, 1)
        cnt = 0
        for msg, keys in tree.walk(match=lambda k: k[1] == "key",
                                   mapper=itemgetter(0)):
            cnt += 1
            self.assertEqual(keys[0][:-1], "item")
        self.assertEqual(cnt, 3) # 3 and not 4 because item3 and item4 are merged

    def test_004_getitem(self):
        """test MsgTree get and __getitem__"""
        # build tree...
        tree = MsgTree()
        tree.add("item1", "message0")
        self.assertEqual(len(tree), 1)
        tree.add("item2", "message2")
        tree.add("item3", "message2")
        tree.add("item4", "message3")
        tree.add("item2", "message4")
        tree.add("item3", "message4")
        self.assertEqual(len(tree), 4)
        # multi-line messages come back joined with '\n'
        self.assertEqual(tree["item1"], "message0")
        self.assertEqual(tree.get("item1"), "message0")
        self.assertEqual(tree["item2"], "message2\nmessage4")
        self.assertEqual(tree.get("item2"), "message2\nmessage4")
        self.assertEqual(tree.get("item5", "default_buf"), "default_buf")
        self.assertEqual(tree._depth(), 2)

    def test_005_remove(self):
        """test MsgTree.remove()"""
        # build tree
        tree = MsgTree()
        self.assertEqual(len(tree), 0)
        tree.add(("w1", "key1"), "message0")
        self.assertEqual(len(tree), 1)
        tree.add(("w1", "key2"), "message0")
        self.assertEqual(len(tree), 2)
        tree.add(("w1", "key3"), "message0")
        self.assertEqual(len(tree), 3)
        tree.add(("w2", "key4"), "message1")
        self.assertEqual(len(tree), 4)
        # remove() takes a key predicate
        tree.remove(lambda k: k[1] == "key2")
        self.assertEqual(len(tree), 3)
        for msg, keys in tree.walk(match=lambda k: k[0] == "w1",
                                   mapper=itemgetter(1)):
            self.assertEqual(msg, "message0")
            self.assertEqual(len(keys), 2)
        tree.remove(lambda k: k[0] == "w1")
        self.assertEqual(len(tree), 1)
        tree.remove(lambda k: k[0] == "w2")
        self.assertEqual(len(tree), 0)
        tree.clear()
        self.assertEqual(len(tree), 0)

    def test_006_scalability(self):
        """test MsgTree scalability"""
        # build tree...
        # smoke test: 10k distinct keys/messages must insert and walk
        tree = MsgTree()
        for i in xrange(0, 10000):
            tree.add("node%d" % i, "message%d" % i)
        self.assertEqual(len(tree), 10000)
        cnt = 0
        for msg, keys in tree.walk():
            cnt += 1

    def test_007_shift_mode(self):
        """test MsgTree in shift mode"""
        # MODE_SHIFT: same lookup results as default mode for this data,
        # but walk() yields 3 entries here (vs 4 in MODE_TRACE below).
        tree = MsgTree(mode=MODE_SHIFT)
        tree.add("item1", "message0")
        self.assertEqual(len(tree), 1)
        tree.add("item2", "message2")
        tree.add("item3", "message2")
        tree.add("item4", "message3")
        tree.add("item2", "message4")
        tree.add("item3", "message4")
        self.assertEqual(len(tree), 4)
        self.assertEqual(tree["item1"], "message0")
        self.assertEqual(tree.get("item1"), "message0")
        self.assertEqual(tree["item2"], "message2\nmessage4")
        self.assertEqual(tree.get("item2"), "message2\nmessage4")
        self.assertEqual(tree.get("item5", "default_buf"), "default_buf")
        self.assertEqual(tree._depth(), 2)
        self.assertEqual(len(list(tree.walk())), 3)

    def test_008_trace_mode(self):
        """test MsgTree in trace mode"""
        tree = MsgTree(mode=MODE_TRACE)
        tree.add("item1", "message0")
        self.assertEqual(len(tree), 1)
        tree.add("item2", "message2")
        tree.add("item3", "message2")
        tree.add("item4", "message3")
        tree.add("item2", "message4")
        tree.add("item3", "message4")
        self.assertEqual(len(tree), 4)
        self.assertEqual(tree["item1"], "message0")
        self.assertEqual(tree.get("item1"), "message0")
        self.assertEqual(tree["item2"], "message2\nmessage4")
        self.assertEqual(tree.get("item2"), "message2\nmessage4")
        self.assertEqual(tree.get("item5", "default_buf"), "default_buf")
        self.assertEqual(tree._depth(), 2)
        self.assertEqual(len(list(tree.walk())), 4)
        # walk_trace() yields (msg, keys, depth, num_children) tuples
        self.assertEqual(list(tree.walk_trace()), \
            [('message0', ['item1'], 1, 0),
             ('message2', ['item2', 'item3'], 1, 1),
             ('message4', ['item2', 'item3'], 2, 0),
             ('message3', ['item4'], 1, 0)])
clustershell-1.6/README0000644000130500135250000001016411741571247014305 0ustar thiellgpocre ----------------------------------------- ClusterShell 1.6 Python Library and Tools ----------------------------------------- ClusterShell is an event-driven open source Python library, designed to run local or distant commands in parallel on server farms or on large Linux clusters. It will take care of common issues encountered on HPC clusters, such as operating on groups of nodes, running distributed commands using optimized execution algorithms, as well as gathering results and merging identical outputs, or retrieving return codes. ClusterShell takes advantage of existing remote shell facilities already installed on your systems, like SSH. ClusterShell's primary goal is to improve the administration of high- performance clusters by providing a lightweight but scalable Python API for developers. It also provides clush, clubak and nodeset, three convenient command-line tools that allow traditional shell scripts to benefit from some of the library features. ------------------- Requirements (v1.6) ------------------- * GNU/Linux, *BSD, Mac OS X, etc. * OpenSSH (ssh/scp) * Python 2.x (x >= 4) ------- License ------- ClusterShell is distributed under the CeCILL-C license, a French transposition of the GNU LGPL, and is fully LGPL-compatible (see Licence_CeCILL-C_V1-en.txt). ------------ Installation ------------ When possible, please use the RPM/deb package distribution: https://github.com/cea-hpc/clustershell/wiki/GetClusterShell Otherwise in the source directory, use: # python setup.py install # cp -r conf /etc/clustershell For installation on Mac OS X, please see: https://github.com/cea-hpc/clustershell/wiki/ClusterShellOnMacOSX ---------- Test Suite ---------- Regression testing scripts are available in the 'tests' directory: $ cd tests $ nosetests -sv $ nosetests -sv --all-modules You have to allow 'ssh localhost' with no warning for "remote" tests to run. 
------------- Documentation -------------
----- Links ----- Main web site: http://clustershell.sourceforge.net or http://cea-hpc.github.com/clustershell/ Github source respository: https://github.com/cea-hpc/clustershell Github Wiki: https://github.com/cea-hpc/clustershell/wiki Github Issue tracking system: https://github.com/cea-hpc/clustershell/issues Sourceforge.net project page: http://sourceforge.net/projects/clustershell Python Package Index (PyPI) link: http://pypi.python.org/pypi/ClusterShell ClusterShell was born along with Shine, a scalable Lustre FS admin tool: http://lustre-shine.sourceforge.net ------- Authors ------- Stephane Thiell Aurelien Degremont CEA/DAM 2010, 2011, 2012 - http://www-hpc.cea.fr clustershell-1.6/setup.py0000644000130500135250000000632111741571374015140 0ustar thiellgpocre#!/usr/bin/env python # # Copyright CEA/DAM/DIF (2008, 2009, 2010, 2011, 2012) # Contributor: Stephane THIELL # # This file is part of the ClusterShell library. # # This software is governed by the CeCILL-C license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/ or redistribute the software under the terms of the CeCILL-C # license as circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. # # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. 
Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-C license and that you accept its terms. import os from setuptools import setup, find_packages if not os.access('scripts/clubak', os.F_OK): os.symlink('clubak.py', 'scripts/clubak') if not os.access('scripts/clush', os.F_OK): os.symlink('clush.py', 'scripts/clush') if not os.access('scripts/nodeset', os.F_OK): os.symlink('nodeset.py', 'scripts/nodeset') VERSION='1.6' setup(name='ClusterShell', version=VERSION, package_dir={'': 'lib'}, packages=find_packages('lib'), scripts=['scripts/clubak', 'scripts/clush', 'scripts/nodeset'], author='Stephane Thiell', author_email='stephane.thiell@cea.fr', license='CeCILL-C (French equivalent to LGPLv2+)', url='http://clustershell.sourceforge.net/', download_url='http://sourceforge.net/projects/clustershell/files/' 'clustershell/%s/' % VERSION, platforms=['GNU/Linux', 'BSD', 'MacOSX'], keywords=['clustershell', 'clush', 'clubak', 'nodeset'], description='ClusterShell library and tools', long_description=open('doc/txt/clustershell.rst').read(), classifiers=[ "Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: System Administrators", "Operating System :: MacOS :: MacOS X", "Operating System :: POSIX :: BSD", "Operating System :: POSIX :: Linux", "Programming Language :: Python", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: System :: Clustering", "Topic :: System :: Distributed Computing" ] ) clustershell-1.6/clustershell.spec0000644000130500135250000001274211741572334017014 0ustar thiellgpocre%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import 
get_python_lib; print(get_python_lib())")} Name: clustershell Version: 1.6 Release: 1%{?dist} Summary: Python framework for efficient cluster administration Group: System Environment/Base License: CeCILL-C URL: http://clustershell.sourceforge.net/ Source0: http://downloads.sourceforge.net/%{name}/%{name}-%{version}.tar.gz BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX) BuildArch: noarch BuildRequires: python-devel python-setuptools %description Tools and event-based Python library to execute commands on cluster nodes in parallel depending on selected engine and worker mechanisms. The library provides also advanced NodeSet and NodeGroups handling methods to ease and improve administration of large compute clusters or server farms. Three convenient command line utilities, clush, clubak and nodeset, allow traditional shell scripts to benefit some useful features offered by the library. %package -n vim-%{name} Summary: VIM files for ClusterShell Group: System Environment/Base Requires: clustershell = %{version}-%{release}, vim-common %description -n vim-%{name} Syntax highlighting in the VIM editor for ClusterShell configuration files. 
%prep %setup -q %build %{__python} setup.py build %install rm -rf %{buildroot} %{__python} setup.py install -O1 --skip-build --root %{buildroot} # config files install -d %{buildroot}/%{_sysconfdir}/clustershell/groups.conf.d install -p -m 0644 conf/*.conf conf/groups %{buildroot}/%{_sysconfdir}/clustershell/ install -p -m 0644 conf/groups.conf.d/README conf/groups.conf.d/*.conf.example %{buildroot}/%{_sysconfdir}/clustershell/groups.conf.d # man pages install -d %{buildroot}/%{_mandir}/{man1,man5} install -p -m 0644 doc/man/man1/clubak.1 %{buildroot}/%{_mandir}/man1/ install -p -m 0644 doc/man/man1/clush.1 %{buildroot}/%{_mandir}/man1/ install -p -m 0644 doc/man/man1/nodeset.1 %{buildroot}/%{_mandir}/man1/ install -p -m 0644 doc/man/man5/clush.conf.5 %{buildroot}/%{_mandir}/man5/ install -p -m 0644 doc/man/man5/groups.conf.5 %{buildroot}/%{_mandir}/man5/ # docs and example scripts install -d %{buildroot}/%{_defaultdocdir}/%{name}-%{version}/examples install -p -m 0644 README ChangeLog Licence_CeCILL-C_V1-en.txt Licence_CeCILL-C_V1-fr.txt %{buildroot}/%{_defaultdocdir}/%{name}-%{version}/ install -p -m 0755 doc/examples/*.py %{buildroot}/%{_defaultdocdir}/%{name}-%{version}/examples/ # vim addons %define vimdatadir %{_datadir}/vim/vimfiles install -d %{buildroot}/%{vimdatadir}/{ftdetect,syntax} install -p -m 0644 doc/extras/vim/ftdetect/clustershell.vim %{buildroot}/%{vimdatadir}/ftdetect/ install -p -m 0644 doc/extras/vim/syntax/clushconf.vim %{buildroot}/%{vimdatadir}/syntax/ install -p -m 0644 doc/extras/vim/syntax/groupsconf.vim %{buildroot}/%{vimdatadir}/syntax/ %clean rm -rf %{buildroot} %files %defattr(-,root,root,-) %doc %{_defaultdocdir}/%{name}-%{version}/ %{_mandir}/man1/clubak.1* %{_mandir}/man1/clush.1* %{_mandir}/man1/nodeset.1* %{_mandir}/man5/clush.conf.5* %{_mandir}/man5/groups.conf.5* %dir %{_sysconfdir}/clustershell %config(noreplace) %{_sysconfdir}/clustershell/clush.conf %config(noreplace) %{_sysconfdir}/clustershell/groups %config(noreplace) 
%{_sysconfdir}/clustershell/groups.conf %dir %{_sysconfdir}/clustershell/groups.conf.d %doc %{_sysconfdir}/clustershell/groups.conf.d/README %doc %{_sysconfdir}/clustershell/groups.conf.d/*.conf.example %{python_sitelib}/ClusterShell/ %{python_sitelib}/ClusterShell-*-py?.?.egg-info %{_bindir}/clubak %{_bindir}/clush %{_bindir}/nodeset %files -n vim-%{name} %defattr(-,root,root,-) %{vimdatadir}/ftdetect/clustershell.vim %{vimdatadir}/syntax/clushconf.vim %{vimdatadir}/syntax/groupsconf.vim %changelog * Sun Apr 08 2012 Stephane Thiell 1.6-1 - update to 1.6 * Thu Jun 09 2011 Stephane Thiell 1.5.1-1 - update to 1.5.1 * Wed Jun 08 2011 Stephane Thiell 1.5-1 - update to 1.5 * Sat Mar 19 2011 Stephane Thiell 1.4.3-1 - update to 1.4.3 * Tue Mar 15 2011 Stephane Thiell 1.4.2-1 - update to 1.4.2 * Sun Feb 13 2011 Stephane Thiell 1.4.1-1 - update to 1.4.1 * Sat Jan 15 2011 Stephane Thiell 1.4-1 - update to 1.4 * Wed Oct 20 2010 Stephane Thiell 1.3.3-1 - update to 1.3.3 * Fri Sep 10 2010 Stephane Thiell 1.3.2-1 - renamed Vim subpackage to vim-clustershell - update to 1.3.2 * Sun Sep 05 2010 Stephane Thiell 1.3.1-2 - added -vim subpackage for .vim files * Fri Sep 03 2010 Stephane Thiell 1.3.1-1 - removed -n from setup line - own clustershell config directory for proper uninstall - install vim syntax addons in vimfiles, thus avoiding vim version detection - update to 1.3.1 * Sun Aug 22 2010 Stephane Thiell 1.3-4 - fixed BuildRoot tag in accordance with EPEL guidelines - python_sitelib definition: prefer global vs define - preserve timestamps and fix permissions when installing files * Sat Aug 21 2010 Stephane Thiell 1.3-3 - use a full URL to the package in Source0 * Fri Aug 20 2010 Stephane Thiell 1.3-2 - various improvements per first review request * Thu Aug 19 2010 Stephane Thiell 1.3-1 - initial build candidate for Fedora clustershell-1.6/lib/0000755000130500135250000000000011741572333014166 5ustar 
thiellgpocreclustershell-1.6/lib/ClusterShell/0000755000130500135250000000000011741572333016577 5ustar thiellgpocreclustershell-1.6/lib/ClusterShell/Propagation.py0000644000130500135250000003156711741571247021453 0ustar thiellgpocre#!/usr/bin/env python # # Copyright CEA/DAM/DIF (2010, 2011, 2012) # Contributor: Henri DOREAU # Contributor: Stephane THIELL # # This file is part of the ClusterShell library. # # This software is governed by the CeCILL-C license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/ or redistribute the software under the terms of the CeCILL-C # license as circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. # # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-C license and that you accept its terms. """ ClusterShell Propagation module. Use the topology tree to send commands through gateways and gather results. 
""" import logging from ClusterShell.NodeSet import NodeSet from ClusterShell.Communication import Channel from ClusterShell.Communication import ControlMessage, StdOutMessage from ClusterShell.Communication import StdErrMessage, RetcodeMessage from ClusterShell.Communication import RoutedMessageBase, EndMessage from ClusterShell.Communication import ConfigurationMessage, TimeoutMessage class RouteResolvingError(Exception): """error raised on invalid conditions during routing operations""" class PropagationTreeRouter(object): """performs routes resolving operations within a propagation tree. This object provides a next_hop method, that will look for the best directly connected node to use to forward a message to a remote node. Upon instanciation, the router will parse the topology tree to generate its routing table. """ def __init__(self, root, topology, fanout=0): self.root = root self.topology = topology self.fanout = fanout self.nodes_fanin = {} self.table = None self.table_generate(root, topology) self._unreachable_hosts = NodeSet() def table_generate(self, root, topology): """The router relies on a routing table. The keys are the destination nodes and the values are the next hop gateways to use to reach these nodes. """ self.table = {} root_group = None for entry in topology.groups: if root in entry.nodeset: root_group = entry break if root_group is None: raise RouteResolvingError('Invalid admin node: %s' % root) for group in root_group.children(): self.table[group.nodeset] = NodeSet() stack = [group] while len(stack) > 0: curr = stack.pop() self.table[group.nodeset].add(curr.children_ns()) stack += curr.children() # reverse table (it was crafted backward) self.table = dict((v, k) for k, v in self.table.iteritems()) def dispatch(self, dst): """dispatch nodes from a target nodeset to the directly connected gateways. The method acts as an iterator, returning a gateway and the associated hosts. It should provide a rather good load balancing between the gateways. 
""" # Check for directly connected targets res = [tmp & dst for tmp in self.table.values()] nexthop = NodeSet() [nexthop.add(x) for x in res] if len(nexthop) > 0: yield nexthop, nexthop # Check for remote targets, that require a gateway to be reached for network in self.table.iterkeys(): dst_inter = network & dst dst.difference_update(dst_inter) for host in dst_inter.nsiter(): yield self.next_hop(host), host def next_hop(self, dst): """perform the next hop resolution. If several hops are available, then, the one with the least number of current jobs will be returned """ if dst in self._unreachable_hosts: raise RouteResolvingError( 'Invalid destination: %s, host is unreachable' % dst) # can't resolve if source == destination if self.root == dst: raise RouteResolvingError( 'Invalid resolution request: %s -> %s' % (self.root, dst)) ## ------------------ # the routing table is organized this way: # # NETWORK | NEXT HOP # ------------+----------- # node[0-9] | gateway0 # node[10-19] | gateway[1-2] # ... # --------- for network, nexthops in self.table.iteritems(): # destination contained in current network if dst in network: res = self._best_next_hop(nexthops) if res is None: raise RouteResolvingError('No route available to %s' % \ str(dst)) self.nodes_fanin[res] += len(dst) return res # destination contained in current next hops (ie. directly # connected) if dst in nexthops: return dst raise RouteResolvingError( 'No route from %s to host %s' % (self.root, dst)) def mark_unreachable(self, dst): """mark node dst as unreachable and don't advertise routes through it anymore. The cache will be updated only when necessary to avoid performing expensive traversals. """ # Simply mark dst as unreachable in a dedicated NodeSet. 
This # list will be consulted by the resolution method self._unreachable_hosts.add(dst) def _best_next_hop(self, candidates): """find out a good next hop gateway""" backup = None backup_connections = 1e400 # infinity candidates = candidates.difference(self._unreachable_hosts) for host in candidates: # the router tracks established connections in the # nodes_fanin table to avoid overloading a gateway connections = self.nodes_fanin.setdefault(host, 0) # FIXME #if connections < self.fanout: # # currently, the first one is the best # return host if backup_connections > connections: backup = host backup_connections = connections return backup class PropagationChannel(Channel): """Admin node propagation logic. Instances are able to handle incoming messages from a directly connected gateway, process them and reply. In order to take decisions, the instance acts as a finite states machine, whose current state evolves according to received data. -- INTERNALS -- Instance can be in one of the 4 different states: - init (implicit) This is the very first state. The instance enters the init state at start() method, and will then send the configuration to the remote node. Once the configuration is sent away, the state changes to cfg. - cfg During this second state, the instance will wait for a valid acknowledgement from the gateway to the previously sent configuration message. If such a message is delivered, the control message (the one that contains the actions to perform) is sent, and the state is set to ctl. - ctl Third state, the instance is waiting for a valid ack for from the gateway to the ctl packet. Then, the state switch to gtr (gather). - gtr Final state: wait for results from the subtree and store them. 
""" def __init__(self, task): """ """ Channel.__init__(self) self.task = task self.workers = {} self.current_state = None self.states = { 'STATE_CFG': self._state_config, 'STATE_CTL': self._state_control, #'STATE_GTR': self._state_gather, } self._history = {} # track informations about previous states self._sendq = [] self.logger = logging.getLogger(__name__) def start(self): """initial actions""" #print '[DBG] start' self._open() cfg = ConfigurationMessage() #cfg.data_encode(self.task._default_topology()) cfg.data_encode(self.task.topology) self._history['cfg_id'] = cfg.msgid self.send(cfg) self.current_state = self.states['STATE_CFG'] def recv(self, msg): """process incoming messages""" self.logger.debug("[DBG] rcvd from: %s" % str(msg)) if msg.ident == EndMessage.ident: #??#self.ptree.notify_close() self.logger.debug("closing") # abort worker (now working) self.worker.abort() else: self.current_state(msg) def shell(self, nodes, command, worker, timeout, stderr, gw_invoke_cmd): """command execution through channel""" self.logger.debug("shell nodes=%s timeout=%f worker=%s" % \ (nodes, timeout, id(worker))) self.workers[id(worker)] = worker ctl = ControlMessage(id(worker)) ctl.action = 'shell' ctl.target = nodes info = self.task._info.copy() info['debug'] = False ctl_data = { 'cmd': command, 'invoke_gateway': gw_invoke_cmd, # XXX 'taskinfo': info, #self.task._info, 'stderr': stderr, 'timeout': timeout, } ctl.data_encode(ctl_data) self._history['ctl_id'] = ctl.msgid if self.current_state == self.states['STATE_CTL']: # send now if channel state is CTL self.send(ctl) else: self._sendq.append(ctl) def _state_config(self, msg): """handle incoming messages for state 'propagate configuration'""" if msg.type == 'ACK': # and msg.ack == self._history['cfg_id']: self.current_state = self.states['STATE_CTL'] for ctl in self._sendq: self.send(ctl) else: print str(msg) def _state_control(self, msg): """handle incoming messages for state 'control'""" if msg.type == 'ACK': # and 
msg.ack == self._history['ctl_id']: #self.current_state = self.states['STATE_GTR'] self.logger.debug("PropChannel: _state_control -> STATE_GTR") elif isinstance(msg, RoutedMessageBase): metaworker = self.workers[msg.srcid] if msg.type == StdOutMessage.ident: if metaworker.eh: nodeset = NodeSet(msg.nodes) self.logger.debug("StdOutMessage: \"%s\"", msg.data) for line in msg.data.splitlines(): for node in nodeset: metaworker._on_node_msgline(node, line) elif msg.type == StdErrMessage.ident: if metaworker.eh: nodeset = NodeSet(msg.nodes) self.logger.debug("StdErrMessage: \"%s\"", msg.data) for line in msg.data.splitlines(): for node in nodeset: metaworker._on_node_errline(node, line) elif msg.type == RetcodeMessage.ident: rc = msg.retcode for node in NodeSet(msg.nodes): metaworker._on_node_rc(node, rc) elif msg.type == TimeoutMessage.ident: self.logger.debug("TimeoutMessage for %s", msg.nodes) for node in NodeSet(msg.nodes): metaworker._on_node_timeout(node) else: self.logger.debug("PropChannel: _state_gather unhandled msg %s" % \ msg) """ return if self.ptree.upchannel is not None: self.logger.debug("_state_gather ->upchan %s" % msg) self.ptree.upchannel.send(msg) # send to according event handler passed by shell() else: assert False """ def ev_close(self, worker): worker.flush_buffers() clustershell-1.6/lib/ClusterShell/NodeUtils.py0000644000130500135250000003117011741571247021064 0ustar thiellgpocre# Copyright CEA/DAM/DIF (2010, 2012) # Contributors: # Stephane THIELL # Aurelien DEGREMONT # # This file is part of the ClusterShell library. # # This software is governed by the CeCILL-C license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/ or redistribute the software under the terms of the CeCILL-C # license as circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". 
# # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. # # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-C license and that you accept its terms. """ Cluster nodes utility module The NodeUtils module is a ClusterShell helper module that provides supplementary services to manage nodes in a cluster. It is primarily designed to enhance the NodeSet module providing some binding support to external node groups sources in separate namespaces (example of group sources are: files, jobs scheduler, custom scripts, etc.). 
""" import glob import os import sys from ConfigParser import ConfigParser, NoOptionError, NoSectionError from string import Template from subprocess import Popen, PIPE class GroupSourceException(Exception): """Base GroupSource exception""" def __init__(self, message, group_source): Exception.__init__(self, message) self.group_source = group_source class GroupSourceNoUpcall(GroupSourceException): """Raised when upcall is not available""" class GroupSourceQueryFailed(GroupSourceException): """Raised when a query failed (eg. no group found)""" class GroupResolverError(Exception): """Base GroupResolver error""" class GroupResolverSourceError(GroupResolverError): """Raised when upcall is not available""" class GroupResolverConfigError(GroupResolverError): """Raised when a configuration error is encountered""" class GroupSource(object): """ GroupSource class managing external calls for nodegroup support. """ def __init__(self, name, map_upcall, all_upcall=None, list_upcall=None, reverse_upcall=None, cfgdir=None): self.name = name self.verbosity = 0 self.cfgdir = cfgdir # Supported external upcalls self.map_upcall = map_upcall self.all_upcall = all_upcall self.list_upcall = list_upcall self.reverse_upcall = reverse_upcall # Cache upcall data self._cache_map = {} self._cache_list = [] self._cache_all = None self._cache_reverse = {} def _verbose_print(self, msg): """Print msg depending on the verbosity level.""" if self.verbosity > 0: print >> sys.stderr, "%s<%s> %s" % \ (self.__class__.__name__, self.name, msg) def _upcall_read(self, cmdtpl, vars=dict()): """ Invoke the specified upcall command, raise an Exception if something goes wrong and return the command output otherwise. 
""" cmdline = Template(getattr(self, "%s_upcall" % \ cmdtpl)).safe_substitute(vars) self._verbose_print("EXEC '%s'" % cmdline) proc = Popen(cmdline, stdout=PIPE, shell=True, cwd=self.cfgdir) output = proc.communicate()[0].strip() self._verbose_print("READ '%s'" % output) if proc.returncode != 0: self._verbose_print("ERROR '%s' returned %d" % (cmdline, \ proc.returncode)) raise GroupSourceQueryFailed(cmdline, self) return output def resolv_map(self, group): """ Get nodes from group 'group', using the cached value if available. """ if group not in self._cache_map: self._cache_map[group] = self._upcall_read('map', dict(GROUP=group)) return self._cache_map[group] def resolv_list(self): """ Return a list of all group names for this group source, using the cached value if available. """ if not self.list_upcall: raise GroupSourceNoUpcall("list", self) if not self._cache_list: self._cache_list = self._upcall_read('list') return self._cache_list def resolv_all(self): """ Return the content of special group ALL, using the cached value if available. """ if not self.all_upcall: raise GroupSourceNoUpcall("all", self) if not self._cache_all: self._cache_all = self._upcall_read('all') return self._cache_all def resolv_reverse(self, node): """ Return the group name matching the provided node, using the cached value if available. """ if not self.reverse_upcall: raise GroupSourceNoUpcall("reverse", self) if node not in self._cache_reverse: self._cache_reverse[node] = self._upcall_read('reverse', \ dict(NODE=node)) return self._cache_reverse[node] class GroupResolver(object): """ Base class GroupResolver that aims to provide node/group resolution from multiple GroupSource's. """ def __init__(self, default_source=None): """ Initialize GroupResolver object. """ self._sources = {} self._default_source = default_source if default_source: self._sources[default_source.name] = default_source def set_verbosity(self, value): """ Set debugging verbosity value. 
""" for source in self._sources.itervalues(): source.verbosity = value def add_source(self, group_source): """ Add a GroupSource to this resolver. """ if group_source.name in self._sources: raise ValueError("GroupSource '%s': name collision" % \ group_source.name) self._sources[group_source.name] = group_source def sources(self): """ Get the list of all resolver source names. """ return self._sources.keys() def _list(self, source, what, *args): """Helper method that returns a list of result when the source is defined.""" result = [] assert source raw = getattr(source, 'resolv_%s' % what)(*args) for line in raw.splitlines(): map(result.append, line.strip().split()) return result def _source(self, namespace): """Helper method that returns the source by namespace name.""" if not namespace: source = self._default_source else: source = self._sources.get(namespace) if not source: raise GroupResolverSourceError(namespace or "") return source def group_nodes(self, group, namespace=None): """ Find nodes for specified group name and optional namespace. """ source = self._source(namespace) return self._list(source, 'map', group) def all_nodes(self, namespace=None): """ Find all nodes. You may specify an optional namespace. """ source = self._source(namespace) return self._list(source, 'all') def grouplist(self, namespace=None): """ Get full group list. You may specify an optional namespace. """ source = self._source(namespace) return self._list(source, 'list') def has_node_groups(self, namespace=None): """ Return whether finding group list for a specified node is supported by the resolver (in optional namespace). """ try: return bool(self._source(namespace).reverse_upcall) except GroupResolverSourceError: return False def node_groups(self, node, namespace=None): """ Find group list for specified node and optional namespace. 
""" source = self._source(namespace) return self._list(source, 'reverse', node) class GroupResolverConfig(GroupResolver): """ GroupResolver class that is able to automatically setup its GroupSource's from a configuration file. This is the default resolver for NodeSet. """ def __init__(self, configfile): """ """ GroupResolver.__init__(self) self.default_sourcename = None self.config = ConfigParser() self.config.read(configfile) # Get config file sections groupscfgs = {} configfile_dirname = os.path.dirname(configfile) for section in self.config.sections(): if section != 'Main': groupscfgs[section] = (self.config, configfile_dirname) try: self.groupsdir = self.config.get('Main', 'groupsdir') for groupsdir in self.groupsdir.split(): # support relative-to-dirname(groups.conf) groupsdir groupsdir = os.path.normpath(os.path.join(configfile_dirname, \ groupsdir)) if not os.path.isdir(groupsdir): if not os.path.exists(groupsdir): continue raise GroupResolverConfigError("Defined groupsdir %s " \ "is not a directory" % groupsdir) for groupsfn in sorted(glob.glob('%s/*.conf' % groupsdir)): grpcfg = ConfigParser() grpcfg.read(groupsfn) # ignore files that cannot be read for section in grpcfg.sections(): if section in groupscfgs: raise GroupResolverConfigError("Group source " \ "\"%s\" re-defined in %s" % (section, groupsfn)) groupscfgs[section] = (grpcfg, groupsdir) except (NoSectionError, NoOptionError): pass try: self.default_sourcename = self.config.get('Main', 'default') if self.default_sourcename and self.default_sourcename \ not in groupscfgs.keys(): raise GroupResolverConfigError("Default group source not " \ "found: \"%s\"" % self.default_sourcename) except (NoSectionError, NoOptionError): pass if not groupscfgs: return # When not specified, select a random section. 
if not self.default_sourcename: self.default_sourcename = groupscfgs.keys()[0] try: for section, (cfg, cfgdir) in groupscfgs.iteritems(): map_upcall = cfg.get(section, 'map', True) all_upcall = list_upcall = reverse_upcall = None if cfg.has_option(section, 'all'): all_upcall = cfg.get(section, 'all', True) if cfg.has_option(section, 'list'): list_upcall = cfg.get(section, 'list', True) if cfg.has_option(section, 'reverse'): reverse_upcall = cfg.get(section, 'reverse', True) self.add_source(GroupSource(section, map_upcall, all_upcall, \ list_upcall, reverse_upcall, cfgdir)) except (NoSectionError, NoOptionError), exc: raise GroupResolverConfigError(str(exc)) def _source(self, namespace): return GroupResolver._source(self, namespace or self.default_sourcename) def sources(self): """ Get the list of all resolver source names (default source is always first). """ srcs = GroupResolver.sources(self) if srcs: srcs.remove(self.default_sourcename) srcs.insert(0, self.default_sourcename) return srcs clustershell-1.6/lib/ClusterShell/Worker/0000755000130500135250000000000011741572333020050 5ustar thiellgpocreclustershell-1.6/lib/ClusterShell/Worker/fastsubprocess.py0000644000130500135250000003552311741571247023503 0ustar thiellgpocre# fastsubprocess - POSIX relaxed revision of subprocess.py # Based on Python 2.6.4 subprocess.py # This is a performance oriented version of subprocess module. # Modified by Stephane Thiell # Changes: # * removed Windows specific code parts # * removed pipe for transferring possible exec failure from child to # parent, to avoid os.read() blocking call after each fork. # * child returns status code 255 on execv failure, which can be # handled with Popen.wait(). # * removed file objects creation using costly fdopen(): this version # returns non-blocking file descriptors bound to child # * added module method set_nonblock_flag() and used it in Popen(). ## # Original Disclaimer: # # For more information about this module, see PEP 324. 
# # This module should remain compatible with Python 2.2, see PEP 291. # # Copyright (c) 2003-2005 by Peter Astrand # # Licensed to PSF under a Contributor Agreement. # See http://www.python.org/2.4/license for licensing details. """_subprocess - Subprocesses with accessible I/O non-blocking file descriptors Faster revision of subprocess-like module. """ import sys import os import types import gc import signal # Exception classes used by this module. class CalledProcessError(Exception): """This exception is raised when a process run by check_call() returns a non-zero exit status. The exit status will be stored in the returncode attribute.""" def __init__(self, returncode, cmd): self.returncode = returncode self.cmd = cmd def __str__(self): return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode) import select import errno import fcntl __all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", \ "CalledProcessError"] try: MAXFD = os.sysconf("SC_OPEN_MAX") except: MAXFD = 256 _active = [] def _cleanup(): for inst in _active[:]: if inst._internal_poll(_deadstate=sys.maxint) >= 0: try: _active.remove(inst) except ValueError: # This can happen if two threads create a new Popen instance. # It's harmless that it was already removed, so ignore. pass PIPE = -1 STDOUT = -2 def call(*popenargs, **kwargs): """Run command with arguments. Wait for command to complete, then return the returncode attribute. The arguments are the same as for the Popen constructor. Example: retcode = call(["ls", "-l"]) """ return Popen(*popenargs, **kwargs).wait() def check_call(*popenargs, **kwargs): """Run command with arguments. Wait for command to complete. If the exit code was zero then return, otherwise raise CalledProcessError. The CalledProcessError object will have the return code in the returncode attribute. The arguments are the same as for the Popen constructor. 
Example: check_call(["ls", "-l"]) """ retcode = call(*popenargs, **kwargs) cmd = kwargs.get("args") if cmd is None: cmd = popenargs[0] if retcode: raise CalledProcessError(retcode, cmd) return retcode def set_nonblock_flag(fd): """Set non blocking flag to file descriptor fd""" old = fcntl.fcntl(fd, fcntl.F_GETFL) fcntl.fcntl(fd, fcntl.F_SETFL, old | os.O_NDELAY) class Popen(object): """A faster Popen""" def __init__(self, args, bufsize=0, executable=None, stdin=None, stdout=None, stderr=None, preexec_fn=None, shell=False, cwd=None, env=None, universal_newlines=False): """Create new Popen instance.""" _cleanup() self._child_created = False if not isinstance(bufsize, (int, long)): raise TypeError("bufsize must be an integer") self.pid = None self.returncode = None self.universal_newlines = universal_newlines # Input and output objects. The general principle is like # this: # # Parent Child # ------ ----- # p2cwrite ---stdin---> p2cread # c2pread <--stdout--- c2pwrite # errread <--stderr--- errwrite # # On POSIX, the child objects are file descriptors. On # Windows, these are Windows file handles. The parent objects # are file descriptors on both platforms. The parent objects # are None when not using PIPEs. The child objects are None # when not redirecting. (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) = self._get_handles(stdin, stdout, stderr) self._execute_child(args, executable, preexec_fn, cwd, env, universal_newlines, shell, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) if p2cwrite is not None: set_nonblock_flag(p2cwrite) self.stdin = p2cwrite if c2pread is not None: set_nonblock_flag(c2pread) self.stdout = c2pread if errread is not None: set_nonblock_flag(errread) self.stderr = errread def _translate_newlines(self, data): data = data.replace("\r\n", "\n") data = data.replace("\r", "\n") return data def __del__(self, sys=sys): if not self._child_created: # We didn't get to successfully create a child process. 
return # In case the child hasn't been waited on, check if it's done. self._internal_poll(_deadstate=sys.maxint) if self.returncode is None and _active is not None: # Child is still running, keep us alive until we can wait on it. _active.append(self) def communicate(self, input=None): """Interact with process: Send data to stdin. Read data from stdout and stderr, until end-of-file is reached. Wait for process to terminate. The optional input argument should be a string to be sent to the child process, or None, if no data should be sent to the child. communicate() returns a tuple (stdout, stderr).""" # Optimization: If we are only using one pipe, or no pipe at # all, using select() or threads is unnecessary. if [self.stdin, self.stdout, self.stderr].count(None) >= 2: stdout = None stderr = None if self.stdin: if input: self.stdin.write(input) self.stdin.close() elif self.stdout: stdout = self.stdout.read() self.stdout.close() elif self.stderr: stderr = self.stderr.read() self.stderr.close() self.wait() return (stdout, stderr) return self._communicate(input) def poll(self): return self._internal_poll() def _get_handles(self, stdin, stdout, stderr): """Construct and return tupel with IO objects: p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite """ p2cread, p2cwrite = None, None c2pread, c2pwrite = None, None errread, errwrite = None, None if stdin is None: pass elif stdin == PIPE: p2cread, p2cwrite = os.pipe() elif isinstance(stdin, int): p2cread = stdin else: # Assuming file-like object p2cread = stdin.fileno() if stdout is None: pass elif stdout == PIPE: c2pread, c2pwrite = os.pipe() elif isinstance(stdout, int): c2pwrite = stdout else: # Assuming file-like object c2pwrite = stdout.fileno() if stderr is None: pass elif stderr == PIPE: errread, errwrite = os.pipe() elif stderr == STDOUT: errwrite = c2pwrite elif isinstance(stderr, int): errwrite = stderr else: # Assuming file-like object errwrite = stderr.fileno() return (p2cread, p2cwrite, c2pread, c2pwrite, 
errread, errwrite) def _execute_child(self, args, executable, preexec_fn, cwd, env, universal_newlines, shell, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite): """Execute program (POSIX version)""" if isinstance(args, types.StringTypes): args = [args] else: args = list(args) if shell: args = ["/bin/sh", "-c"] + args if executable is None: executable = args[0] gc_was_enabled = gc.isenabled() # Disable gc to avoid bug where gc -> file_dealloc -> # write to stderr -> hang. http://bugs.python.org/issue1336 gc.disable() try: self.pid = os.fork() except: if gc_was_enabled: gc.enable() raise self._child_created = True if self.pid == 0: # Child try: # Close parent's pipe ends if p2cwrite is not None: os.close(p2cwrite) if c2pread is not None: os.close(c2pread) if errread is not None: os.close(errread) # Dup fds for child if p2cread is not None: os.dup2(p2cread, 0) if c2pwrite is not None: os.dup2(c2pwrite, 1) if errwrite is not None: os.dup2(errwrite, 2) # Close pipe fds. Make sure we don't close the same # fd more than once, or standard fds. 
if p2cread is not None and p2cread not in (0,): os.close(p2cread) if c2pwrite is not None and c2pwrite not in (p2cread, 1): os.close(c2pwrite) if errwrite is not None and errwrite not in \ (p2cread, c2pwrite, 2): os.close(errwrite) if cwd is not None: os.chdir(cwd) if preexec_fn: preexec_fn() if env is None: os.execvp(executable, args) else: os.execvpe(executable, args, env) except: # Child execution failure os._exit(255) # Parent if gc_was_enabled: gc.enable() if p2cread is not None and p2cwrite is not None: os.close(p2cread) if c2pwrite is not None and c2pread is not None: os.close(c2pwrite) if errwrite is not None and errread is not None: os.close(errwrite) def _handle_exitstatus(self, sts): if os.WIFSIGNALED(sts): self.returncode = -os.WTERMSIG(sts) elif os.WIFEXITED(sts): self.returncode = os.WEXITSTATUS(sts) else: # Should never happen raise RuntimeError("Unknown child exit status!") def _internal_poll(self, _deadstate=None): """Check if child process has terminated. Returns returncode attribute.""" if self.returncode is None: try: pid, sts = os.waitpid(self.pid, os.WNOHANG) if pid == self.pid: self._handle_exitstatus(sts) except os.error: if _deadstate is not None: self.returncode = _deadstate return self.returncode def wait(self): """Wait for child process to terminate. Returns returncode attribute.""" if self.returncode is None: pid, sts = os.waitpid(self.pid, 0) self._handle_exitstatus(sts) return self.returncode def _communicate(self, input): read_set = [] write_set = [] stdout = None # Return stderr = None # Return if self.stdin: # Flush stdio buffer. This might block, if the user has # been writing to .stdin in an uncontrolled fashion. 
self.stdin.flush() if input: write_set.append(self.stdin) else: self.stdin.close() if self.stdout: read_set.append(self.stdout) stdout = [] if self.stderr: read_set.append(self.stderr) stderr = [] input_offset = 0 while read_set or write_set: try: rlist, wlist, xlist = select.select(read_set, write_set, []) except select.error, ex: if ex.args[0] == errno.EINTR: continue raise if self.stdin in wlist: # When select has indicated that the file is writable, # we can write up to PIPE_BUF bytes without risk # blocking. POSIX defines PIPE_BUF >= 512 chunk = input[input_offset : input_offset + 512] bytes_written = os.write(self.stdin.fileno(), chunk) input_offset += bytes_written if input_offset >= len(input): self.stdin.close() write_set.remove(self.stdin) if self.stdout in rlist: data = os.read(self.stdout.fileno(), 1024) if data == "": self.stdout.close() read_set.remove(self.stdout) stdout.append(data) if self.stderr in rlist: data = os.read(self.stderr.fileno(), 1024) if data == "": self.stderr.close() read_set.remove(self.stderr) stderr.append(data) # All data exchanged. Translate lists into strings. if stdout is not None: stdout = ''.join(stdout) if stderr is not None: stderr = ''.join(stderr) # Translate newlines, if requested. We cannot let the file # object do the translation: It is based on stdio, which is # impossible to combine with select (unless forcing no # buffering). 
if self.universal_newlines and hasattr(file, 'newlines'): if stdout: stdout = self._translate_newlines(stdout) if stderr: stderr = self._translate_newlines(stderr) self.wait() return (stdout, stderr) def send_signal(self, sig): """Send a signal to the process """ os.kill(self.pid, sig) def terminate(self): """Terminate the process with SIGTERM """ self.send_signal(signal.SIGTERM) def kill(self): """Kill the process with SIGKILL """ self.send_signal(signal.SIGKILL) clustershell-1.6/lib/ClusterShell/Worker/Ssh.py0000644000130500135250000002606411741571247021172 0ustar thiellgpocre# # Copyright CEA/DAM/DIF (2008, 2009, 2010, 2011) # Contributor: Stephane THIELL # # This file is part of the ClusterShell library. # # This software is governed by the CeCILL-C license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/ or redistribute the software under the terms of the CeCILL-C # license as circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. # # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. 
Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-C license and that you accept its terms. """ ClusterShell Ssh/Scp support This module implements OpenSSH engine client and task's worker. """ import copy import os from ClusterShell.NodeSet import NodeSet from ClusterShell.Worker.EngineClient import EngineClient from ClusterShell.Worker.Worker import DistantWorker class Ssh(EngineClient): """ Ssh EngineClient. """ def __init__(self, node, command, worker, stderr, timeout, autoclose=False): """ Initialize Ssh EngineClient instance. """ EngineClient.__init__(self, worker, stderr, timeout, autoclose) self.key = copy.copy(node) self.command = command self.popen = None def _start(self): """ Start worker, initialize buffers, prepare command. """ task = self.worker.task # Build ssh command cmd_l = [ task.info("ssh_path") or "ssh", "-a", "-x" ] user = task.info("ssh_user") if user: cmd_l.append("-l") cmd_l.append(user) connect_timeout = task.info("connect_timeout", 0) if connect_timeout > 0: cmd_l.append("-oConnectTimeout=%d" % connect_timeout) # Disable passphrase/password querying cmd_l.append("-oBatchMode=yes") # Add custom ssh options ssh_options = task.info("ssh_options") if ssh_options: cmd_l += ssh_options.split() cmd_l.append("%s" % self.key) cmd_l.append("%s" % self.command) if task.info("debug", False): task.info("print_debug")(task, "SSH: %s" % ' '.join(cmd_l)) self.popen = self._exec_nonblock(cmd_l) self.worker._on_start() return self def _close(self, abort, flush, timeout): """ Close client. See EngineClient._close(). """ if flush and self._rbuf: # We still have some read data available in buffer, but no # EOL. 
Generate a final message before closing. self.worker._on_node_msgline(self.key, self._rbuf) rc = -1 if abort: prc = self.popen.poll() if prc is None: # process is still running, kill it self.popen.kill() prc = self.popen.wait() if prc >= 0: rc = prc os.close(self.fd_reader) self.fd_reader = None if self.fd_error: os.close(self.fd_error) self.fd_error = None if self.fd_writer: os.close(self.fd_writer) self.fd_writer = None if rc >= 0: self.worker._on_node_rc(self.key, rc) elif timeout: assert abort, "abort flag not set on timeout" self.worker._on_node_timeout(self.key) self.worker._check_fini() def _handle_read(self): """ Handle a read notification. Called by the engine as the result of an event indicating that a read is available. """ # Local variables optimization worker = self.worker task = worker.task key = self.key node_msgline = worker._on_node_msgline debug = task.info("debug", False) if debug: print_debug = task.info("print_debug") for msg in self._readlines(): if debug: print_debug(task, "%s: %s" % (key, msg)) node_msgline(key, msg) # handle full msg line def _handle_error(self): """ Handle a read error (stderr) notification. """ # Local variables optimization worker = self.worker task = worker.task key = self.key node_errline = worker._on_node_errline debug = task.info("debug", False) if debug: print_debug = task.info("print_debug") for msg in self._readerrlines(): if debug: print_debug(task, "%s@STDERR: %s" % (key, msg)) node_errline(key, msg) # handle full stderr line class Scp(Ssh): """ Scp EngineClient. """ def __init__(self, node, source, dest, worker, stderr, timeout, preserve, reverse): """ Initialize Scp instance. """ Ssh.__init__(self, node, None, worker, stderr, timeout) self.source = source self.dest = dest self.popen = None # Preserve modification times and modes? self.preserve = preserve # Reverse copy? self.reverse = reverse # Directory? 
if self.reverse: self.isdir = os.path.isdir(self.dest) if not self.isdir: raise ValueError("reverse copy dest must be a directory") else: self.isdir = os.path.isdir(self.source) # Note: file sanity checks can be added to Scp._start() as # soon as Task._start_thread is able to dispatch exceptions on # _start (need trac ticket #21). FIXME def _start(self): """ Start client, initialize buffers, prepare command. """ task = self.worker.task # Build scp command cmd_l = [ task.info("scp_path") or "scp" ] if self.isdir: cmd_l.append("-r") if self.preserve: cmd_l.append("-p") connect_timeout = task.info("connect_timeout", 0) if connect_timeout > 0: cmd_l.append("-oConnectTimeout=%d" % connect_timeout) # Disable passphrase/password querying cmd_l.append("-oBatchMode=yes") # Add custom scp options for key in [ "ssh_options", "scp_options" ]: ssh_options = task.info(key) if ssh_options: cmd_l += ssh_options.split() user = task.info("scp_user") or task.info("ssh_user") if self.reverse: if user: cmd_l.append("%s@%s:%s" % (user, self.key, self.source)) else: cmd_l.append("%s:%s" % (self.key, self.source)) cmd_l.append(os.path.join(self.dest, "%s.%s" % \ (os.path.basename(self.source), self.key))) else: cmd_l.append(self.source) if user: cmd_l.append("%s@%s:%s" % (user, self.key, self.dest)) else: cmd_l.append("%s:%s" % (self.key, self.dest)) if task.info("debug", False): task.info("print_debug")(task, "SCP: %s" % ' '.join(cmd_l)) self.popen = self._exec_nonblock(cmd_l) self.worker._on_start() return self class WorkerSsh(DistantWorker): """ ClusterShell ssh-based worker Class. Remote Shell (ssh) usage example: >>> worker = WorkerSsh(nodeset, handler=MyEventHandler(), ... timeout=30, command="/bin/hostname") >>> task.schedule(worker) # schedule worker for execution >>> task.resume() # run Remote Copy (scp) usage example: >>> worker = WorkerSsh(nodeset, handler=MyEventHandler(), ... timeout=30, source="/etc/my.conf", ... 
dest="/etc/my.conf") >>> task.schedule(worker) # schedule worker for execution >>> task.resume() # run """ def __init__(self, nodes, handler, timeout, **kwargs): """ Initialize Ssh worker instance. """ DistantWorker.__init__(self, handler) self.clients = [] self.nodes = NodeSet(nodes) self.command = kwargs.get('command') self.source = kwargs.get('source') self.dest = kwargs.get('dest') autoclose = kwargs.get('autoclose', False) stderr = kwargs.get('stderr', False) self._close_count = 0 self._has_timeout = False # Prepare underlying engine clients (ssh/scp processes) if self.command is not None: # secure remote shell for node in self.nodes: self.clients.append(Ssh(node, self.command, self, stderr, timeout, autoclose)) elif self.source: # secure copy for node in self.nodes: self.clients.append(Scp(node, self.source, self.dest, self, stderr, timeout, kwargs.get('preserve', False), kwargs.get('reverse', False))) else: raise ValueError("missing command or source parameter in " \ "WorkerSsh constructor") def _engine_clients(self): """ Access underlying engine clients. """ return self.clients def _on_node_rc(self, node, rc): DistantWorker._on_node_rc(self, node, rc) self._close_count += 1 def _on_node_timeout(self, node): DistantWorker._on_node_timeout(self, node) self._close_count += 1 self._has_timeout = True def _check_fini(self): if self._close_count >= len(self.clients): handler = self.eh if handler: if self._has_timeout: handler.ev_timeout(self) handler.ev_close(self) def write(self, buf): """ Write to worker clients. """ for client in self.clients: client._write(buf) def set_write_eof(self): """ Tell worker to close its writer file descriptor once flushed. Do not perform writes after this call. """ for client in self.clients: client._set_write_eof() def abort(self): """ Abort processing any action by this worker. 
""" for client in self.clients: client.abort() clustershell-1.6/lib/ClusterShell/Worker/__init__.py0000644000130500135250000000000011741571247022152 0ustar thiellgpocreclustershell-1.6/lib/ClusterShell/Worker/Popen.py0000644000130500135250000001101511741571247021504 0ustar thiellgpocre# # Copyright CEA/DAM/DIF (2008, 2009, 2010, 2011) # Contributor: Stephane THIELL # # This file is part of the ClusterShell library. # # This software is governed by the CeCILL-C license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/ or redistribute the software under the terms of the CeCILL-C # license as circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. # # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-C license and that you accept its terms. """ WorkerPopen ClusterShell worker for executing local commands. 
Usage example: >>> worker = WorkerPopen("/bin/uname", key="mykernel") >>> task.schedule(worker) # schedule worker >>> task.resume() # run task >>> worker.retcode() # get return code 0 >>> worker.read() # read command output 'Linux' """ import os from ClusterShell.Worker.Worker import WorkerSimple class WorkerPopen(WorkerSimple): """ Implements the Popen Worker. """ def __init__(self, command, key=None, handler=None, stderr=False, timeout=-1, autoclose=False): """ Initialize Popen worker. """ WorkerSimple.__init__(self, None, None, None, key, handler, stderr, timeout, autoclose) self.command = command if not self.command: raise ValueError("missing command parameter in WorkerPopen " \ "constructor") self.popen = None self.rc = None def _start(self): """ Start worker. """ assert self.popen is None self.popen = self._exec_nonblock(self.command, shell=True) if self.task.info("debug", False): self.task.info("print_debug")(self.task, "POPEN: %s" % self.command) if self.eh: self.eh.ev_start(self) return self def _close(self, abort, flush, timeout): """ Close client. See EngineClient._close(). """ if flush and self._rbuf: # We still have some read data available in buffer, but no # EOL. Generate a final message before closing. self.worker._on_msgline(self._rbuf) rc = -1 if abort: # check if process has terminated prc = self.popen.poll() if prc is None: # process is still running, kill it self.popen.kill() # release process prc = self.popen.wait() # get exit status if prc >= 0: # process exited normally rc = prc elif not abort: # if process was signaled, return 128 + signum (bash-like) rc = 128 + -prc os.close(self.fd_reader) self.fd_reader = None if self.fd_error: os.close(self.fd_error) self.fd_error = None if self.fd_writer: os.close(self.fd_writer) self.fd_writer = None if rc >= 0: # filter valid rc self._on_rc(rc) elif timeout: assert abort, "abort flag not set on timeout" self._on_timeout() if self.eh: self.eh.ev_close(self) def _on_rc(self, rc): """ Set return code. 
""" self.rc = rc # 1.4- compat WorkerSimple._on_rc(self, rc) def retcode(self): """ Return return code or None if command is still in progress. """ return self.rc clustershell-1.6/lib/ClusterShell/Worker/EngineClient.py0000644000130500135250000003234611741571247023001 0ustar thiellgpocre# # Copyright CEA/DAM/DIF (2009, 2010, 2011, 2012) # Contributor: Stephane THIELL # # This file is part of the ClusterShell library. # # This software is governed by the CeCILL-C license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/ or redistribute the software under the terms of the CeCILL-C # license as circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. # # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-C license and that you accept its terms. """ EngineClient ClusterShell engine's client interface. 
An engine client is similar to a process, you can start/stop it, read data from it and write data to it. """ import errno import os import Queue import thread from ClusterShell.Worker.fastsubprocess import Popen, PIPE, STDOUT, \ set_nonblock_flag from ClusterShell.Engine.Engine import EngineBaseTimer class EngineClientException(Exception): """Generic EngineClient exception.""" class EngineClientEOF(EngineClientException): """EOF from client.""" class EngineClientError(EngineClientException): """Base EngineClient error exception.""" class EngineClientNotSupportedError(EngineClientError): """Operation not supported by EngineClient.""" class EngineClient(EngineBaseTimer): """ Abstract class EngineClient. """ def __init__(self, worker, stderr, timeout, autoclose): """ Initializer. Should be called from derived classes. """ EngineBaseTimer.__init__(self, timeout, -1, autoclose) # engine-friendly variables self._events = 0 # current configured set of # interesting events (read, # write) for client self._new_events = 0 # new set of interesting events self._reg_epoch = 0 # registration generation number # read-only public self.registered = False # registered on engine or not self.delayable = True # subject to fanout limit self.worker = worker # boolean indicating whether stderr is on a separate fd self._stderr = stderr # associated file descriptors self.fd_error = None self.fd_reader = None self.fd_writer = None # initialize error, read and write buffers self._ebuf = "" self._rbuf = "" self._wbuf = "" self._weof = False # write-ends notification def _fire(self): """ Fire timeout timer. """ if self._engine: self._engine.remove(self, abort=True, did_timeout=True) def _start(self): """ Starts client and returns client instance as a convenience. Derived classes (except EnginePort) must implement. """ raise NotImplementedError("Derived classes must implement.") def error_fileno(self): """ Return the standard error reader file descriptor as an integer. 
""" return self.fd_error def reader_fileno(self): """ Return the reader file descriptor as an integer. """ return self.fd_reader def writer_fileno(self): """ Return the writer file descriptor as an integer. """ return self.fd_writer def _close(self, abort, flush, timeout): """ Close client. Called by the engine after client has been unregistered. This method should handle all termination types (normal or aborted) with some options like flushing I/O buffers or setting timeout status. Derived classes must implement. """ raise NotImplementedError("Derived classes must implement.") def _set_reading(self): """ Set reading state. """ self._engine.set_reading(self) def _set_reading_error(self): """ Set error reading state. """ self._engine.set_reading_error(self) def _set_writing(self): """ Set writing state. """ self._engine.set_writing(self) def _read(self, size=65536): """ Read data from process. """ result = os.read(self.fd_reader, size) if not len(result): raise EngineClientEOF() self._set_reading() return result def _readerr(self, size=65536): """ Read error data from process. """ result = os.read(self.fd_error, size) if not len(result): raise EngineClientEOF() self._set_reading_error() return result def _handle_read(self): """ Handle a read notification. Called by the engine as the result of an event indicating that a read is available. """ raise NotImplementedError("Derived classes must implement.") def _handle_error(self): """ Handle a stderr read notification. Called by the engine as the result of an event indicating that a read is available on stderr. """ raise NotImplementedError("Derived classes must implement.") def _handle_write(self): """ Handle a write notification. Called by the engine as the result of an event indicating that a write can be performed now. 
""" if len(self._wbuf) > 0: try: wcnt = os.write(self.fd_writer, self._wbuf) except OSError, exc: if (exc.errno == errno.EAGAIN): self._set_writing() return raise if wcnt > 0: # dequeue written buffer self._wbuf = self._wbuf[wcnt:] # check for possible ending if self._weof and not self._wbuf: self._close_writer() else: self._set_writing() def _exec_nonblock(self, commandlist, shell=False, env=None): """ Utility method to launch a command with stdin/stdout file descriptors configured in non-blocking mode. """ full_env = None if env: full_env = os.environ.copy() full_env.update(env) if self._stderr: stderr_setup = PIPE else: stderr_setup = STDOUT # Launch process in non-blocking mode proc = Popen(commandlist, bufsize=0, stdin=PIPE, stdout=PIPE, stderr=stderr_setup, shell=shell, env=full_env) if self._stderr: self.fd_error = proc.stderr self.fd_reader = proc.stdout self.fd_writer = proc.stdin return proc def _readlines(self): """ Utility method to read client lines """ # read a chunk of data, may raise eof readbuf = self._read() assert len(readbuf) > 0, "assertion failed: len(readbuf) > 0" # Current version implements line-buffered reads. If needed, we could # easily provide direct, non-buffered, data reads in the future. 
buf = self._rbuf + readbuf lines = buf.splitlines(True) self._rbuf = "" for line in lines: if line.endswith('\n'): if line.endswith('\r\n'): yield line[:-2] # trim CRLF else: # trim LF yield line[:-1] # trim LF else: # keep partial line in buffer self._rbuf = line # breaking here def _readerrlines(self): """ Utility method to read client lines """ # read a chunk of data, may raise eof readerrbuf = self._readerr() assert len(readerrbuf) > 0, "assertion failed: len(readerrbuf) > 0" buf = self._ebuf + readerrbuf lines = buf.splitlines(True) self._ebuf = "" for line in lines: if line.endswith('\n'): if line.endswith('\r\n'): yield line[:-2] # trim CRLF else: # trim LF yield line[:-1] # trim LF else: # keep partial line in buffer self._ebuf = line # breaking here def _write(self, buf): """ Add some data to be written to the client. """ fd = self.fd_writer if fd: self._wbuf += buf # give it a try now (will set writing flag anyhow) self._handle_write() else: # bufferize until pipe is ready self._wbuf += buf def _set_write_eof(self): self._weof = True if not self._wbuf: # sendq empty, try to close writer now self._close_writer() def _close_writer(self): if self.fd_writer is not None: self._engine.unregister_writer(self) os.close(self.fd_writer) self.fd_writer = None def abort(self): """ Abort processing any action by this client. """ if self._engine: self._engine.remove(self, abort=True) class EnginePort(EngineClient): """ An EnginePort is an abstraction object to deliver messages reliably between tasks. """ class _Msg: """Private class representing a port message. A port message may be any Python object. """ def __init__(self, user_msg, sync): self._user_msg = user_msg self._sync_msg = sync self.reply_lock = thread.allocate_lock() self.reply_lock.acquire() def get(self): """ Get and acknowledge message. """ self.reply_lock.release() return self._user_msg def sync(self): """ Wait for message acknowledgment if needed. 
""" if self._sync_msg: self.reply_lock.acquire() def __init__(self, task, handler=None, autoclose=False): """ Initialize EnginePort object. """ EngineClient.__init__(self, None, False, -1, autoclose) self.task = task self.eh = handler # ports are no subject to fanout self.delayable = False # Port messages queue self._msgq = Queue.Queue(self.task.default("port_qlimit")) # Request pipe (readfd, writefd) = os.pipe() # Set nonblocking flag set_nonblock_flag(readfd) set_nonblock_flag(writefd) self.fd_reader = readfd self.fd_writer = writefd def _start(self): return self def _close(self, abort, flush, timeout): """ Close port pipes. """ if not self._msgq.empty(): # purge msgq try: while not self._msgq.empty(): pmsg = self._msgq.get(block=False) self.task.info("print_debug")(self.task, "EnginePort: dropped msg: %s" % str(pmsg.get())) except Queue.Empty: pass self._msgq = None os.close(self.fd_reader) self.fd_reader = None os.close(self.fd_writer) self.fd_writer = None def _handle_read(self): """ Handle a read notification. Called by the engine as the result of an event indicating that a read is available. """ readbuf = self._read(4096) for dummy_char in readbuf: # raise Empty if empty (should never happen) pmsg = self._msgq.get(block=False) self.eh.ev_msg(self, pmsg.get()) def msg(self, send_msg, send_once=False): """ Port message send method that will wait for acknowledgement unless the send_once parameter if set. """ pmsg = EnginePort._Msg(send_msg, not send_once) self._msgq.put(pmsg, block=True, timeout=None) try: ret = os.write(self.writer_fileno(), "M") except OSError: raise pmsg.sync() return ret == 1 def msg_send(self, send_msg): """ Port message send-once method (no acknowledgement). 
""" self.msg(send_msg, send_once=True) clustershell-1.6/lib/ClusterShell/Worker/Worker.py0000644000130500135250000004261211741571247021703 0ustar thiellgpocre# # Copyright CEA/DAM/DIF (2007, 2008, 2009, 2010, 2011) # Contributor: Stephane THIELL # # This file is part of the ClusterShell library. # # This software is governed by the CeCILL-C license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/ or redistribute the software under the terms of the CeCILL-C # license as circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. # # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-C license and that you accept its terms. """ ClusterShell worker interface. A worker is a generic object which provides "grouped" work in a specific task. 
""" from ClusterShell.Worker.EngineClient import EngineClient from ClusterShell.NodeSet import NodeSet import os class WorkerException(Exception): """Generic worker exception.""" class WorkerError(WorkerException): """Generic worker error.""" # DEPRECATED: WorkerBadArgumentError exception is deprecated as of 1.4, # use ValueError instead. WorkerBadArgumentError = ValueError class Worker(object): """ Worker is an essential base class for the ClusterShell library. The goal of a worker object is to execute a common work on a single or several targets (abstract notion) in parallel. Concret targets and also the notion of local or distant targets are managed by Worker's subclasses (for example, see the DistantWorker base class). A configured Worker object is associated to a specific ClusterShell Task, which can be seen as a single-threaded Worker supervisor. Indeed, the work to be done is executed in parallel depending on other Workers and Task's current paramaters, like current fanout value. ClusterShell is designed to write event-driven applications, and the Worker class is key here as Worker objects are passed as parameter of most event handlers (see the ClusterShell.Event.EventHandler class). The following public object variables are defined on some events, so you may find them useful in event handlers: - worker.current_node [ev_read,ev_error,ev_hup] node/key concerned by event - worker.current_msg [ev_read] message just read (from stdout) - worker.current_errmsg [ev_error] error message just read (from stderr) - worker.current_rc [ev_hup] return code just received Example of use: >>> from ClusterShell.Event import EventHandler >>> class MyOutputHandler(EventHandler): ... def ev_read(self, worker): ... node = worker.current_node ... line = worker.current_msg ... print "%s: %s" % (node, line) ... """ def __init__(self, handler): """ Initializer. Should be called from derived classes. 
""" # Associated EventHandler object self.eh = handler # Parent task (once bound) self.task = None self.started = False self.metaworker = None self.metarefcnt = 0 # current_x public variables (updated at each event accordingly) self.current_node = None self.current_msg = None self.current_errmsg = None self.current_rc = 0 def _set_task(self, task): """ Bind worker to task. Called by task.schedule() """ if self.task is not None: # one-shot-only schedule supported for now raise WorkerError("worker has already been scheduled") self.task = task def _task_bound_check(self): if not self.task: raise WorkerError("worker is not task bound") def _engine_clients(self): """ Return a list of underlying engine clients. """ raise NotImplementedError("Derived classes must implement.") def _on_start(self): """ Starting worker. """ if not self.started: self.started = True if self.eh: self.eh.ev_start(self) # Base getters def last_read(self): """ Get last read message from event handler. [DEPRECATED] use current_msg """ raise NotImplementedError("Derived classes must implement.") def last_error(self): """ Get last error message from event handler. [DEPRECATED] use current_errmsg """ raise NotImplementedError("Derived classes must implement.") def did_timeout(self): """ Return whether this worker has aborted due to timeout. """ self._task_bound_check() return self.task._num_timeout_by_worker(self) > 0 # Base actions def abort(self): """ Abort processing any action by this worker. """ raise NotImplementedError("Derived classes must implement.") def flush_buffers(self): """ Flush any messages associated to this worker. """ self._task_bound_check() self.task._flush_buffers_by_worker(self) def flush_errors(self): """ Flush any error messages associated to this worker. """ self._task_bound_check() self.task._flush_errors_by_worker(self) class DistantWorker(Worker): """ Base class DistantWorker, which provides a useful set of setters/getters to use with distant workers like ssh or pdsh. 
""" def _on_node_msgline(self, node, msg): """ Message received from node, update last* stuffs. """ # Maxoptimize this method as it might be called very often. task = self.task handler = self.eh self.current_node = node self.current_msg = msg if task._msgtree is not None: # don't waste time task._msg_add((self, node), msg) if handler is not None: handler.ev_read(self) def _on_node_errline(self, node, msg): """ Error message received from node, update last* stuffs. """ task = self.task handler = self.eh self.current_node = node self.current_errmsg = msg if task._errtree is not None: task._errmsg_add((self, node), msg) if handler is not None: handler.ev_error(self) def _on_node_rc(self, node, rc): """ Return code received from a node, update last* stuffs. """ self.current_node = node self.current_rc = rc self.task._rc_set((self, node), rc) if self.eh: self.eh.ev_hup(self) def _on_node_timeout(self, node): """ Update on node timeout. """ # Update current_node to allow node resolution after ev_timeout. self.current_node = node self.task._timeout_add((self, node)) def last_node(self): """ Get last node, useful to get the node in an EventHandler callback like ev_read(). [DEPRECATED] use current_node """ return self.current_node def last_read(self): """ Get last (node, buffer), useful in an EventHandler.ev_read() [DEPRECATED] use (current_node, current_msg) """ return self.current_node, self.current_msg def last_error(self): """ Get last (node, error_buffer), useful in an EventHandler.ev_error() [DEPRECATED] use (current_node, current_errmsg) """ return self.current_node, self.current_errmsg def last_retcode(self): """ Get last (node, rc), useful in an EventHandler.ev_hup() [DEPRECATED] use (current_node, current_rc) """ return self.current_node, self.current_rc def node_buffer(self, node): """ Get specific node buffer. """ self._task_bound_check() return self.task._msg_by_source((self, node)) def node_error(self, node): """ Get specific node error buffer. 
""" self._task_bound_check() return self.task._errmsg_by_source((self, node)) node_error_buffer = node_error def node_retcode(self, node): """ Get specific node return code. Raises a KeyError if command on node has not yet finished (no return code available), or is node is not known by this worker. """ self._task_bound_check() try: rc = self.task._rc_by_source((self, node)) except KeyError: raise KeyError(node) return rc node_rc = node_retcode def iter_buffers(self, match_keys=None): """ Returns an iterator over available buffers and associated NodeSet. If the optional parameter match_keys is defined, only keys found in match_keys are returned. """ self._task_bound_check() for msg, keys in self.task._call_tree_matcher( \ self.task._msgtree.walk, match_keys, self): yield msg, NodeSet.fromlist(keys) def iter_errors(self, match_keys=None): """ Returns an iterator over available error buffers and associated NodeSet. If the optional parameter match_keys is defined, only keys found in match_keys are returned. """ self._task_bound_check() for msg, keys in self.task._call_tree_matcher( \ self.task._errtree.walk, match_keys, self): yield msg, NodeSet.fromlist(keys) def iter_node_buffers(self, match_keys=None): """ Returns an iterator over each node and associated buffer. """ self._task_bound_check() return self.task._call_tree_matcher(self.task._msgtree.items, match_keys, self) def iter_node_errors(self, match_keys=None): """ Returns an iterator over each node and associated error buffer. """ self._task_bound_check() return self.task._call_tree_matcher(self.task._errtree.items, match_keys, self) def iter_retcodes(self, match_keys=None): """ Returns an iterator over return codes and associated NodeSet. If the optional parameter match_keys is defined, only keys found in match_keys are returned. 
""" self._task_bound_check() for rc, keys in self.task._rc_iter_by_worker(self, match_keys): yield rc, NodeSet.fromlist(keys) def iter_node_retcodes(self): """ Returns an iterator over each node and associated return code. """ self._task_bound_check() return self.task._krc_iter_by_worker(self) def num_timeout(self): """ Return the number of timed out "keys" (ie. nodes) for this worker. """ self._task_bound_check() return self.task._num_timeout_by_worker(self) def iter_keys_timeout(self): """ Iterate over timed out keys (ie. nodes) for a specific worker. """ self._task_bound_check() return self.task._iter_keys_timeout_by_worker(self) class WorkerSimple(EngineClient, Worker): """ Implements a simple Worker being itself an EngineClient. """ def __init__(self, file_reader, file_writer, file_error, key, handler, stderr=False, timeout=-1, autoclose=False): """ Initialize worker. """ Worker.__init__(self, handler) EngineClient.__init__(self, self, stderr, timeout, autoclose) if key is None: # allow key=0 self.key = self else: self.key = key if file_reader: self.fd_reader = file_reader.fileno() if file_error: self.fd_error = file_error.fileno() if file_writer: self.fd_writer = file_writer.fileno() def _engine_clients(self): """ Return a list of underlying engine clients. """ return [self] def set_key(self, key): """ Source key for this worker is free for use. Use this method to set the custom source key for this worker. """ self.key = key def _start(self): """ Called on EngineClient start. """ # call Worker._on_start() self._on_start() return self def _read(self, size=65536): """ Read data from process. """ return EngineClient._read(self, size) def _readerr(self, size=65536): """ Read error data from process. """ return EngineClient._readerr(self, size) def _close(self, abort, flush, timeout): """ Close client. See EngineClient._close(). """ if flush and self._rbuf: # We still have some read data available in buffer, but no # EOL. Generate a final message before closing. 
self.worker._on_msgline(self._rbuf) if self.fd_reader: os.close(self.fd_reader) if self.fd_error: os.close(self.fd_error) if self.fd_writer: os.close(self.fd_writer) if timeout: assert abort, "abort flag not set on timeout" self._on_timeout() if self.eh: self.eh.ev_close(self) def _handle_read(self): """ Engine is telling us there is data available for reading. """ # Local variables optimization task = self.worker.task msgline = self._on_msgline debug = task.info("debug", False) if debug: print_debug = task.info("print_debug") for msg in self._readlines(): print_debug(task, "LINE %s" % msg) msgline(msg) else: for msg in self._readlines(): msgline(msg) def _handle_error(self): """ Engine is telling us there is error available for reading. """ task = self.worker.task errmsgline = self._on_errmsgline debug = task.info("debug", False) if debug: print_debug = task.info("print_debug") for msg in self._readerrlines(): print_debug(task, "LINE@STDERR %s" % msg) errmsgline(msg) else: for msg in self._readerrlines(): errmsgline(msg) def last_read(self): """ Read last msg, useful in an EventHandler. """ return self.current_msg def last_error(self): """ Get last error message from event handler. """ return self.current_errmsg def _on_msgline(self, msg): """ Add a message. """ # add last msg to local buffer self.current_msg = msg # update task self.task._msg_add((self, self.key), msg) if self.eh: self.eh.ev_read(self) def _on_errmsgline(self, msg): """ Add a message. """ # add last msg to local buffer self.current_errmsg = msg # update task self.task._errmsg_add((self, self.key), msg) if self.eh: self.eh.ev_error(self) def _on_rc(self, rc): """ Set return code received. """ self.current_rc = rc self.task._rc_set((self, self.key), rc) if self.eh: self.eh.ev_hup(self) def _on_timeout(self): """ Update on timeout. """ self.task._timeout_add((self, self.key)) # trigger timeout event if self.eh: self.eh.ev_timeout(self) def read(self): """ Read worker buffer. 
""" self._task_bound_check() for key, msg in self.task._call_tree_matcher(self.task._msgtree.items, worker=self): assert key == self.key return str(msg) def error(self): """ Read worker error buffer. """ self._task_bound_check() for key, msg in self.task._call_tree_matcher(self.task._errtree.items, worker=self): assert key == self.key return str(msg) def write(self, buf): """ Write to worker. """ self._write(buf) def set_write_eof(self): """ Tell worker to close its writer file descriptor once flushed. Do not perform writes after this call. """ self._set_write_eof() clustershell-1.6/lib/ClusterShell/Worker/Tree.py0000644000130500135250000002342511741571247021332 0ustar thiellgpocre# # Copyright CEA/DAM/DIF (2011, 2012) # Contributor: Stephane THIELL # # This file is part of the ClusterShell library. # # This software is governed by the CeCILL-C license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/ or redistribute the software under the terms of the CeCILL-C # license as circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. # # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. 
Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-C license and that you accept its terms. """ ClusterShell v2 tree propagation worker """ import logging import os from ClusterShell.Event import EventHandler from ClusterShell.NodeSet import NodeSet from ClusterShell.Worker.Worker import DistantWorker from ClusterShell.Propagation import PropagationTreeRouter class MetaWorkerEventHandler(EventHandler): """ """ def __init__(self, metaworker): self.metaworker = metaworker def ev_start(self, worker): """ Called to indicate that a worker has just started. """ self.metaworker._start_count += 1 self.metaworker._check_ini() def ev_read(self, worker): """ Called to indicate that a worker has data to read. """ self.metaworker._on_node_msgline(worker.current_node, worker.current_msg) def ev_error(self, worker): """ Called to indicate that a worker has error to read (on stderr). """ self.metaworker._on_node_errline(worker.current_node, worker.current_errmsg) def ev_written(self, worker): """ Called to indicate that writing has been done. """ metaworker = self.metaworker metaworker.current_node = worker.current_node metaworker.eh.ev_written(metaworker) def ev_hup(self, worker): """ Called to indicate that a worker's connection has been closed. """ #print >>sys.stderr, "ev_hup?" self.metaworker._on_node_rc(worker.current_node, worker.current_rc) def ev_timeout(self, worker): """ Called to indicate that a worker has timed out (worker timeout only). """ # WARNING!!! this is not possible as metaworking is changing task's # shared timeout set! 
#for node in worker.iter_keys_timeout(): # self.metaworker._on_node_timeout(node) # we use NodeSet to copy set for node in NodeSet._fromlist1(worker.iter_keys_timeout()): self.metaworker._on_node_timeout(node) def ev_close(self, worker): """ Called to indicate that a worker has just finished (it may already have failed on timeout). """ #self.metaworker._check_fini() pass ##print >>sys.stderr, "ev_close?" #self._completed += 1 #if self._completed >= self.grpcount: # #print >>sys.stderr, "ev_close!" # metaworker = self.metaworker # metaworker.eh.ev_close(metaworker) class WorkerTree(DistantWorker): """ ClusterShell tree worker Class. """ def __init__(self, nodes, handler, timeout, **kwargs): """ Initialize Tree worker instance. @param nodes: Targeted nodeset. @param handler: Worker EventHandler. @param timeout: Timeout value for worker. @param command: Command to execute. @param topology: Force specific TopologyTree. @param newroot: Root node of TopologyTree. """ DistantWorker.__init__(self, handler) self.workers = [] self.nodes = NodeSet(nodes) self.timeout = timeout self.command = kwargs.get('command') self.source = kwargs.get('source') self.dest = kwargs.get('dest') autoclose = kwargs.get('autoclose', False) self.stderr = kwargs.get('stderr', False) self._close_count = 0 self._start_count = 0 self._child_count = 0 self._target_count = 0 self._has_timeout = False self.logger = logging.getLogger(__name__) if self.command is not None: pass elif self.source: raise NotImplementedError else: raise ValueError("missing command or source parameter in " \ "WorkerTree constructor") # build gateway invocation command invoke_gw_args = [] for envname in ('PYTHONPATH', \ 'CLUSTERSHELL_GW_LOG_DIR', \ 'CLUSTERSHELL_GW_LOG_LEVEL'): envval = os.getenv(envname) if envval: invoke_gw_args.append("%s=%s" % (envname, envval)) invoke_gw_args.append("python -m ClusterShell/Gateway -Bu") self.invoke_gateway = ' '.join(invoke_gw_args) self.topology = kwargs.get('topology') if self.topology 
is not None: self.newroot = kwargs.get('newroot') or str(self.topology.root.nodeset) self.router = PropagationTreeRouter(self.newroot, self.topology) else: self.router = None self.upchannel = None self.metahandler = MetaWorkerEventHandler(self) def _set_task(self, task): """ Bind worker to task. Called by task.schedule(). WorkerTree metaworker: override to schedule sub-workers. """ ##if fanout is None: ## fanout = self.router.fanout ##self.task.set_info('fanout', fanout) DistantWorker._set_task(self, task) # Now bound to task - initalize router self.topology = self.topology or task.topology self.router = self.router or task._default_router() # And launch stuffs next_hops = self._distribute(self.task.info("fanout"), self.nodes) for gw, targets in next_hops.iteritems(): if gw == targets: self.logger.debug('task.shell cmd=%s nodes=%s timeout=%d' % \ (self.command, self.nodes, self.timeout)) self._child_count += 1 self._target_count += len(targets) self.workers.append(self.task.shell(self.command, nodes=targets, timeout=self.timeout, handler=self.metahandler, stderr=self.stderr, tree=False)) else: self._execute_remote(self.command, targets, gw, self.timeout) def _distribute(self, fanout, dst_nodeset): """distribute target nodes between next hop gateways""" distribution = {} self.router.fanout = fanout for gw, dstset in self.router.dispatch(dst_nodeset): if gw in distribution: distribution[gw].add(dstset) else: distribution[gw] = dstset return distribution def _execute_remote(self, cmd, targets, gateway, timeout): """run command against a remote node via a gateway""" self.logger.debug("_execute_remote gateway=%s cmd=%s targets=%s" % \ (gateway, cmd, targets)) #self._start_count += 1 #self._child_count += 1 self._target_count += len(targets) self.task.pchannel(gateway, self).shell(nodes=targets, command=cmd, worker=self, timeout=timeout, stderr=self.stderr, gw_invoke_cmd=self.invoke_gateway) def _engine_clients(self): """ Access underlying engine clients. 
""" return [] def _on_node_rc(self, node, rc): DistantWorker._on_node_rc(self, node, rc) self._close_count += 1 self._check_fini() def _on_node_timeout(self, node): DistantWorker._on_node_timeout(self, node) self._close_count += 1 self._has_timeout = True self._check_fini() def _check_ini(self): self.logger.debug("WorkerTree: _check_ini (%d, %d)" % \ (self._start_count,self._child_count)) if self._start_count >= self._child_count: self.eh.ev_start(self) def _check_fini(self): if self._close_count >= self._target_count: handler = self.eh if handler: if self._has_timeout: handler.ev_timeout(self) handler.ev_close(self) self.task._pchannel_release(self) def write(self, buf): """ Write to worker clients. """ for c in self._engine_clients(): c._write(buf) def set_write_eof(self): """ Tell worker to close its writer file descriptor once flushed. Do not perform writes after this call. """ for c in self._engine_clients(): c._set_write_eof() def abort(self): """ Abort processing any action by this worker. """ for c in self._engine_clients(): c.abort() clustershell-1.6/lib/ClusterShell/Worker/Pdsh.py0000644000130500135250000002605511741571247021333 0ustar thiellgpocre# # Copyright CEA/DAM/DIF (2007, 2008, 2009, 2010, 2011) # Contributor: Stephane THIELL # # This file is part of the ClusterShell library. # # This software is governed by the CeCILL-C license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/ or redistribute the software under the terms of the CeCILL-C # license as circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. 
# # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-C license and that you accept its terms. """ WorkerPdsh ClusterShell worker for executing commands with LLNL pdsh. """ import errno import os import sys from ClusterShell.NodeSet import NodeSet from ClusterShell.Worker.EngineClient import EngineClient from ClusterShell.Worker.EngineClient import EngineClientError from ClusterShell.Worker.EngineClient import EngineClientNotSupportedError from ClusterShell.Worker.Worker import DistantWorker from ClusterShell.Worker.Worker import WorkerError class WorkerPdsh(EngineClient, DistantWorker): """ ClusterShell pdsh-based worker Class. Remote Shell (pdsh) usage example: >>> worker = WorkerPdsh(nodeset, handler=MyEventHandler(), ... timeout=30, command="/bin/hostname") >>> task.schedule(worker) # schedule worker for execution >>> task.resume() # run Remote Copy (pdcp) usage example: >>> worker = WorkerPdsh(nodeset, handler=MyEventHandler(), ... timeout=30, source="/etc/my.conf", ... 
dest="/etc/my.conf") >>> task.schedule(worker) # schedule worker for execution >>> task.resume() # run Known limitations: - write() is not supported by WorkerPdsh - return codes == 0 are not garanteed when a timeout is used (rc > 0 are fine) """ def __init__(self, nodes, handler, timeout, **kwargs): """ Initialize Pdsh worker instance. """ DistantWorker.__init__(self, handler) self.nodes = NodeSet(nodes) self.closed_nodes = NodeSet() self.command = kwargs.get('command') self.source = kwargs.get('source') self.dest = kwargs.get('dest') autoclose = kwargs.get('autoclose', False) stderr = kwargs.get('stderr', False) EngineClient.__init__(self, self, stderr, timeout, autoclose) if self.command is not None: # PDSH self.source = None self.dest = None self.mode = 'pdsh' elif self.source: # PDCP self.command = None self.mode = 'pdcp' # Preserve modification times and modes? self.preserve = kwargs.get('preserve', False) # Reverse copy (rpdcp)? self.reverse = kwargs.get('reverse', False) if self.reverse: self.isdir = os.path.isdir(self.dest) if not self.isdir: raise ValueError("reverse copy dest must be a directory") else: self.isdir = os.path.isdir(self.source) else: raise ValueError("missing command or source parameter in " \ "WorkerPdsh constructor") self.popen = None self._buf = "" def _engine_clients(self): return [self] def _start(self): """ Start worker, initialize buffers, prepare command. """ # Initialize worker read buffer self._buf = "" pdsh_env = {} if self.command is not None: # Build pdsh command executable = self.task.info("pdsh_path") or "pdsh" cmd_l = [ executable, "-b" ] fanout = self.task.info("fanout", 0) if fanout > 0: cmd_l.append("-f %d" % fanout) # Pdsh flag '-t' do not really works well. Better to use # PDSH_SSH_ARGS_APPEND variable to transmit ssh ConnectTimeout # flag. 
connect_timeout = self.task.info("connect_timeout", 0) if connect_timeout > 0: pdsh_env['PDSH_SSH_ARGS_APPEND'] = "-o ConnectTimeout=%d" % \ connect_timeout command_timeout = self.task.info("command_timeout", 0) if command_timeout > 0: cmd_l.append("-u %d" % command_timeout) cmd_l.append("-w %s" % self.nodes) cmd_l.append("%s" % self.command) if self.task.info("debug", False): self.task.info("print_debug")(self.task, "PDSH: %s" % \ ' '.join(cmd_l)) else: # Build pdcp command if self.reverse: executable = self.task.info('rpdcp_path') or "rpdcp" else: executable = self.task.info("pdcp_path") or "pdcp" cmd_l = [ executable, "-b" ] fanout = self.task.info("fanout", 0) if fanout > 0: cmd_l.append("-f %d" % fanout) connect_timeout = self.task.info("connect_timeout", 0) if connect_timeout > 0: cmd_l.append("-t %d" % connect_timeout) cmd_l.append("-w %s" % self.nodes) if self.isdir: cmd_l.append("-r") if self.preserve: cmd_l.append("-p") cmd_l.append(self.source) cmd_l.append(self.dest) if self.task.info("debug", False): self.task.info("print_debug")(self.task,"PDCP: %s" % \ ' '.join(cmd_l)) self.popen = self._exec_nonblock(cmd_l, env=pdsh_env) self._on_start() return self def write(self, buf): """ Write data to process. Not supported with Pdsh worker. """ raise EngineClientNotSupportedError("writing is not " \ "supported by pdsh worker") def _close(self, abort, flush, timeout): """ Close client. See EngineClient._close(). 
""" if abort: prc = self.popen.poll() if prc is None: # process is still running, kill it self.popen.kill() prc = self.popen.wait() if prc >= 0: rc = prc if rc != 0: raise WorkerError("Cannot run pdsh (error %d)" % rc) if abort and timeout: if self.eh: self.eh.ev_timeout(self) os.close(self.fd_reader) self.fd_reader = None if self.fd_error: os.close(self.fd_error) self.fd_error = None if self.fd_writer: os.close(self.fd_writer) self.fd_writer = None if timeout: assert abort, "abort flag not set on timeout" for node in (self.nodes - self.closed_nodes): self._on_node_timeout(node) else: for node in (self.nodes - self.closed_nodes): self._on_node_rc(node, 0) if self.eh: self.eh.ev_close(self) def _parse_line(self, line, stderr): """ Parse Pdsh line syntax. """ if line.startswith("pdsh@") or \ line.startswith("pdcp@") or \ line.startswith("sending "): try: # pdsh@cors113: cors115: ssh exited with exit code 1 # 0 1 2 3 4 5 6 7 # corsUNKN: ssh: corsUNKN: Name or service not known # 0 1 2 3 4 5 6 7 # pdsh@fortoy0: fortoy101: command timeout # 0 1 2 3 # sending SIGTERM to ssh fortoy112 pid 32014 # 0 1 2 3 4 5 6 # pdcp@cors113: corsUNKN: ssh exited with exit code 255 # 0 1 2 3 4 5 6 7 # pdcp@cors113: cors115: fatal: /var/cache/shine/... # 0 1 2 3... words = line.split() # Set return code for nodename of worker if self.mode == 'pdsh': if len(words) == 4 and words[2] == "command" and \ words[3] == "timeout": pass elif len(words) == 8 and words[3] == "exited" and \ words[7].isdigit(): self._on_node_rc(words[1][:-1], int(words[7])) elif self.mode == 'pdcp': self._on_node_rc(words[1][:-1], errno.ENOENT) except Exception, exc: print >> sys.stderr, exc raise EngineClientError() else: # split pdsh reply "nodename: msg" nodename, msg = line.split(': ', 1) if stderr: self._on_node_errline(nodename, msg) else: self._on_node_msgline(nodename, msg) def _handle_read(self): """ Engine is telling us a read is available. 
""" debug = self.task.info("debug", False) if debug: print_debug = self.task.info("print_debug") for msg in self._readlines(): if debug: print_debug(self.task, "PDSH: %s" % msg) self._parse_line(msg, False) def _handle_error(self): """ Engine is telling us an error read is available. """ debug = self.worker.task.info("debug", False) if debug: print_debug = self.worker.task.info("print_debug") for msg in self._readerrlines(): if debug: print_debug(self.task, "PDSH@STDERR: %s" % msg) self._parse_line(msg, True) def _on_node_rc(self, node, rc): """ Return code received from a node, update last* stuffs. """ DistantWorker._on_node_rc(self, node, rc) self.closed_nodes.add(node) clustershell-1.6/lib/ClusterShell/RangeSet.py0000644000130500135250000006457411741571247020704 0ustar thiellgpocre# # Copyright CEA/DAM/DIF (2012) # Contributor: Stephane THIELL # # This file is part of the ClusterShell library. # # This software is governed by the CeCILL-C license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/ or redistribute the software under the terms of the CeCILL-C # license as circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. # # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. 
Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-C license and that you accept its terms. """ Cluster range set module. Instances of RangeSet provide similar operations than the builtin set type, extended to support cluster ranges-like format and stepping support ("0-8/2"). """ __all__ = ['RangeSetException', 'RangeSetParseError', 'RangeSetPaddingError', 'RangeSet'] class RangeSetException(Exception): """Base RangeSet exception class.""" class RangeSetParseError(RangeSetException): """Raised when RangeSet parsing cannot be done properly.""" def __init__(self, part, msg): if part: msg = "%s : \"%s\"" % (msg, part) RangeSetException.__init__(self, msg) # faulty subrange; this allows you to target the error self.part = part class RangeSetPaddingError(RangeSetParseError): """Raised when a fatal padding incoherency occurs""" def __init__(self, part, msg): RangeSetParseError.__init__(self, part, "padding mismatch (%s)" % msg) class RangeSet(set): """ Mutable set of cluster node indexes featuring a fast range-based API. This class aims to ease the management of potentially large cluster range sets and is used by the NodeSet class. RangeSet basic constructors: >>> rset = RangeSet() # empty RangeSet >>> rset = RangeSet("5,10-42") # contains 5, 10 to 42 >>> rset = RangeSet("0-10/2") # contains 0, 2, 4, 6, 8, 10 Since v1.6, any iterable of integers can be specified as first argument: >>> RangeSet([3, 6, 8, 7, 1]) 1,3,6-8 >>> rset2 = RangeSet(rset) Padding of ranges (eg. "003-009") can be managed through a public RangeSet instance variable named padding. It may be changed at any time. 
Since v1.6, padding is a simple display feature per RangeSet object, thus current padding value is not taken into account when computing set operations. Since v1.6, RangeSet is itself an iterator over its items as integers (instead of strings). To iterate over string items as before (with optional padding), you can now use the RangeSet.striter() method. RangeSet provides methods like union(), intersection(), difference(), symmetric_difference() and their in-place versions update(), intersection_update(), difference_update(), symmetric_difference_update() which conform to the Python Set API. """ _VERSION = 3 # serial version number # define __new__() to workaround built-in set subclassing with Python 2.4 def __new__(cls, pattern=None, autostep=None): """Object constructor""" return set.__new__(cls) def __init__(self, pattern=None, autostep=None): """Initialize RangeSet with optional string pattern and autostep threshold. """ if pattern is None or isinstance(pattern, str): set.__init__(self) else: set.__init__(self, pattern) if isinstance(pattern, RangeSet): self._autostep = pattern._autostep self.padding = pattern.padding else: self._autostep = None self.padding = None self.autostep = autostep if isinstance(pattern, str): self._parse(pattern) def _parse(self, pattern): """Parse string of comma-separated x-y/step -like ranges""" # Comma separated ranges if pattern.find(',') < 0: subranges = [pattern] else: subranges = pattern.split(',') for subrange in subranges: if subrange.find('/') < 0: step = 1 baserange = subrange else: baserange, step = subrange.split('/', 1) try: step = int(step) except ValueError: raise RangeSetParseError(subrange, "cannot convert string to integer") if baserange.find('-') < 0: if step != 1: raise RangeSetParseError(subrange, "invalid step usage") begin = end = baserange else: begin, end = baserange.split('-', 1) # compute padding and return node range info tuple try: pad = 0 if int(begin) != 0: begins = begin.lstrip("0") if len(begin) - 
len(begins) > 0: pad = len(begin) start = int(begins) else: if len(begin) > 1: pad = len(begin) start = 0 if int(end) != 0: ends = end.lstrip("0") else: ends = end stop = int(ends) except ValueError: raise RangeSetParseError(subrange, "cannot convert string to integer") # check preconditions if stop > 1e100 or start > stop or step < 1: raise RangeSetParseError(subrange, "invalid values in range") self.add_range(start, stop + 1, step, pad) @classmethod def fromlist(cls, rnglist, autostep=None): """Class method that returns a new RangeSet with ranges from provided list.""" inst = RangeSet(autostep=autostep) inst.updaten(rnglist) return inst @classmethod def fromone(cls, index, pad=0, autostep=None): """Class method that returns a new RangeSet of one single item or a single range (from integer or slice object).""" inst = RangeSet(autostep=autostep) # support slice object with duck-typing try: inst.add(index, pad) except TypeError: if not index.stop: raise ValueError("Invalid range upper limit (%s)" % index.stop) inst.add_range(index.start or 0, index.stop, index.step or 1, pad) return inst def get_autostep(self): """Get autostep value (property)""" if self._autostep >= 1E100: return None else: return self._autostep + 1 def set_autostep(self, val): """Set autostep value (property)""" if val is None: # disabled by default for pdsh compat (+inf is 1E400, but a bug in # python 2.4 makes it impossible to be pickled, so we use less) # NOTE: Later, we could consider sys.maxint here self._autostep = 1E100 else: # - 1 because user means node count, but we means real steps self._autostep = int(val) - 1 autostep = property(get_autostep, set_autostep) def _sorted(self): """Get sorted list from inner set.""" return sorted(set.__iter__(self)) def __iter__(self): """Iterate over each element in RangeSet.""" return iter(self._sorted()) def striter(self): """Iterate over each (optionally padded) string element in RangeSet.""" pad = self.padding or 0 for i in self._sorted(): yield 
"%0*d" % (pad, i) def contiguous(self): """Object-based iterator over contiguous range sets.""" pad = self.padding or 0 for sli in self._contiguous_slices(): yield RangeSet.fromone(slice(sli.start, sli.stop, sli.step), pad) def __reduce__(self): """Return state information for pickling.""" return self.__class__, (str(self),), \ { 'padding': self.padding, \ '_autostep': self._autostep, \ '_version' : RangeSet._VERSION } def __setstate__(self, dic): """called upon unpickling""" self.__dict__.update(dic) if getattr(self, '_version', 0) < RangeSet._VERSION: # unpickle from old version? if getattr(self, '_version', 0) <= 1: # v1 (no object versioning) - CSv1.3 setattr(self, '_ranges', [(slice(start, stop + 1, step), pad) \ for start, stop, step, pad in getattr(self, '_ranges')]) elif hasattr(self, '_ranges'): # v2 - CSv1.4-1.5 self_ranges = getattr(self, '_ranges') if self_ranges and type(self_ranges[0][0]) is not slice: # workaround for object pickled from Python < 2.5 setattr(self, '_ranges', [(slice(start, stop, step), pad) \ for (start, stop, step), pad in self_ranges]) # convert to v3 for sli, pad in getattr(self, '_ranges'): self.add_range(sli.start, sli.stop, sli.step, pad) delattr(self, '_ranges') delattr(self, '_length') def _strslices(self): """Stringify slices list (x-y/step format)""" pad = self.padding or 0 for sli in self.slices(): if sli.start + 1 == sli.stop: yield "%0*d" % (pad, sli.start) else: assert sli.step >= 0, "Internal error: sli.step < 0" if sli.step == 1: yield "%0*d-%0*d" % (pad, sli.start, pad, sli.stop - 1) else: yield "%0*d-%0*d/%d" % (pad, sli.start, pad, sli.stop - 1, \ sli.step) def __str__(self): """Get comma-separated range-based string (x-y/step format).""" return ','.join(self._strslices()) # __repr__ is the same as __str__ as it is a valid expression that # could be used to recreate a RangeSet with the same value __repr__ = __str__ def _contiguous_slices(self): """Internal iterator over contiguous slices in RangeSet.""" k = j = 
None for i in self._sorted(): if k is None: k = j = i if i - j > 1: yield slice(k, j + 1, 1) k = i j = i if k is not None: yield slice(k, j + 1, 1) def _folded_slices(self): """Internal generator that is able to retrieve ranges organized by step. Complexity: O(n) with n = number of ranges in tree.""" if len(self) == 0: return prng = None # pending range istart = None # processing starting indice m = 0 # processing step for sli in self._contiguous_slices(): start = sli.start stop = sli.stop unitary = (start + 1 == stop) # one indice? if istart is None: # first loop if unitary: istart = start else: prng = [start, stop, 1] istart = stop - 1 i = k = istart elif m == 0: # istart is set but step is unknown if not unitary: if prng is not None: # yield and replace pending range yield slice(*prng) else: yield slice(istart, istart + 1, 1) prng = [start, stop, 1] istart = k = stop - 1 continue i = start else: # step m > 0 assert m > 0 i = start # does current range lead to broken step? if m != i - k or not unitary: #j = i if m == i - k else k if m == i - k: j = i else: j = k # stepped is True when autostep setting does apply stepped = (j - istart >= self._autostep * m) if prng: # yield pending range? 
if stepped: prng[1] -= 1 else: istart += m yield slice(*prng) prng = None if m != i - k: # case: step value has changed if stepped: yield slice(istart, k + 1, m) else: for j in range(istart, k - m + 1, m): yield slice(j, j + 1, 1) if not unitary: yield slice(k, k + 1, 1) if unitary: if stepped: istart = i = k = start else: istart = k else: prng = [start, stop, 1] istart = i = k = stop - 1 elif not unitary: # case: broken step by contiguous range if stepped: # yield 'range/m' by taking first indice of new range yield slice(istart, i + 1, m) i += 1 else: # autostep setting does not apply in that case for j in range(istart, i - m + 1, m): yield slice(j, j + 1, 1) if stop > i + 1: # current->pending only if not unitary prng = [i, stop, 1] istart = i = k = stop - 1 m = i - k # compute step k = i # exited loop, process pending range or indice... if m == 0: if prng: yield slice(*prng) else: yield slice(istart, istart + 1, 1) else: assert m > 0 stepped = (k - istart >= self._autostep * m) if prng: if stepped: prng[1] -= 1 else: istart += m yield slice(*prng) prng = None if stepped: yield slice(istart, i + 1, m) else: for j in range(istart, i + 1, m): yield slice(j, j + 1, 1) def slices(self): """ Iterate over RangeSet ranges as Python slice objects. """ # return an iterator if self._autostep >= 1E100: return self._contiguous_slices() else: return self._folded_slices() def __getitem__(self, index): """ Return the element at index or a subrange when a slice is specified. """ if isinstance(index, slice): inst = RangeSet() inst._autostep = self._autostep inst.padding = self.padding inst.update(self._sorted()[index]) return inst elif isinstance(index, int): return self._sorted()[index] else: raise TypeError, \ "%s indices must be integers" % self.__class__.__name__ def split(self, nbr): """ Split the rangeset into nbr sub-rangesets (at most). Each sub-rangeset will have the same number of elements more or less 1. Current rangeset remains unmodified. Returns an iterator. 
        >>> RangeSet("1-5").split(3)
        RangeSet("1-2")
        RangeSet("3-4")
        RangeSet("5")
        """
        assert(nbr > 0)

        # We put the same number of elements in each sub-rangeset.
        slice_size = len(self) / nbr
        left = len(self) % nbr

        begin = 0
        for i in range(0, min(nbr, len(self))):
            # the first `left' sub-rangesets get one extra element
            length = slice_size + int(i < left)
            yield self[begin:begin + length]
            begin += length

    def add_range(self, start, stop, step=1, pad=0):
        """
        Add a range (start, stop, step and padding length) to RangeSet.
        Like the Python built-in function range(), the last element is
        the largest start + i * step less than stop.
        """
        assert start < stop, "please provide ordered node index ranges"
        assert step > 0
        assert pad >= 0
        assert stop - start < 1e9, "range too large"
        # first range carrying padding info sets the display padding
        if pad > 0 and self.padding is None:
            self.padding = pad
        set.update(self, range(start, stop, step))

    def copy(self):
        """Return a shallow copy of a RangeSet."""
        cpy = self.__class__()
        # carry over RangeSet-specific display/fold settings
        cpy._autostep = self._autostep
        cpy.padding = self.padding
        cpy.update(self)
        return cpy

    __copy__ = copy # For the copy module

    def __eq__(self, other):
        """
        RangeSet equality comparison.
        """
        # Return NotImplemented instead of raising TypeError, to
        # indicate that the comparison is not implemented with respect
        # to the other type (the other comparand then gets a chance to
        # determine the result, then it falls back to object address
        # comparison).
        if not isinstance(other, RangeSet):
            return NotImplemented
        # same size + subset relation <=> same elements
        return len(self) == len(other) and self.issubset(other)

    # Standard set operations: union, intersection, both differences.
    # Each has an operator version (e.g. __or__, invoked with |) and a
    # method version (e.g. union).
    # Subtle: Each pair requires distinct code so that the outcome is
    # correct when the type of other isn't suitable.  For example, if
    # we did "union = __or__" instead, then Set().union(3) would return
    # NotImplemented instead of raising TypeError (albeit that *why* it
    # raises TypeError as-is is also a bit subtle).
    def _wrap_set_op(self, fun, arg):
        """Wrap built-in set operations for RangeSet to workaround built-in
        set base class issues (RangeSet.__new/init__ not called).

        Built-in set operations return a bare object of the subclass
        without running __init__, so RangeSet-specific attributes must be
        copied onto the result explicitly here."""
        result = fun(self, arg)
        result._autostep = self._autostep
        result.padding = self.padding
        return result

    def __or__(self, other):
        """Return the union of two RangeSets as a new RangeSet.

        (I.e. all elements that are in either set.)
        """
        if not isinstance(other, set):
            return NotImplemented
        return self.union(other)

    def union(self, other):
        """Return the union of two RangeSets as a new RangeSet.

        (I.e. all elements that are in either set.)
        """
        return self._wrap_set_op(set.union, other)

    def __and__(self, other):
        """Return the intersection of two RangeSets as a new RangeSet.

        (I.e. all elements that are in both sets.)
        """
        if not isinstance(other, set):
            return NotImplemented
        return self.intersection(other)

    def intersection(self, other):
        """Return the intersection of two RangeSets as a new RangeSet.

        (I.e. all elements that are in both sets.)
        """
        return self._wrap_set_op(set.intersection, other)

    def __xor__(self, other):
        """Return the symmetric difference of two RangeSets as a new
        RangeSet.

        (I.e. all elements that are in exactly one of the sets.)
        """
        if not isinstance(other, set):
            return NotImplemented
        return self.symmetric_difference(other)

    def symmetric_difference(self, other):
        """Return the symmetric difference of two RangeSets as a new
        RangeSet.

        (ie. all elements that are in exactly one of the sets.)
        """
        return self._wrap_set_op(set.symmetric_difference, other)

    def __sub__(self, other):
        """Return the difference of two RangeSets as a new RangeSet.

        (I.e. all elements that are in this set and not in the other.)
        """
        if not isinstance(other, set):
            return NotImplemented
        return self.difference(other)

    def difference(self, other):
        """Return the difference of two RangeSets as a new RangeSet.

        (I.e. all elements that are in this set and not in the other.)
        """
        return self._wrap_set_op(set.difference, other)

    # Membership test

    def __contains__(self, element):
        """Report whether an element is a member of a RangeSet.
        Element can be either another RangeSet object, a string or an
        integer.

        (Called in response to the expression `element in self'.)
        """
        # a set argument means: are ALL of its elements in self?
        if isinstance(element, set):
            return element.issubset(self)
        # otherwise coerce to int (accepts numeric strings)
        return set.__contains__(self, int(element))

    # Subset and superset test

    def issubset(self, other):
        """Report whether another set contains this RangeSet."""
        self._binary_sanity_check(other)
        return set.issubset(self, other)

    def issuperset(self, other):
        """Report whether this RangeSet contains another set."""
        self._binary_sanity_check(other)
        return set.issuperset(self, other)

    # Inequality comparisons using the is-subset relation.
    __le__ = issubset
    __ge__ = issuperset

    def __lt__(self, other):
        # proper subset: strictly smaller and contained in other
        self._binary_sanity_check(other)
        return len(self) < len(other) and self.issubset(other)

    def __gt__(self, other):
        # proper superset: strictly larger and containing other
        self._binary_sanity_check(other)
        return len(self) > len(other) and self.issuperset(other)

    # Assorted helpers

    def _binary_sanity_check(self, other):
        """Check that the other argument to a binary operation is also a
        set, raising a TypeError otherwise."""
        if not isinstance(other, set):
            raise TypeError, "Binary operation only permitted between sets"

    # In-place union, intersection, differences.
    # Subtle:  The xyz_update() functions deliberately return None,
    # as do all mutating operations on built-in container types.
    # The __xyz__ spellings have to return self, though.
    def __ior__(self, other):
        """Update a RangeSet with the union of itself and another."""
        self._binary_sanity_check(other)
        set.__ior__(self, other)
        return self

    def union_update(self, other):
        """Update a RangeSet with the union of itself and another."""
        self.update(other)

    def __iand__(self, other):
        """Update a RangeSet with the intersection of itself and another."""
        self._binary_sanity_check(other)
        set.__iand__(self, other)
        return self

    def intersection_update(self, other):
        """Update a RangeSet with the intersection of itself and another."""
        set.intersection_update(self, other)

    def __ixor__(self, other):
        """Update a RangeSet with the symmetric difference of itself and
        another."""
        self._binary_sanity_check(other)
        set.symmetric_difference_update(self, other)
        return self

    def symmetric_difference_update(self, other):
        """Update a RangeSet with the symmetric difference of itself and
        another."""
        set.symmetric_difference_update(self, other)

    def __isub__(self, other):
        """Remove all elements of another set from this RangeSet."""
        self._binary_sanity_check(other)
        set.difference_update(self, other)
        return self

    def difference_update(self, other, strict=False):
        """Remove all elements of another set from this RangeSet.

        If strict is True, raise KeyError if an element cannot be removed.
        (strict is a RangeSet addition)"""
        if strict and other not in self:
            # report the first element of other that is missing from self
            raise KeyError(other.difference(self)[0])
        set.difference_update(self, other)

    # Python dict-like mass mutations: update, clear

    def update(self, iterable):
        """Add all integers from an iterable (such as a list)."""
        if isinstance(iterable, RangeSet):
            # keep padding unless it has not been defined yet
            if self.padding is None and iterable.padding is not None:
                self.padding = iterable.padding
        # strings must go through the RangeSet() parser, not update()
        assert type(iterable) is not str
        set.update(self, iterable)

    def updaten(self, rangesets):
        """
        Update a rangeset with the union of itself and several others.
        """
        for rng in rangesets:
            if isinstance(rng, set):
                self.update(rng)
            else:
                # parse non-set arguments (eg. strings) as RangeSet
                self.update(RangeSet(rng))
        # py2.5+
        #self.update(rng if isinstance(rng, set) else RangeSet(rng))

    def clear(self):
        """Remove all elements from this RangeSet."""
        set.clear(self)
        # padding is a per-content property: reset it too
        self.padding = None

    # Single-element mutations: add, remove, discard

    def add(self, element, pad=0):
        """Add an element to a RangeSet.
        This has no effect if the element is already present.
        """
        set.add(self, int(element))
        # first padded element sets the display padding
        if pad > 0 and self.padding is None:
            self.padding = pad

    def remove(self, element):
        """Remove an element from a RangeSet; it must be a member.

        Raise KeyError if element is not contained in RangeSet.
        Raise ValueError if element is not castable to integer.
        """
        set.remove(self, int(element))

    def discard(self, element):
        """Remove element from the RangeSet if it is a member.

        If the element is not a member, do nothing.
        """
        try:
            i = int(element)
            set.discard(self, i)
        except ValueError:
            pass # ignore other object types
clustershell-1.6/lib/ClusterShell/Event.py0000644000130500135250000000624511741571247020244 0ustar  thiellgpocre#
#
# Copyright CEA/DAM/DIF (2007, 2008, 2009, 2010, 2011, 2012)
# Contributor: Stephane THIELL
#
# This file is part of the ClusterShell library.
#
# This software is governed by the CeCILL-C license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL-C
# license as circulated by CEA, CNRS and INRIA at the following URL
# "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
# # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-C license and that you accept its terms. """ Event handler support EventHandler's derived classes may implement ev_* methods to listen on worker's events. """ class EventHandler(object): """ Base class EventHandler. """ def ev_start(self, worker): """ Called to indicate that a worker has just started. """ def ev_read(self, worker): """ Called to indicate that a worker has data to read. """ def ev_error(self, worker): """ Called to indicate that a worker has error to read (on stderr). """ def ev_written(self, worker): """ Called to indicate that writing has been done. """ def ev_hup(self, worker): """ Called to indicate that a worker's connection has been closed. """ def ev_timeout(self, worker): """ Called to indicate that a worker has timed out (worker timeout only). """ def ev_close(self, worker): """ Called to indicate that a worker has just finished (it may already have failed on timeout). """ def ev_msg(self, port, msg): """ Handle port message. @param port: The port object on which a message is available. """ def ev_timer(self, timer): """ Handle firing timer. @param timer: The timer that is firing. """ def _ev_routing(self, worker, arg): """ Routing event (private). 
Called to indicate that a (meta)worker has just updated one of its route path. You can safely ignore this event. """ clustershell-1.6/lib/ClusterShell/NodeSet.py0000644000130500135250000013010711741571247020517 0ustar thiellgpocre# # Copyright CEA/DAM/DIF (2007, 2008, 2009, 2010, 2011, 2012) # Contributor: Stephane THIELL # # This file is part of the ClusterShell library. # # This software is governed by the CeCILL-C license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/ or redistribute the software under the terms of the CeCILL-C # license as circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. # # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-C license and that you accept its terms. """ Cluster node set module. A module to efficiently deal with node sets and node groups. 
Instances of NodeSet provide similar operations than the builtin set() type, see http://www.python.org/doc/lib/set-objects.html Usage example ============= >>> # Import NodeSet class ... from ClusterShell.NodeSet import NodeSet >>> >>> # Create a new nodeset from string ... nodeset = NodeSet("cluster[1-30]") >>> # Add cluster32 to nodeset ... nodeset.update("cluster32") >>> # Remove from nodeset ... nodeset.difference_update("cluster[2-5,8-31]") >>> # Print nodeset as a pdsh-like pattern ... print nodeset cluster[1,6-7,32] >>> # Iterate over node names in nodeset ... for node in nodeset: ... print node cluster1 cluster6 cluster7 cluster32 """ import re import sys import ClusterShell.NodeUtils as NodeUtils from ClusterShell.RangeSet import RangeSet, RangeSetParseError # Define default GroupResolver object used by NodeSet DEF_GROUPS_CONFIG = "/etc/clustershell/groups.conf" DEF_RESOLVER_STD_GROUP = NodeUtils.GroupResolverConfig(DEF_GROUPS_CONFIG) # Standard group resolver RESOLVER_STD_GROUP = DEF_RESOLVER_STD_GROUP # Special constants for NodeSet's resolver parameter # RESOLVER_NOGROUP => avoid any group resolution at all # RESOLVER_NOINIT => reserved use for optimized copy() RESOLVER_NOGROUP = -1 RESOLVER_NOINIT = -2 # 1.5 compat (deprecated) STD_GROUP_RESOLVER = RESOLVER_STD_GROUP NOGROUP_RESOLVER = RESOLVER_NOGROUP class NodeSetException(Exception): """Base NodeSet exception class.""" class NodeSetParseError(NodeSetException): """Raised when NodeSet parsing cannot be done properly.""" def __init__(self, part, msg): if part: msg = "%s : \"%s\"" % (msg, part) NodeSetException.__init__(self, msg) # faulty part; this allows you to target the error self.part = part class NodeSetParseRangeError(NodeSetParseError): """Raised when bad range is encountered during NodeSet parsing.""" def __init__(self, rset_exc): NodeSetParseError.__init__(self, str(rset_exc), "bad range") class NodeSetExternalError(NodeSetException): """Raised when an external error is encountered.""" class 
NodeSetBase(object): """ Base class for NodeSet. This class allows node set base object creation from specified string pattern and rangeset object. If optional copy_rangeset boolean flag is set to True (default), provided rangeset object is copied (if needed), otherwise it may be referenced (should be seen as an ownership transfer upon creation). This class implements core node set arithmetics (no string parsing here). Example: >>> nsb = NodeSetBase('node%s-ipmi', RangeSet('1-5,7'), False) >>> str(nsb) 'node[1-5,7]-ipmi' """ def __init__(self, pattern=None, rangeset=None, copy_rangeset=True): """New NodeSetBase object initializer""" self._length = 0 self._patterns = {} if pattern: self._add(pattern, rangeset, copy_rangeset) elif rangeset: raise ValueError("missing pattern") def _iter(self): """Iterator on internal item tuples (pattern, index, padding).""" for pat, rangeset in sorted(self._patterns.iteritems()): if rangeset: pad = rangeset.padding or 0 for idx in rangeset._sorted(): yield pat, idx, pad else: yield pat, None, None def _iterbase(self): """Iterator on single, one-item NodeSetBase objects.""" for pat, start, pad in self._iter(): if start is not None: yield NodeSetBase(pat, RangeSet.fromone(start, pad)) else: yield NodeSetBase(pat) # no node index def __iter__(self): """Iterator on single nodes as string.""" # Does not call self._iterbase() + str() for better performance. for pat, start, pad in self._iter(): if start is not None: yield pat % ("%0*d" % (pad, start)) else: yield pat # define striter() alias for convenience (to match RangeSet.striter()) striter = __iter__ # define nsiter() as an object-based iterator that could be used for # __iter__() in the future... 
    def nsiter(self):
        """Object-based NodeSet iterator on single nodes.

        Like __iter__(), but yields one-node objects built with
        self.__class__ instead of plain strings."""
        for pat, start, pad in self._iter():
            ns = self.__class__()
            if start is not None:
                ns._add_new(pat, RangeSet.fromone(start, pad))
            else:
                # unnumbered node: pattern carries no rangeset
                ns._add_new(pat, None)
            yield ns

    def contiguous(self):
        """Object-based NodeSet iterator on contiguous node sets.

        Contiguous node set contains nodes with same pattern name and a
        contiguous range of indexes, like foobar[1-100]."""
        for pat, rangeset in sorted(self._patterns.iteritems()):
            ns = self.__class__()
            if rangeset:
                for cont_rset in rangeset.contiguous():
                    ns._add_new(pat, cont_rset)
                    yield ns
            else:
                ns._add_new(pat, None)
                yield ns

    def __len__(self):
        """Get the number of nodes in NodeSet."""
        cnt = 0
        for rangeset in self._patterns.itervalues():
            if rangeset:
                cnt += len(rangeset)
            else:
                # pattern with no rangeset counts as one single node
                cnt += 1
        return cnt

    def __str__(self):
        """Get ranges-based pattern of node list."""
        result = ""
        for pat, rangeset in sorted(self._patterns.iteritems()):
            if rangeset:
                rgs = str(rangeset)
                cnt = len(rangeset)
                if cnt > 1:
                    # bracket multi-index ranges, eg. node[1-5,7]
                    rgs = "[" + rgs + "]"
                result += pat % rgs
            else:
                result += pat
            result += ","
        # drop trailing comma (empty set yields "")
        return result[:-1]

    def copy(self):
        """Return a shallow copy."""
        cpy = self.__class__()
        cpy._length = self._length
        dic = {}
        for pat, rangeset in self._patterns.iteritems():
            if rangeset is None:
                dic[pat] = None
            else:
                # rangesets are mutable: copy them so copies diverge safely
                dic[pat] = rangeset.copy()
        cpy._patterns = dic
        return cpy

    def __contains__(self, other):
        """Is node contained in NodeSet ?"""
        return self.issuperset(other)

    def _binary_sanity_check(self, other):
        # check that the other argument to a binary operation is also
        # a NodeSet, raising a TypeError otherwise.
if not isinstance(other, NodeSetBase): raise TypeError, \ "Binary operation only permitted between NodeSetBase" def issubset(self, other): """Report whether another nodeset contains this nodeset.""" self._binary_sanity_check(other) return other.issuperset(self) def issuperset(self, other): """Report whether this nodeset contains another nodeset.""" self._binary_sanity_check(other) status = True for pat, erangeset in other._patterns.iteritems(): rangeset = self._patterns.get(pat) if rangeset: status = rangeset.issuperset(erangeset) else: # might be an unnumbered node (key in dict but no value) status = self._patterns.has_key(pat) if not status: break return status def __eq__(self, other): """NodeSet equality comparison.""" # See comment for for RangeSet.__eq__() if not isinstance(other, NodeSetBase): return NotImplemented return len(self) == len(other) and self.issuperset(other) # inequality comparisons using the is-subset relation __le__ = issubset __ge__ = issuperset def __lt__(self, other): """x.__lt__(y) <==> x x>y""" self._binary_sanity_check(other) return len(self) > len(other) and self.issuperset(other) def _extractslice(self, index): """Private utility function: extract slice parameters from slice object `index` for an list-like object of size `length`.""" length = len(self) if index.start is None: sl_start = 0 elif index.start < 0: sl_start = max(0, length + index.start) else: sl_start = index.start if index.stop is None: sl_stop = sys.maxint elif index.stop < 0: sl_stop = max(0, length + index.stop) else: sl_stop = index.stop if index.step is None: sl_step = 1 elif index.step < 0: # We support negative step slicing with no start/stop, ie. r[::-n]. if index.start is not None or index.stop is not None: raise IndexError, \ "illegal start and stop when negative step is used" # As RangeSet elements are ordered internally, adjust sl_start # to fake backward stepping in case of negative slice step. 
stepmod = (length + -index.step - 1) % -index.step if stepmod > 0: sl_start += stepmod sl_step = -index.step else: sl_step = index.step if not isinstance(sl_start, int) or not isinstance(sl_stop, int) \ or not isinstance(sl_step, int): raise TypeError, "slice indices must be integers" return sl_start, sl_stop, sl_step def __getitem__(self, index): """Return the node at specified index or a subnodeset when a slice is specified.""" if isinstance(index, slice): inst = NodeSetBase() sl_start, sl_stop, sl_step = self._extractslice(index) sl_next = sl_start if sl_stop <= sl_next: return inst length = 0 for pat, rangeset in sorted(self._patterns.iteritems()): if rangeset: cnt = len(rangeset) offset = sl_next - length if offset < cnt: num = min(sl_stop - sl_next, cnt - offset) inst._add(pat, rangeset[offset:offset + num:sl_step], False) else: #skip until sl_next is reached length += cnt continue else: cnt = num = 1 if sl_next > length: length += cnt continue inst._add(pat, None) # adjust sl_next... sl_next += num if (sl_next - sl_start) % sl_step: sl_next = sl_start + \ ((sl_next - sl_start)/sl_step + 1) * sl_step if sl_next >= sl_stop: break length += cnt return inst elif isinstance(index, int): if index < 0: length = len(self) if index >= -length: index = length + index # - -index else: raise IndexError, "%d out of range" % index length = 0 for pat, rangeset in sorted(self._patterns.iteritems()): if rangeset: cnt = len(rangeset) if index < length + cnt: # return a subrangeset of size 1 to manage padding return pat % rangeset[index - length:index - length + 1] else: cnt = 1 if index == length: return pat length += cnt raise IndexError, "%d out of range" % index else: raise TypeError, "NodeSet indices must be integers" def _add_new(self, pat, rangeset): """Add nodes from a (pat, rangeset) tuple. Predicate: pattern does not exist in current set. 
        RangeSet object is referenced (not copied)."""
        if rangeset:
            # create new pattern
            self._patterns[pat] = rangeset
        else:
            # create new pattern with no rangeset (single node)
            self._patterns[pat] = None

    def _add(self, pat, rangeset, copy_rangeset=True):
        """Add nodes from a (pat, rangeset) tuple.

        `pat' may be an existing pattern and `rangeset' may be None.
        RangeSet object is copied if re-used internally when provided
        and if copy_rangeset flag is set."""
        # get patterns dict entry
        pat_e = self._patterns.get(pat)
        if pat_e:
            # don't play with prefix: if there is a value, there is a
            # rangeset.
            assert rangeset is not None
            # add rangeset in corresponding pattern rangeset
            pat_e.update(rangeset)
        else:
            # copy_rangeset=False is an ownership transfer from the caller
            if rangeset and copy_rangeset:
                rangeset = rangeset.copy()
            self._add_new(pat, rangeset)

    def union(self, other):
        """
        s.union(t) returns a new set with elements from both s and t.
        """
        self_copy = self.copy()
        self_copy.update(other)
        return self_copy

    def __or__(self, other):
        """
        Implements the | operator. So s | t returns a new nodeset with
        elements from both s and t.
        """
        if not isinstance(other, NodeSetBase):
            return NotImplemented
        return self.union(other)

    def add(self, other):
        """
        Add node to NodeSet.
        """
        self.update(other)

    def update(self, other):
        """
        s.update(t) returns nodeset s with elements added from t.
        """
        self._binary_sanity_check(other)
        for pat, rangeset in other._patterns.iteritems():
            self._add(pat, rangeset)

    def updaten(self, others):
        """
        s.updaten(list) returns nodeset s with elements added from given list.
        """
        for other in others:
            self.update(other)

    def clear(self):
        """
        Remove all nodes from this nodeset.
        """
        self._patterns.clear()

    def __ior__(self, other):
        """
        Implements the |= operator. So s |= t returns nodeset s with
        elements added from t. (Python version 2.5+ required)
        """
        self._binary_sanity_check(other)
        self.update(other)
        return self

    def intersection(self, other):
        """
        s.intersection(t) returns a new set with elements common to s and t.
""" self_copy = self.copy() self_copy.intersection_update(other) return self_copy def __and__(self, other): """ Implements the & operator. So s & t returns a new nodeset with elements common to s and t. """ if not isinstance(other, NodeSet): return NotImplemented return self.intersection(other) def intersection_update(self, other): """ s.intersection_update(t) returns nodeset s keeping only elements also found in t. """ self._binary_sanity_check(other) if other is self: return tmp_ns = NodeSetBase() for pat, irangeset in other._patterns.iteritems(): rangeset = self._patterns.get(pat) if rangeset: irset = rangeset.intersection(irangeset) # ignore pattern if empty rangeset if len(irset) > 0: tmp_ns._add(pat, irset, False) elif not irangeset and pat in self._patterns: # intersect two nodes with no rangeset tmp_ns._add(pat, None) # Substitute self._patterns = tmp_ns._patterns def __iand__(self, other): """ Implements the &= operator. So s &= t returns nodeset s keeping only elements also found in t. (Python version 2.5+ required) """ self._binary_sanity_check(other) self.intersection_update(other) return self def difference(self, other): """ s.difference(t) returns a new NodeSet with elements in s but not in t. """ self_copy = self.copy() self_copy.difference_update(other) return self_copy def __sub__(self, other): """ Implement the - operator. So s - t returns a new nodeset with elements in s but not in t. """ if not isinstance(other, NodeSetBase): return NotImplemented return self.difference(other) def difference_update(self, other, strict=False): """ s.difference_update(t) returns nodeset s after removing elements found in t. If strict is True, raise KeyError if an element cannot be removed. 
""" self._binary_sanity_check(other) # the purge of each empty pattern is done afterward to allow self = ns purge_patterns = [] # iterate first over exclude nodeset rangesets which is usually smaller for pat, erangeset in other._patterns.iteritems(): # if pattern is found, deal with it rangeset = self._patterns.get(pat) if rangeset: # sub rangeset, raise KeyError if not found rangeset.difference_update(erangeset, strict) # check if no range left and add pattern to purge list if len(rangeset) == 0: purge_patterns.append(pat) else: # unnumbered node exclusion if self._patterns.has_key(pat): purge_patterns.append(pat) elif strict: raise KeyError, pat for pat in purge_patterns: del self._patterns[pat] def __isub__(self, other): """ Implement the -= operator. So s -= t returns nodeset s after removing elements found in t. (Python version 2.5+ required) """ self._binary_sanity_check(other) self.difference_update(other) return self def remove(self, elem): """ Remove element elem from the nodeset. Raise KeyError if elem is not contained in the nodeset. """ self.difference_update(elem, True) def symmetric_difference(self, other): """ s.symmetric_difference(t) returns the symmetric difference of two nodesets as a new NodeSet. (ie. all nodes that are in exactly one of the nodesets.) """ self_copy = self.copy() self_copy.symmetric_difference_update(other) return self_copy def __xor__(self, other): """ Implement the ^ operator. So s ^ t returns a new NodeSet with nodes that are in exactly one of the nodesets. """ if not isinstance(other, NodeSet): return NotImplemented return self.symmetric_difference(other) def symmetric_difference_update(self, other): """ s.symmetric_difference_update(t) returns nodeset s keeping all nodes that are in exactly one of the nodesets. 
""" self._binary_sanity_check(other) purge_patterns = [] # iterate over our rangesets for pat, rangeset in self._patterns.iteritems(): brangeset = other._patterns.get(pat) if brangeset: rangeset.symmetric_difference_update(brangeset) else: if other._patterns.has_key(pat): purge_patterns.append(pat) # iterate over other's rangesets for pat, brangeset in other._patterns.iteritems(): rangeset = self._patterns.get(pat) if not rangeset and not pat in self._patterns: self._add(pat, brangeset) # check for patterns cleanup for pat, rangeset in self._patterns.iteritems(): if rangeset is not None and len(rangeset) == 0: purge_patterns.append(pat) # cleanup for pat in purge_patterns: del self._patterns[pat] def __ixor__(self, other): """ Implement the ^= operator. So s ^= t returns nodeset s after keeping all nodes that are in exactly one of the nodesets. (Python version 2.5+ required) """ self._binary_sanity_check(other) self.symmetric_difference_update(other) return self class NodeGroupBase(NodeSetBase): """NodeGroupBase aims to ease node group names management.""" def _add(self, pat, rangeset, copy_rangeset=True): """ Add groups from a (pat, rangeset) tuple. `pat' may be an existing pattern and `rangeset' may be None. """ if pat and pat[0] != '@': raise ValueError("NodeGroup name must begin with character '@'") NodeSetBase._add(self, pat, rangeset, copy_rangeset) class ParsingEngine(object): """ Class that is able to transform a source into a NodeSetBase. """ OP_CODES = { 'update': ',', 'difference_update': '!', 'intersection_update': '&', 'symmetric_difference_update': '^' } def __init__(self, group_resolver): """ Initialize Parsing Engine. """ self.group_resolver = group_resolver self.single_node_re = re.compile("(\D*)(\d*)(.*)") def parse(self, nsobj, autostep): """ Parse provided object if possible and return a NodeSetBase object. """ # passing None is supported if nsobj is None: return NodeSetBase() # is nsobj a NodeSetBase instance? 
if isinstance(nsobj, NodeSetBase): return nsobj # or is nsobj a string? if type(nsobj) is str: try: return self.parse_string(str(nsobj), autostep) except NodeUtils.GroupSourceQueryFailed, exc: raise NodeSetParseError(nsobj, str(exc)) raise TypeError("Unsupported NodeSet input %s" % type(nsobj)) def parse_string(self, nsstr, autostep): """ Parse provided string and return a NodeSetBase object. """ nodeset = NodeSetBase() for opc, pat, rangeset in self._scan_string(nsstr, autostep): # Parser main debugging: #print "OPC %s PAT %s RANGESET %s" % (opc, pat, rangeset) if self.group_resolver and pat[0] == '@': ns_group = NodeSetBase() for nodegroup in NodeGroupBase(pat, rangeset): # parse/expand nodes group ns_string_ext = self.parse_group_string(nodegroup) if ns_string_ext: # convert result and apply operation ns_group.update(self.parse(ns_string_ext, autostep)) # perform operation getattr(nodeset, opc)(ns_group) else: getattr(nodeset, opc)(NodeSetBase(pat, rangeset, False)) return nodeset def parse_string_single(self, nsstr, autostep): """Parse provided string and return a NodeSetBase object.""" pat, rangeset = self._scan_string_single(nsstr, autostep) return NodeSetBase(pat, rangeset, False) def parse_group(self, group, namespace=None, autostep=None): """Parse provided single group name (without @ prefix).""" assert self.group_resolver is not None nodestr = self.group_resolver.group_nodes(group, namespace) return self.parse(",".join(nodestr), autostep) def parse_group_string(self, nodegroup): """Parse provided group string and return a string.""" assert nodegroup[0] == '@' assert self.group_resolver is not None grpstr = nodegroup[1:] if grpstr.find(':') < 0: # default namespace return ",".join(self.group_resolver.group_nodes(grpstr)) else: # specified namespace namespace, group = grpstr.split(':', 1) return ",".join(self.group_resolver.group_nodes(group, namespace)) def _next_op(self, pat): """Opcode parsing subroutine.""" op_idx = -1 next_op_code = None for opc, idx 
in [(k, pat.find(v)) \ for k, v in ParsingEngine.OP_CODES.iteritems()]: if idx >= 0 and (op_idx < 0 or idx <= op_idx): next_op_code = opc op_idx = idx return op_idx, next_op_code def _scan_string_single(self, nsstr, autostep): """Single node scan, returns (pat, rangeset)""" # ignore whitespace(s) node = nsstr.strip() if len(node) == 0: raise NodeSetParseError(nsstr, "empty node name") # single node parsing mobj = self.single_node_re.match(node) if not mobj: raise NodeSetParseError(node, "parse error") pfx, idx, sfx = mobj.groups() pfx, sfx = pfx or "", sfx or "" # pfx+sfx cannot be empty if len(pfx) + len(sfx) == 0: raise NodeSetParseError(node, "empty node name") if idx: # optimization: process single index padding directly pad = 0 if int(idx) != 0: idxs = idx.lstrip("0") if len(idx) - len(idxs) > 0: pad = len(idx) idxint = int(idxs) else: if len(idx) > 1: pad = len(idx) idxint = 0 if idxint > 1e100: raise NodeSetParseRangeError( \ RangeSetParseError(idx, "invalid rangeset index")) # optimization: use numerical RangeSet constructor rset = RangeSet.fromone(idxint, pad, autostep) return "%s%%s%s" % (pfx, sfx), rset else: # undefined pad means no node index return pfx, None def _scan_string(self, nsstr, autostep): """Parsing engine's string scanner method (iterator).""" pat = nsstr.strip() # avoid misformatting if pat.find('%') >= 0: pat = pat.replace('%', '%%') next_op_code = 'update' while pat is not None: # Ignore whitespace(s) for convenience pat = pat.lstrip() op_code, next_op_code = next_op_code, None op_idx = -1 op_idx, next_op_code = self._next_op(pat) bracket_idx = pat.find('[') # Check if the operator is after the bracket, or if there # is no operator at all but some brackets. if bracket_idx >= 0 and (op_idx > bracket_idx or op_idx < 0): # In this case, we have a pattern of potentially several # nodes. # Fill prefix, range and suffix from pattern # eg. 
"forbin[3,4-10]-ilo" -> "forbin", "3,4-10", "-ilo" pfx, sfx = pat.split('[', 1) try: rng, sfx = sfx.split(']', 1) except ValueError: raise NodeSetParseError(pat, "missing bracket") # Check if we have a next op-separated node or pattern op_idx, next_op_code = self._next_op(sfx) if op_idx < 0: pat = None else: sfx, pat = sfx.split(self.OP_CODES[next_op_code], 1) # Ignore whitespace(s) sfx = sfx.rstrip() # pfx + sfx cannot be empty if len(pfx) + len(sfx) == 0: raise NodeSetParseError(pat, "empty node name") # Process comma-separated ranges try: rset = RangeSet(rng, autostep) except RangeSetParseError, ex: raise NodeSetParseRangeError(ex) yield op_code, "%s%%s%s" % (pfx, sfx), rset else: # In this case, either there is no comma and no bracket, # or the bracket is after the comma, then just return # the node. if op_idx < 0: node = pat pat = None # break next time else: node, pat = pat.split(self.OP_CODES[next_op_code], 1) newpat, rset = self._scan_string_single(node, autostep) yield op_code, newpat, rset class NodeSet(NodeSetBase): """ Iterable class of nodes with node ranges support. NodeSet creation examples: >>> nodeset = NodeSet() # empty NodeSet >>> nodeset = NodeSet("cluster3") # contains only cluster3 >>> nodeset = NodeSet("cluster[5,10-42]") >>> nodeset = NodeSet("cluster[0-10/2]") >>> nodeset = NodeSet("cluster[0-10/2],othername[7-9,120-300]") NodeSet provides methods like update(), intersection_update() or difference_update() methods, which conform to the Python Set API. However, unlike RangeSet or standard Set, NodeSet is somewhat not so strict for convenience, and understands NodeSet instance or NodeSet string as argument. 
    Also, there is no strict definition of one element, for example, it IS
    allowed to do:
        >>> nodeset = NodeSet("blue[1-50]")
        >>> nodeset.remove("blue[36-40]")
        >>> print nodeset
        blue[1-35,41-50]

    Additionally, the NodeSet class recognizes the "extended string pattern"
    which adds support for union (special character ","), difference ("!"),
    intersection ("&") and symmetric difference ("^") operations. String
    patterns are read from left to right, by processing any character
    operators accordingly.

    Extended string pattern usage examples:
        >>> nodeset = NodeSet("node[0-10],node[14-16]") # union
        >>> nodeset = NodeSet("node[0-10]!node[8-10]")  # difference
        >>> nodeset = NodeSet("node[0-10]&node[5-13]")  # intersection
        >>> nodeset = NodeSet("node[0-10]^node[5-13]")  # xor
    """
    def __init__(self, nodes=None, autostep=None, resolver=None):
        """
        Initialize a NodeSet.

        The `nodes' argument may be a valid nodeset string or a NodeSet
        object. If no nodes are specified, an empty NodeSet is created.
        """
        NodeSetBase.__init__(self)
        self._autostep = autostep
        # Set group resolver.
        # RESOLVER_NOGROUP disables group resolution; RESOLVER_NOINIT is
        # reserved for the optimized copy() path.
        if resolver in (RESOLVER_NOGROUP, RESOLVER_NOINIT):
            self._resolver = None
        else:
            self._resolver = resolver or RESOLVER_STD_GROUP
        # Initialize default parser.
if resolver == RESOLVER_NOINIT: self._parser = None else: self._parser = ParsingEngine(self._resolver) self.update(nodes) @classmethod def _fromone(cls, single, autostep=None, resolver=None): """Class method that returns a new NodeSet from a single node string (optimized constructor).""" inst = NodeSet(autostep=autostep, resolver=resolver) inst.update(inst._parser.parse_string_single(single, autostep)) return inst @classmethod def _fromlist1(cls, nodelist, autostep=None, resolver=None): """Class method that returns a new NodeSet with single nodes from provided list (optimized constructor).""" inst = NodeSet(autostep=autostep, resolver=resolver) for single in nodelist: inst.update(inst._parser.parse_string_single(single, autostep)) return inst @classmethod def fromlist(cls, nodelist, autostep=None, resolver=None): """Class method that returns a new NodeSet with nodes from provided list.""" inst = NodeSet(autostep=autostep, resolver=resolver) inst.updaten(nodelist) return inst @classmethod def fromall(cls, groupsource=None, autostep=None, resolver=None): """Class method that returns a new NodeSet with all nodes from optional groupsource.""" inst = NodeSet(autostep=autostep, resolver=resolver) if not inst._resolver: raise NodeSetExternalError("No node group resolver") try: # Ask resolver to provide all nodes. for nodes in inst._resolver.all_nodes(groupsource): inst.update(nodes) except NodeUtils.GroupSourceNoUpcall: # As the resolver is not able to provide all nodes directly, # failback to list + map(s) method: try: # Like in regroup(), we get a NodeSet of all groups in # specified group source. allgrpns = NodeSet.fromlist( \ inst._resolver.grouplist(groupsource), resolver=RESOLVER_NOGROUP) # For each individual group, resolve it to node and accumulate. for grp in allgrpns: inst.update(NodeSet.fromlist( \ inst._resolver.group_nodes(grp, groupsource))) except NodeUtils.GroupSourceNoUpcall: # We are not able to find "all" nodes, definitely. 
raise NodeSetExternalError("Not enough working external " \ "calls (all, or map + list) defined to get all nodes") except NodeUtils.GroupSourceQueryFailed, exc: raise NodeSetExternalError("Unable to get all nodes due to the " \ "following external failure:\n\t%s" % exc) return inst def __getstate__(self): """Called when pickling: remove references to group resolver.""" odict = self.__dict__.copy() del odict['_resolver'] del odict['_parser'] return odict def __setstate__(self, dic): """Called when unpickling: restore parser using non group resolver.""" self.__dict__.update(dic) self._resolver = None self._parser = ParsingEngine(None) def copy(self): """Return a shallow copy of a NodeSet.""" cpy = self.__class__(resolver=RESOLVER_NOINIT) cpy._length = self._length dic = {} for pat, rangeset in self._patterns.iteritems(): if rangeset is None: dic[pat] = None else: dic[pat] = rangeset.copy() cpy._patterns = dic cpy._autostep = self._autostep cpy._resolver = self._resolver cpy._parser = self._parser return cpy __copy__ = copy # For the copy module def _find_groups(self, node, namespace, allgroups): """Find groups of node by namespace.""" if allgroups: # find node groups using in-memory allgroups for grp, nodeset in allgroups.iteritems(): if node in nodeset: yield grp else: # find node groups using resolver for group in self._resolver.node_groups(node, namespace): yield group def _groups2(self, groupsource=None, autostep=None): """Find node groups this nodeset belongs to. [private]""" if not self._resolver: raise NodeSetExternalError("No node group resolver") try: # Get a NodeSet of all groups in specified group source. allgrpns = NodeSet.fromlist(self._resolver.grouplist(groupsource), resolver=RESOLVER_NOGROUP) except NodeUtils.GroupSourceException: # If list query failed, we still might be able to regroup # using reverse. 
allgrpns = None groups_info = {} allgroups = {} # Check for external reverse presence, and also use the # following heuristic: external reverse is used only when number # of groups is greater than the NodeSet size. if self._resolver.has_node_groups(groupsource) and \ (not allgrpns or len(allgrpns) >= len(self)): # use external reverse pass else: if not allgrpns: # list query failed and no way to reverse! return groups_info # empty try: # use internal reverse: populate allgroups for grp in allgrpns: nodelist = self._resolver.group_nodes(grp, groupsource) allgroups[grp] = NodeSet(",".join(nodelist)) except NodeUtils.GroupSourceQueryFailed, exc: # External result inconsistency raise NodeSetExternalError("Unable to map a group " \ "previously listed\n\tFailed command: %s" % exc) # For each NodeSetBase in self, find its groups. for node in self._iterbase(): for grp in self._find_groups(node, groupsource, allgroups): if grp not in groups_info: nodes = self._parser.parse_group(grp, groupsource, autostep) groups_info[grp] = (1, nodes) else: i, nodes = groups_info[grp] groups_info[grp] = (i + 1, nodes) return groups_info def groups(self, groupsource=None, noprefix=False): """Find node groups this nodeset belongs to. Return a dictionary of the form: group_name => (group_nodeset, contained_nodeset) Group names are always prefixed with "@". If groupsource is provided, they are prefixed with "@groupsource:", unless noprefix is True. """ groups = self._groups2(groupsource, self._autostep) result = {} for grp, (i, nsb) in groups.iteritems(): if groupsource and not noprefix: key = "@%s:%s" % (groupsource, grp) else: key = "@" + grp result[key] = (NodeSet(nsb), self.intersection(nsb)) return result def regroup(self, groupsource=None, autostep=None, overlap=False, noprefix=False): """Regroup nodeset using node groups. Try to find fully matching node groups (within specified groupsource) and return a string that represents this node set (containing these potential node groups). 
When no matching node groups are found, this method returns the same result as str().""" groups = self._groups2(groupsource, autostep) if not groups: return str(self) # Keep only groups that are full. fulls = [] for k, (i, nodes) in groups.iteritems(): assert i <= len(nodes) if i == len(nodes): fulls.append((i, k)) rest = NodeSet(self, resolver=RESOLVER_NOGROUP) regrouped = NodeSet(resolver=RESOLVER_NOGROUP) bigalpha = lambda x, y: cmp(y[0], x[0]) or cmp(x[1], y[1]) # Build regrouped NodeSet by selecting largest groups first. for num, grp in sorted(fulls, cmp=bigalpha): if not overlap and groups[grp][1] not in rest: continue if groupsource and not noprefix: regrouped.update("@%s:%s" % (groupsource, grp)) else: regrouped.update("@" + grp) rest.difference_update(groups[grp][1]) if not rest: return str(regrouped) if regrouped: return "%s,%s" % (regrouped, rest) return str(rest) def issubset(self, other): """ Report whether another nodeset contains this nodeset. """ nodeset = self._parser.parse(other, self._autostep) return NodeSetBase.issuperset(nodeset, self) def issuperset(self, other): """ Report whether this nodeset contains another nodeset. """ nodeset = self._parser.parse(other, self._autostep) return NodeSetBase.issuperset(self, nodeset) def __getitem__(self, index): """ Return the node at specified index or a subnodeset when a slice is specified. """ base = NodeSetBase.__getitem__(self, index) if not isinstance(base, NodeSetBase): return base # return a real NodeSet inst = NodeSet(autostep=self._autostep, resolver=self._resolver) inst._patterns = base._patterns return inst def split(self, nbr): """ Split the nodeset into nbr sub-nodesets (at most). Each sub-nodeset will have the same number of elements more or less 1. Current nodeset remains unmodified. >>> for nodeset in NodeSet("foo[1-5]").split(3): ... print nodeset foo[1-2] foo[3-4] foo5 """ assert(nbr > 0) # We put the same number of element in each sub-nodeset. 
slice_size = len(self) / nbr left = len(self) % nbr begin = 0 for i in range(0, min(nbr, len(self))): length = slice_size + int(i < left) yield self[begin:begin + length] begin += length def update(self, other): """ s.update(t) returns nodeset s with elements added from t. """ nodeset = self._parser.parse(other, self._autostep) NodeSetBase.update(self, nodeset) def intersection_update(self, other): """ s.intersection_update(t) returns nodeset s keeping only elements also found in t. """ nodeset = self._parser.parse(other, self._autostep) NodeSetBase.intersection_update(self, nodeset) def difference_update(self, other, strict=False): """ s.difference_update(t) returns nodeset s after removing elements found in t. If strict is True, raise KeyError if an element cannot be removed. """ nodeset = self._parser.parse(other, self._autostep) NodeSetBase.difference_update(self, nodeset, strict) def symmetric_difference_update(self, other): """ s.symmetric_difference_update(t) returns nodeset s keeping all nodes that are in exactly one of the nodesets. """ nodeset = self._parser.parse(other, self._autostep) NodeSetBase.symmetric_difference_update(self, nodeset) def expand(pat): """ Commodity function that expands a nodeset pattern into a list of nodes. """ return list(NodeSet(pat)) def fold(pat): """ Commodity function that clean dups and fold provided pattern with ranges and "/step" support. """ return str(NodeSet(pat)) def grouplist(namespace=None): """ Commodity function that retrieves the list of raw groups for a specified group namespace (or use default namespace). Group names are not prefixed with "@". 
""" return RESOLVER_STD_GROUP.grouplist(namespace) # doctest def _test(): """run inline doctest""" import doctest doctest.testmod() if __name__ == '__main__': _test() clustershell-1.6/lib/ClusterShell/__init__.py0000644000130500135250000000506311741571247020717 0ustar thiellgpocre# # Copyright CEA/DAM/DIF (2007, 2008, 2009, 2010, 2011, 2012) # Contributor: Stephane THIELL # # This file is part of the ClusterShell library. # # This software is governed by the CeCILL-C license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/ or redistribute the software under the terms of the CeCILL-C # license as circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. # # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-C license and that you accept its terms. 
"""ClusterShell Python Library ClusterShell is an event-driven open source Python library, designed to run local or distant commands in parallel on server farms or on large clusters. You can use ClusterShell as a building block to create cluster aware administration scripts and system applications in Python. It will take care of common issues encountered on HPC clusters, such as operating on groups of nodes, running distributed commands using optimized execution algorithms, as well as gathering results and merging identical outputs, or retrieving return codes. ClusterShell takes advantage of existing remote shell facilities already installed on your systems, like SSH. Please see first: - ClusterShell.NodeSet - ClusterShell.Task """ __version__ = '1.6' __version_info__ = tuple([ int(_n) for _n in __version__.split('.')]) __date__ = '2012/04/08' __author__ = 'Stephane Thiell ' __url__ = 'http://cea-hpc.github.com/clustershell/' clustershell-1.6/lib/ClusterShell/CLI/0000755000130500135250000000000011741572333017206 5ustar thiellgpocreclustershell-1.6/lib/ClusterShell/CLI/Clush.py0000644000130500135250000010237111741571247020645 0ustar thiellgpocre#!/usr/bin/env python # # Copyright CEA/DAM/DIF (2007, 2008, 2009, 2010, 2011, 2012) # Contributor: Stephane THIELL # # This file is part of the ClusterShell library. # # This software is governed by the CeCILL-C license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/ or redistribute the software under the terms of the CeCILL-C # license as circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. 
# # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-C license and that you accept its terms. """ execute cluster commands in parallel clush is an utility program to run commands on a cluster which benefits from the ClusterShell library and its Ssh worker. It features an integrated output results gathering system (dshbak-like), can get node groups by running predefined external commands and can redirect lines read on its standard input to the remote commands. When no command are specified, clush runs interactively. 
""" import errno import logging import os import resource import sys import signal import threading from ClusterShell.CLI.Config import ClushConfig, ClushConfigError from ClusterShell.CLI.Display import Display from ClusterShell.CLI.Display import VERB_QUIET, VERB_STD, VERB_VERB, VERB_DEBUG from ClusterShell.CLI.OptionParser import OptionParser from ClusterShell.CLI.Error import GENERIC_ERRORS, handle_generic_error from ClusterShell.CLI.Utils import NodeSet, bufnodeset_cmp from ClusterShell.Event import EventHandler from ClusterShell.MsgTree import MsgTree from ClusterShell.NodeSet import RESOLVER_NOGROUP, RESOLVER_STD_GROUP from ClusterShell.NodeSet import NodeSetParseError from ClusterShell.Task import Task, task_self class UpdatePromptException(Exception): """Exception used by the signal handler""" class StdInputHandler(EventHandler): """Standard input event handler class.""" def __init__(self, worker): EventHandler.__init__(self) self.master_worker = worker def ev_msg(self, port, msg): """invoked when a message is received from port object""" if not msg: self.master_worker.set_write_eof() return # Forward messages to master worker self.master_worker.write(msg) class OutputHandler(EventHandler): """Base class for clush output handlers.""" def __init__(self): EventHandler.__init__(self) self._runtimer = None def runtimer_init(self, task, ntotal): """Init timer for live command-completed progressmeter.""" self._runtimer = task.timer(2.0, RunTimer(task, ntotal), interval=1./3., autoclose=True) def _runtimer_clean(self): """Hide runtimer counter""" if self._runtimer: self._runtimer.eh.erase_line() def _runtimer_set_dirty(self): """Force redisplay of counter""" if self._runtimer: self._runtimer.eh.set_dirty() def _runtimer_finalize(self, worker): """Finalize display of runtimer counter""" if self._runtimer: self._runtimer.eh.finalize(worker.task.default("USER_interactive")) def update_prompt(self, worker): """ If needed, notify main thread to update its prompt by 
sending a SIGUSR1 signal. We use task-specific user-defined variable to record current states (prefixed by USER_). """ worker.task.set_default("USER_running", False) if worker.task.default("USER_handle_SIGUSR1"): os.kill(os.getpid(), signal.SIGUSR1) class DirectOutputHandler(OutputHandler): """Direct output event handler class.""" def __init__(self, display): OutputHandler.__init__(self) self._display = display def ev_read(self, worker): node = worker.current_node or worker.key self._display.print_line(node, worker.current_msg) def ev_error(self, worker): node = worker.current_node or worker.key self._display.print_line_error(node, worker.current_errmsg) def ev_hup(self, worker): node = worker.current_node or worker.key rc = worker.current_rc if rc > 0: verb = VERB_QUIET if self._display.maxrc: verb = VERB_STD self._display.vprint_err(verb, \ "clush: %s: exited with exit code %d" % (node, rc)) def ev_timeout(self, worker): self._display.vprint_err(VERB_QUIET, "clush: %s: command timeout" % \ NodeSet._fromlist1(worker.iter_keys_timeout())) def ev_close(self, worker): self.update_prompt(worker) class CopyOutputHandler(DirectOutputHandler): """Copy output event handler.""" def __init__(self, display, reverse=False): DirectOutputHandler.__init__(self, display) self.reverse = reverse def ev_close(self, worker): """A copy worker has finished.""" for rc, nodes in worker.iter_retcodes(): if rc == 0: if self.reverse: self._display.vprint(VERB_VERB, "%s:`%s' -> `%s'" % \ (nodes, worker.source, worker.dest)) else: self._display.vprint(VERB_VERB, "`%s' -> %s:`%s'" % \ (worker.source, nodes, worker.dest)) break # multiple copy workers may be running (handled by this task's thread) copies = worker.task.default("USER_copies") - 1 worker.task.set_default("USER_copies", copies) if copies == 0: self._runtimer_finalize(worker) self.update_prompt(worker) class GatherOutputHandler(OutputHandler): """Gathered output event handler class.""" def __init__(self, display): 
OutputHandler.__init__(self) self._display = display def ev_read(self, worker): if self._display.verbosity == VERB_VERB: node = worker.current_node or worker.key self._display.print_line(node, worker.current_msg) def ev_error(self, worker): self._runtimer_clean() self._display.print_line_error(worker.current_node, worker.current_errmsg) self._runtimer_set_dirty() def ev_close(self, worker): # Worker is closing -- it's time to gather results... self._runtimer_finalize(worker) assert worker.current_node is not None, "cannot gather local command" # Display command output, try to order buffers by rc nodesetify = lambda v: (v[0], NodeSet._fromlist1(v[1])) cleaned = False for rc, nodelist in sorted(worker.iter_retcodes()): # Then order by node/nodeset (see bufnodeset_cmp) for buf, nodeset in sorted(map(nodesetify, worker.iter_buffers(nodelist)), cmp=bufnodeset_cmp): if not cleaned: # clean runtimer line before printing first result self._runtimer_clean() cleaned = True self._display.print_gather(nodeset, buf) self._display.flush() self._close_common(worker) # Notify main thread to update its prompt self.update_prompt(worker) def _close_common(self, worker): verbexit = VERB_QUIET if self._display.maxrc: verbexit = VERB_STD # Display return code if not ok ( != 0) for rc, nodelist in worker.iter_retcodes(): if rc != 0: ns = NodeSet._fromlist1(nodelist) self._display.vprint_err(verbexit, \ "clush: %s: exited with exit code %d" % (ns, rc)) # Display nodes that didn't answer within command timeout delay if worker.num_timeout() > 0: self._display.vprint_err(verbexit, "clush: %s: command timeout" % \ NodeSet._fromlist1(worker.iter_keys_timeout())) class LiveGatherOutputHandler(GatherOutputHandler): """Live line-gathered output event handler class.""" def __init__(self, display, nodes): assert nodes is not None, "cannot gather local command" GatherOutputHandler.__init__(self, display) self._nodes = NodeSet(nodes) self._nodecnt = dict.fromkeys(self._nodes, 0) self._mtreeq = [] 
self._offload = 0 def ev_read(self, worker): # Read new line from node node = worker.current_node self._nodecnt[node] += 1 cnt = self._nodecnt[node] if len(self._mtreeq) < cnt: self._mtreeq.append(MsgTree()) self._mtreeq[cnt - self._offload - 1].add(node, worker.current_msg) self._live_line(worker) def ev_hup(self, worker): if self._mtreeq and worker.current_node not in self._mtreeq[0]: # forget a node that doesn't answer to continue live line # gathering anyway self._nodes.remove(worker.current_node) self._live_line(worker) def _live_line(self, worker): # if all nodes have replied, display gathered line while self._mtreeq and len(self._mtreeq[0]) == len(self._nodes): mtree = self._mtreeq.pop(0) self._offload += 1 self._runtimer_clean() nodesetify = lambda v: (v[0], NodeSet.fromlist(v[1])) for buf, nodeset in sorted(map(nodesetify, mtree.walk()), cmp=bufnodeset_cmp): self._display.print_gather(nodeset, buf) self._runtimer_set_dirty() def ev_close(self, worker): # Worker is closing -- it's time to gather results... 
        self._runtimer_finalize(worker)
        # flush any rows still pending in the queue (incomplete gathers)
        for mtree in self._mtreeq:
            nodesetify = lambda v: (v[0], NodeSet.fromlist(v[1]))
            for buf, nodeset in sorted(map(nodesetify, mtree.walk()),
                                       cmp=bufnodeset_cmp):
                self._display.print_gather(nodeset, buf)
        self._close_common(worker)
        # Notify main thread to update its prompt
        self.update_prompt(worker)

class RunTimer(EventHandler):
    """Running progress timer event handler: periodically writes a
    'clush: completed/total' counter to stderr."""
    def __init__(self, task, total):
        EventHandler.__init__(self)
        self.task = task
        # total number of clients expected to run
        self.total = total
        # last displayed running-client count (-1 forces redisplay)
        self.cnt_last = -1
        # field width, so the counter has a stable on-screen length
        self.tslen = len(str(self.total))
        # length of the last written counter string (for erase_line)
        self.wholelen = 0
        self.started = False

    def ev_timer(self, timer):
        self.update()

    def set_dirty(self):
        # force redisplay on next update()
        self.cnt_last = -1

    def erase_line(self):
        # overwrite the previous counter with spaces and return carriage
        if self.wholelen:
            sys.stderr.write(' ' * self.wholelen + '\r')

    def update(self):
        # NOTE(review): uses private Task._engine to count clients still
        # registered -- confirm against Task API of this release
        cnt = len(self.task._engine.clients())
        if cnt != self.cnt_last:
            self.cnt_last = cnt
            # display completed/total clients
            towrite = 'clush: %*d/%*d\r' % (self.tslen, self.total - cnt,
                                            self.tslen, self.total)
            self.wholelen = len(towrite)
            sys.stderr.write(towrite)
            self.started = True

    def finalize(self, force_cr):
        """finalize display of runtimer"""
        if not self.started:
            return
        # display completed/total clients
        fmt = 'clush: %*d/%*d'
        if force_cr:
            # interactive: move to a fresh line for the prompt
            fmt += '\n'
        else:
            fmt += '\r'
        sys.stderr.write(fmt % (self.tslen, self.total, self.tslen,
                                self.total))

def signal_handler(signum, frame):
    """Signal handler used for main thread notification"""
    if signum == signal.SIGUSR1:
        signal.signal(signal.SIGUSR1, signal.SIG_IGN)
        # interrupts the blocking raw_input() in ttyloop()
        raise UpdatePromptException()

def get_history_file():
    """Return the history file path"""
    return os.path.join(os.environ["HOME"], ".clush_history")

def readline_setup():
    """
    Configure readline to automatically load and save a history file
    named .clush_history
    """
    import readline
    readline.parse_and_bind("tab: complete")
    readline.set_completer_delims("")
    try:
        readline.read_history_file(get_history_file())
    except IOError:
        # no history file yet: start with an empty history
        pass

def ttyloop(task, nodeset, timeout, display):
    """Manage the interactive prompt to run command"""
    readline_avail = False
    if task.default("USER_interactive"):
        try:
            import readline
            readline_setup()
            readline_avail = True
        except ImportError:
            pass
        display.vprint(VERB_STD, \
            "Enter 'quit' to leave this interactive mode")

    rc = 0
    ns = NodeSet(nodeset)
    # when True, display the current working nodeset before prompting
    ns_info = True
    cmd = ""
    while task.default("USER_running") or cmd.lower() != 'quit':
        try:
            if task.default("USER_interactive") and \
               not task.default("USER_running"):
                if ns_info:
                    display.vprint(VERB_QUIET, \
                        "Working with nodes: %s" % ns)
                    ns_info = False
                prompt = "clush> "
            else:
                # task running: read silently (no prompt)
                prompt = ""
            # Set SIGUSR1 handler if needed
            if task.default("USER_handle_SIGUSR1"):
                signal.signal(signal.SIGUSR1, signal_handler)
            try:
                cmd = raw_input(prompt)
            finally:
                signal.signal(signal.SIGUSR1, signal.SIG_IGN)
        except EOFError:
            print
            return
        except UpdatePromptException:
            # task thread asked for a prompt refresh (see update_prompt)
            if task.default("USER_interactive"):
                continue
            return
        except KeyboardInterrupt, kbe:
            # NOTE(review): indentation below reconstructed from a
            # whitespace-collapsed source -- the gather-mode dump block
            # boundary should be confirmed against upstream
            if display.gather:
                # Suspend task, so we can safely access its data from
                # the main thread
                task.suspend()
                print_warn = False
                # Display command output, but cannot order buffers by rc
                nodesetify = lambda v: (v[0], NodeSet._fromlist1(v[1]))
                for buf, nodeset in sorted(map(nodesetify,
                                               task.iter_buffers()),
                                           cmp=bufnodeset_cmp):
                    if not print_warn:
                        print_warn = True
                        display.vprint_err(VERB_STD, \
                            "Warning: Caught keyboard interrupt!")
                    display.print_gather(nodeset, buf)
                # Return code handling
                verbexit = VERB_QUIET
                if display.maxrc:
                    verbexit = VERB_STD
                ns_ok = NodeSet()
                for rc, nodelist in task.iter_retcodes():
                    ns_ok.add(NodeSet._fromlist1(nodelist))
                    if rc != 0:
                        # Display return code if not ok ( != 0)
                        ns = NodeSet._fromlist1(nodelist)
                        display.vprint_err(verbexit, \
                            "clush: %s: exited with exit code %s" % (ns, rc))
                # Add uncompleted nodeset to exception object
                kbe.uncompleted_nodes = ns - ns_ok
                # Display nodes that didn't answer within command timeout
                # delay
                if task.num_timeout() > 0:
                    display.vprint_err(verbexit, \
                        "clush: %s: command timeout" % \
                        NodeSet._fromlist1(task.iter_keys_timeout()))
            # re-raise: clush_excepthook reports and exits
            raise kbe

        if task.default("USER_running"):
            # input received while a task is running: show its progress
            ns_reg, ns_unreg = NodeSet(), NodeSet()
            for c in task._engine.clients():
                if c.registered:
                    ns_reg.add(c.key)
                else:
                    ns_unreg.add(c.key)
            if ns_unreg:
                pending = "\nclush: pending(%d): %s" % (len(ns_unreg),
                                                        ns_unreg)
            else:
                pending = ""
            display.vprint_err(VERB_QUIET, "clush: interrupt (^C to " \
                "abort task)\nclush: in progress(%d): %s%s" % (len(ns_reg), \
                ns_reg, pending))
        else:
            # idle at the prompt: interpret special commands
            cmdl = cmd.lower()
            try:
                ns_info = True
                if cmdl.startswith('+'):
                    # +<nodeset>: add nodes to working set
                    ns.update(cmdl[1:])
                elif cmdl.startswith('-'):
                    # -<nodeset>: remove nodes from working set
                    ns.difference_update(cmdl[1:])
                elif cmdl.startswith('@'):
                    # @<group>: replace working set
                    ns = NodeSet(cmdl[1:])
                elif cmdl == '=':
                    # =: toggle gathered/standard output format
                    display.gather = not display.gather
                    if display.gather:
                        display.vprint(VERB_STD, \
                            "Switching to gathered output format")
                    else:
                        display.vprint(VERB_STD, \
                            "Switching to standard output format")
                    task.set_default("stdout_msgtree", \
                        display.gather or display.line_mode)
                    ns_info = False
                    continue
                elif not cmdl.startswith('?'):
                    # if ?, just print ns_info
                    ns_info = False
            except NodeSetParseError:
                display.vprint_err(VERB_QUIET, \
                    "clush: nodeset parse error (ignoring)")

            if ns_info:
                continue

            if cmdl.startswith('!') and len(cmd.strip()) > 0:
                # !<cmd>: run command locally
                run_command(task, cmd[1:], None, timeout, display)
            elif cmdl != "quit":
                if not cmd:
                    continue
                if readline_avail:
                    readline.write_history_file(get_history_file())
                run_command(task, cmd, ns, timeout, display)
    return rc

def _stdin_thread_start(stdin_port):
    """Standard input reader thread entry point."""
    # Note: read length should be larger and a multiple of 4096 for best
    # performance to avoid excessive unreg/register of writer fd in
    # engine; however, it shouldn't be too large.
    bufsize = 4096 * 8
    # thread loop: blocking read stdin + send messages to specified
    # port object
    buf = sys.stdin.read(bufsize)
    while buf:
        # send message to specified port object (with ack)
        stdin_port.msg(buf)
        buf = sys.stdin.read(bufsize)
    # send a None message to indicate EOF
    stdin_port.msg(None)

def bind_stdin(worker):
    """Create a stdin->port->worker binding: connect specified worker
    to stdin with the help of a reader thread and a ClusterShell Port
    object."""
    assert not sys.stdin.isatty()
    # Create a ClusterShell Port object bound to worker's task. This object
    # is able to receive messages in a thread-safe manner and then will safely
    # trigger ev_msg() on a specified event handler.
    port = worker.task.port(handler=StdInputHandler(worker), autoclose=True)
    # Launch a dedicated thread to read stdin in blocking mode. Indeed stdin
    # can be a file, so we cannot use a WorkerSimple here as polling on file
    # may result in different behaviors depending on selected engine.
    threading.Thread(None, _stdin_thread_start, args=(port,)).start()

def run_command(task, cmd, ns, timeout, display):
    """
    Create and run the specified command line, displaying
    results in a dshbak way when gathering is used.
    """
    task.set_default("USER_running", True)

    if display.verbosity >= VERB_VERB and task.topology:
        # NOTE(review): '%' and '*' have equal precedence (left-assoc), so
        # this evaluates as (FMT % '-') * 15, printing the formatted dash
        # 15 times; ('-' * 15) was probably intended -- confirm upstream
        print Display.COLOR_RESULT_FMT % '-' * 15
        print Display.COLOR_RESULT_FMT % task.topology,
        print Display.COLOR_RESULT_FMT % '-' * 15

    if (display.gather or display.line_mode) and ns is not None:
        # gathered output: pick line-by-line or at-close gathering
        if display.gather and display.line_mode:
            handler = LiveGatherOutputHandler(display, ns)
        else:
            handler = GatherOutputHandler(display)

        if display.verbosity == VERB_STD or display.verbosity == VERB_VERB:
            handler.runtimer_init(task, len(ns))

        worker = task.shell(cmd, nodes=ns, handler=handler, timeout=timeout)
    else:
        # direct (ungathered) output
        worker = task.shell(cmd, nodes=ns,
                            handler=DirectOutputHandler(display),
                            timeout=timeout)

    if ns is None:
        # local command (no nodes): label output 'LOCAL'
        worker.set_key('LOCAL')

    if task.default("USER_stdin_worker"):
        bind_stdin(worker)

    task.resume()

def run_copy(task, sources, dest, ns, timeout, preserve_flag, display):
    """
    run copy command
    """
    task.set_default("USER_running", True)
    # one copy worker per source; CopyOutputHandler counts them down
    task.set_default("USER_copies", len(sources))

    copyhandler = CopyOutputHandler(display)
    if display.verbosity == VERB_STD or display.verbosity == VERB_VERB:
        copyhandler.runtimer_init(task, len(ns) * len(sources))

    # Sources check
    for source in sources:
        if not os.path.exists(source):
            display.vprint_err(VERB_QUIET, "ERROR: file \"%s\" not found" % \
                source)
            clush_exit(1)
        task.copy(source, dest, ns, handler=copyhandler, timeout=timeout,
                  preserve=preserve_flag)
    task.resume()

def run_rcopy(task, sources, dest, ns, timeout, preserve_flag, display):
    """
    run reverse copy command
    """
    task.set_default("USER_running", True)
    # one rcopy worker per source; CopyOutputHandler counts them down
    task.set_default("USER_copies", len(sources))

    # Sanity checks
    if not os.path.exists(dest):
        display.vprint_err(VERB_QUIET, "ERROR: directory \"%s\" not found" % \
            dest)
        clush_exit(1)
    if not os.path.isdir(dest):
        display.vprint_err(VERB_QUIET, \
            "ERROR: destination \"%s\" is not a directory" % dest)
        clush_exit(1)

    copyhandler = CopyOutputHandler(display, True)
    if display.verbosity == VERB_STD or display.verbosity == VERB_VERB:
        copyhandler.runtimer_init(task, len(ns) * len(sources))
    for source in sources:
        task.rcopy(source, dest, ns, handler=copyhandler, timeout=timeout,
                   preserve=preserve_flag)
    task.resume()

def set_fdlimit(fd_max, display):
    """Make open file descriptors soft limit the max."""
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    if hard < fd_max:
        display.vprint(VERB_DEBUG, "Warning: Consider increasing max open " \
            "files hard limit (%d)" % hard)
    # raise the soft limit up to fd_max, never beyond the hard limit
    rlim_max = min(hard, fd_max)
    if soft != rlim_max:
        display.vprint(VERB_DEBUG, "Modifying max open files soft limit: " \
            "%d -> %d" % (soft, rlim_max))
        resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_max, hard))

def clush_exit(status):
    """Flush stdio buffers and exit script."""
    for stream in [sys.stdout, sys.stderr]:
        stream.flush()
    # Use os._exit to avoid threads cleanup
    os._exit(status)

def clush_excepthook(extype, value, traceback):
    """Exceptions hook for clush: this method centralizes exception
    handling from main thread and from (possible) separate task thread.
    This hook has to be previously installed on startup by overriding
    sys.excepthook and task.excepthook."""
    try:
        # re-raise so each known exception type can be matched below
        raise extype, value
    except ClushConfigError, econf:
        print >> sys.stderr, "ERROR: %s" % econf
    except KeyboardInterrupt, kbe:
        # uncompleted_nodes is set by ttyloop() in gather mode only
        uncomp_nodes = getattr(kbe, 'uncompleted_nodes', None)
        if uncomp_nodes:
            print >> sys.stderr, \
                "Keyboard interrupt (%s did not complete)." % uncomp_nodes
        else:
            print >> sys.stderr, "Keyboard interrupt."
        # conventional exit code for death-by-SIGINT
        clush_exit(128 + signal.SIGINT)
    except OSError, value:
        print >> sys.stderr, "ERROR: %s" % value
        if value.errno == errno.EMFILE:
            # too many open files: show current limits to help the user
            print >> sys.stderr, "ERROR: current `nofile' limits: " \
                "soft=%d hard=%d" % resource.getrlimit(resource.RLIMIT_NOFILE)
        clush_exit(1)
    except GENERIC_ERRORS, exc:
        clush_exit(handle_generic_error(exc))

    # Error not handled
    task_self().default_excepthook(extype, value, traceback)

def main():
    """clush script entry point"""
    sys.excepthook = clush_excepthook

    # Default values
    nodeset_base, nodeset_exclude = NodeSet(), NodeSet()

    #
    # Argument management
    #
    usage = "%prog [options] command"

    parser = OptionParser(usage)
    parser.add_option("--nostdin", action="store_true", dest="nostdin",
                      help="don't watch for possible input from stdin")
    parser.install_nodes_options()
    parser.install_display_options(verbose_options=True)
    parser.install_filecopy_options()
    parser.install_ssh_options()

    (options, args) = parser.parse_args()

    #
    # Load config file and apply overrides
    #
    config = ClushConfig(options)

    # Should we use ANSI colors for nodes?
    if config.color == "auto":
        color = sys.stdout.isatty() and (options.gatherall or \
                                         sys.stderr.isatty())
    else:
        color = config.color == "always"

    try:
        # Create and configure display object.
        display = Display(options, config, color)
    except ValueError, exc:
        parser.error("option mismatch (%s)" % exc)

    #
    # Compute the nodeset
    #
    if options.nodes:
        nodeset_base = NodeSet.fromlist(options.nodes)
    if options.exclude:
        nodeset_exclude = NodeSet.fromlist(options.exclude)

    if options.groupsource:
        # Be sure -a/g -s source work as expected.
        RESOLVER_STD_GROUP.default_sourcename = options.groupsource

    # FIXME: add public API to enforce engine
    Task._std_default['engine'] = options.engine

    # Do we have nodes group?
    task = task_self()
    task.set_info("debug", config.verbosity >= VERB_DEBUG)
    if config.verbosity == VERB_DEBUG:
        RESOLVER_STD_GROUP.set_verbosity(1)
    if options.nodes_all:
        all_nodeset = NodeSet.fromall()
        display.vprint(VERB_DEBUG, "Adding nodes from option -a: %s" % \
                       all_nodeset)
        nodeset_base.add(all_nodeset)
    if options.group:
        grp_nodeset = NodeSet.fromlist(options.group,
                                       resolver=RESOLVER_NOGROUP)
        for grp in grp_nodeset:
            addingrp = NodeSet("@" + grp)
            display.vprint(VERB_DEBUG, \
                "Adding nodes from option -g %s: %s" % (grp, addingrp))
            nodeset_base.update(addingrp)
    if options.exgroup:
        grp_nodeset = NodeSet.fromlist(options.exgroup,
                                       resolver=RESOLVER_NOGROUP)
        for grp in grp_nodeset:
            removingrp = NodeSet("@" + grp)
            display.vprint(VERB_DEBUG, \
                "Excluding nodes from option -X %s: %s" % (grp, removingrp))
            nodeset_exclude.update(removingrp)

    # Do we have an exclude list? (-x ...)
    nodeset_base.difference_update(nodeset_exclude)
    if len(nodeset_base) < 1:
        parser.error('No node to run on.')

    # Set open files limit.
    set_fdlimit(config.fd_max, display)

    #
    # Task management
    #
    # check for clush interactive mode
    interactive = not len(args) and \
                  not (options.copy or options.rcopy)
    # check for foreground ttys presence (input)
    stdin_isafgtty = sys.stdin.isatty() and \
        os.tcgetpgrp(sys.stdin.fileno()) == os.getpgrp()
    # check for special condition (empty command and stdin not a tty)
    if interactive and not stdin_isafgtty:
        # looks like interactive but stdin is not a tty:
        # switch to non-interactive + disable ssh pseudo-tty
        interactive = False
        # SSH: disable pseudo-tty allocation (-T)
        ssh_options = config.ssh_options or ''
        ssh_options += ' -T'
        config._set_main("ssh_options", ssh_options)
    if options.nostdin and interactive:
        parser.error("illegal option `--nostdin' in that case")

    # Force user_interaction if Clush._f_user_interaction for test purposes
    user_interaction = hasattr(sys.modules[__name__], '_f_user_interaction')
    if not options.nostdin:
        # Try user interaction: check for foreground ttys presence (output)
        stdout_isafgtty = sys.stdout.isatty() and \
            os.tcgetpgrp(sys.stdout.fileno()) == os.getpgrp()
        user_interaction |= stdin_isafgtty and stdout_isafgtty
    display.vprint(VERB_DEBUG, "User interaction: %s" % user_interaction)
    if user_interaction:
        # Standard input is a terminal and we want to perform some user
        # interactions in the main thread (using blocking calls), so
        # we run cluster commands in a new ClusterShell Task (a new
        # thread is created).
        task = Task()
    # else: perform everything in the main thread

    # Handle special signal only when user_interaction is set
    task.set_default("USER_handle_SIGUSR1", user_interaction)

    task.excepthook = sys.excepthook
    task.set_default("USER_stdin_worker", not (sys.stdin.isatty() or \
                                               options.nostdin or \
                                               user_interaction))
    display.vprint(VERB_DEBUG, "Create STDIN worker: %s" % \
                   task.default("USER_stdin_worker"))

    if config.verbosity >= VERB_DEBUG:
        task.set_info("debug", True)
        logging.basicConfig(level=logging.DEBUG)
        logging.debug("clush: STARTING DEBUG")

    task.set_info("fanout", config.fanout)

    if options.topofile:
        if config.verbosity >= VERB_VERB:
            print Display.COLOR_RESULT_FMT % \
                "Enabling TREE MODE (technology preview)"
        task.set_default("auto_tree", True)
        task.set_topology(options.topofile)

    if options.grooming_delay:
        if config.verbosity >= VERB_VERB:
            print Display.COLOR_RESULT_FMT % ("Grooming delay: %f" % \
                options.grooming_delay)
        task.set_info("grooming_delay", options.grooming_delay)

    if config.ssh_user:
        task.set_info("ssh_user", config.ssh_user)
    if config.ssh_path:
        task.set_info("ssh_path", config.ssh_path)
    if config.ssh_options:
        task.set_info("ssh_options", config.ssh_options)

    # Set detailed timeout values
    task.set_info("connect_timeout", config.connect_timeout)
    command_timeout = config.command_timeout
    task.set_info("command_timeout", command_timeout)

    # Enable stdout/stderr separation
    task.set_default("stderr", not options.gatherall)

    # Disable MsgTree buffering if not gathering outputs
    task.set_default("stdout_msgtree", display.gather or display.line_mode)

    # Always disable stderr MsgTree buffering
    task.set_default("stderr_msgtree", False)

    # Set timeout at worker level when command_timeout is defined.
    if command_timeout > 0:
        timeout = command_timeout
    else:
        timeout = -1

    # Configure task custom status
    task.set_default("USER_interactive", interactive)
    task.set_default("USER_running", False)

    if (options.copy or options.rcopy) and not args:
        parser.error("--[r]copy option requires at least one argument")
    if options.copy:
        if not options.dest_path:
            # default destination: directory of the first source
            options.dest_path = os.path.dirname(os.path.abspath(args[0]))
        op = "copy sources=%s dest=%s" % (args, options.dest_path)
    elif options.rcopy:
        if not options.dest_path:
            options.dest_path = os.path.dirname(os.path.abspath(args[0]))
        op = "rcopy sources=%s dest=%s" % (args, options.dest_path)
    else:
        op = "command=\"%s\"" % ' '.join(args)

    # print debug values (fanout value is got from the config object
    # and not task itself as set_info() is an asynchronous call.
    display.vprint(VERB_DEBUG, "clush: nodeset=%s fanout=%d [timeout " \
                   "conn=%.1f cmd=%.1f] %s" % (nodeset_base, config.fanout,
                                               task.info("connect_timeout"),
                                               task.info("command_timeout"),
                                               op))
    if not task.default("USER_interactive"):
        if options.copy:
            run_copy(task, args, options.dest_path, nodeset_base, 0,
                     options.preserve_flag, display)
        elif options.rcopy:
            run_rcopy(task, args, options.dest_path, nodeset_base, 0,
                      options.preserve_flag, display)
        else:
            run_command(task, ' '.join(args), nodeset_base, timeout, display)

    if user_interaction:
        ttyloop(task, nodeset_base, timeout, display)
    elif task.default("USER_interactive"):
        display.vprint_err(VERB_QUIET, \
            "ERROR: interactive mode requires a tty")
        clush_exit(1)

    rc = 0
    if options.maxrc:
        # Instead of clush return code, return commands retcode
        rc = task.max_retcode()
        if task.num_timeout() > 0:
            rc = 255
    clush_exit(rc)

if __name__ == '__main__':
    main()
clustershell-1.6/lib/ClusterShell/CLI/OptionParser.py0000644000130500135250000003533111741571247022215 0ustar thiellgpocre#!/usr/bin/env python
#
# Copyright CEA/DAM/DIF (2010, 2011, 2012)
# Contributor: Stephane THIELL
#
# This file is part of the ClusterShell library.
# # This software is governed by the CeCILL-C license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/ or redistribute the software under the terms of the CeCILL-C # license as circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. # # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-C license and that you accept its terms. """ common ClusterShell CLI OptionParser With few exceptions, ClusterShell command-lines share most option arguments. This module provides a common OptionParser class. 
""" from copy import copy import optparse from ClusterShell import __version__ from ClusterShell.Engine.Factory import PreferredEngine from ClusterShell.CLI.Display import THREE_CHOICES def check_safestring(option, opt, value): """type-checker function for safestring""" try: safestr = str(value) # check if the string is not empty and not an option if not safestr or safestr.startswith('-'): raise ValueError() return safestr except ValueError: raise optparse.OptionValueError( "option %s: invalid value: %r" % (opt, value)) class Option(optparse.Option): """This Option subclass adds a new safestring type.""" TYPES = optparse.Option.TYPES + ("safestring",) TYPE_CHECKER = copy(optparse.Option.TYPE_CHECKER) TYPE_CHECKER["safestring"] = check_safestring class OptionParser(optparse.OptionParser): """Derived OptionParser for all CLIs""" def __init__(self, usage, **kwargs): """Initialize ClusterShell CLI OptionParser""" optparse.OptionParser.__init__(self, usage, version="%%prog %s" % __version__, option_class=Option, **kwargs) # Set parsing to stop on the first non-option self.disable_interspersed_args() # Always install groupsource support self.add_option("-s", "--groupsource", action="store", type="safestring", dest="groupsource", help="optional groups.conf(5) group source to use") def install_nodes_options(self): """Install nodes selection options""" optgrp = optparse.OptionGroup(self, "Selecting target nodes") optgrp.add_option("-w", action="append", type="safestring", dest="nodes", help="nodes where to run the command") optgrp.add_option("-x", action="append", type="safestring", dest="exclude", metavar="NODES", help="exclude nodes from the node list") optgrp.add_option("-a", "--all", action="store_true", dest="nodes_all", help="run command on all nodes") optgrp.add_option("-g", "--group", action="append", type="safestring", dest="group", help="run command on a group of nodes") optgrp.add_option("-X", action="append", dest="exgroup", metavar="GROUP", type="safestring", 
help="exclude nodes from this group") optgrp.add_option("-E", "--engine", action="store", dest="engine", choices=["auto"] + PreferredEngine.engines.keys(), default="auto", help=optparse.SUPPRESS_HELP) optgrp.add_option("-T", "--topology", action="store", dest="topofile", default=None, help=optparse.SUPPRESS_HELP) self.add_option_group(optgrp) def install_display_options(self, debug_option=True, verbose_options=False, separator_option=False, dshbak_compat=False, msgtree_mode=False): """Install options needed by Display class""" optgrp = optparse.OptionGroup(self, "Output behaviour") if verbose_options: optgrp.add_option("-q", "--quiet", action="store_true", dest="quiet", help="be quiet, print essential output only") optgrp.add_option("-v", "--verbose", action="store_true", dest="verbose", help="be verbose, print informative messages") if debug_option: optgrp.add_option("-d", "--debug", action="store_true", dest="debug", help="output more messages for debugging purpose") optgrp.add_option("-G", "--groupbase", action="store_true", dest="groupbase", default=False, help="do not display group source prefix") optgrp.add_option("-L", action="store_true", dest="line_mode", help="disable header block and order output by nodes") optgrp.add_option("-N", action="store_false", dest="label", default=True, help="disable labeling of command line") if dshbak_compat: optgrp.add_option("-b", "-c", "--dshbak", action="store_true", dest="gather", help="gather nodes with same output") else: optgrp.add_option("-b", "--dshbak", action="store_true", dest="gather", help="gather nodes with same output") optgrp.add_option("-B", action="store_true", dest="gatherall", default=False, help="like -b but including standard error") optgrp.add_option("-r", "--regroup", action="store_true", dest="regroup", default=False, help="fold nodeset using node groups") if separator_option: optgrp.add_option("-S", "--separator", action="store", dest="separator", default=':', help="node / line content separator 
string " \ "(default: ':')") else: optgrp.add_option("-S", action="store_true", dest="maxrc", help="return the largest of command return codes") if msgtree_mode: # clubak specific optgrp.add_option("-F", "--fast", action="store_true", dest="fast_mode", help="faster but memory hungry mode") optgrp.add_option("-T", "--tree", action="store_true", dest="trace_mode", help="message tree trace mode") optgrp.add_option("--interpret-keys", action="store", dest="interpret_keys", choices=THREE_CHOICES, default=THREE_CHOICES[-1], help="whether to " \ "interpret keys (never, always or auto)") optgrp.add_option("--color", action="store", dest="whencolor", choices=THREE_CHOICES, help="whether to use ANSI " \ "colors (never, always or auto)") optgrp.add_option("--diff", action="store_true", dest="diff", help="show diff between gathered outputs") self.add_option_group(optgrp) def _copy_callback(self, option, opt_str, value, parser): """special callback method for copy and rcopy toggles""" # enable interspersed args again self.enable_interspersed_args() # set True to dest option attribute setattr(parser.values, option.dest, True) def install_filecopy_options(self): """Install file copying specific options""" optgrp = optparse.OptionGroup(self, "File copying") optgrp.add_option("-c", "--copy", action="callback", dest="copy", callback=self._copy_callback, help="copy local file or directory to remote nodes") optgrp.add_option("--rcopy", action="callback", dest="rcopy", callback=self._copy_callback, help="copy file or directory from remote nodes") optgrp.add_option("--dest", action="store", dest="dest_path", help="destination file or directory on the nodes") optgrp.add_option("-p", action="store_true", dest="preserve_flag", help="preserve modification times and modes") self.add_option_group(optgrp) def install_ssh_options(self): """Install engine/connector (ssh) options""" optgrp = optparse.OptionGroup(self, "Ssh/Tree options") optgrp.add_option("-f", "--fanout", action="store", 
dest="fanout", help="use a specified fanout", type="int") #help="queueing delay for traffic grooming" optgrp.add_option("-Q", action="store", dest="grooming_delay", help=optparse.SUPPRESS_HELP, type="float") optgrp.add_option("-l", "--user", action="store", type="safestring", dest="user", help="execute remote command as user") optgrp.add_option("-o", "--options", action="store", dest="options", help="can be used to give ssh options") optgrp.add_option("-t", "--connect_timeout", action="store", dest="connect_timeout", help="limit time to " \ "connect to a node" ,type="float") optgrp.add_option("-u", "--command_timeout", action="store", dest="command_timeout", help="limit time for " \ "command to run on the node", type="float") self.add_option_group(optgrp) def install_nodeset_commands(self): """Install nodeset commands""" optgrp = optparse.OptionGroup(self, "Commands") optgrp.add_option("-c", "--count", action="store_true", dest="count", default=False, help="show number of nodes in " \ "nodeset(s)") optgrp.add_option("-e", "--expand", action="store_true", dest="expand", default=False, help="expand " \ "nodeset(s) to separate nodes") optgrp.add_option("-f", "--fold", action="store_true", dest="fold", default=False, help="fold nodeset(s) (or " \ "separate nodes) into one nodeset") optgrp.add_option("-l", "--list", action="count", dest="list", default=False, help="list node groups (see -s " \ "GROUPSOURCE)") optgrp.add_option("-r", "--regroup", action="store_true", dest="regroup", default=False, help="fold nodes " \ "using node groups (see -s GROUPSOURCE)") optgrp.add_option("--groupsources", action="store_true", dest="groupsources", default=False, help="list " \ "all configured group sources (see groups.conf(5))") self.add_option_group(optgrp) def install_nodeset_operations(self): """Install nodeset operations""" optgrp = optparse.OptionGroup(self, "Operations") optgrp.add_option("-x", "--exclude", action="append", dest="sub_nodes", default=[], type="string", 
help="exclude specified nodeset") optgrp.add_option("-i", "--intersection", action="append", dest="and_nodes", default=[], type="string", help="calculate nodesets intersection") optgrp.add_option("-X", "--xor", action="append", dest="xor_nodes", default=[], type="string", help="calculate " \ "symmetric difference between nodesets") self.add_option_group(optgrp) def install_nodeset_options(self): """Install nodeset options""" optgrp = optparse.OptionGroup(self, "Options") optgrp.add_option("-a", "--all", action="store_true", dest="all", help="call external node groups support to " \ "display all nodes") optgrp.add_option("--autostep", action="store", dest="autostep", help="auto step threshold number when folding " \ "nodesets", type="int") optgrp.add_option("-d", "--debug", action="store_true", dest="debug", help="output more messages for debugging purpose") optgrp.add_option("-q", "--quiet", action="store_true", dest="quiet", help="be quiet, print essential output only") optgrp.add_option("-R", "--rangeset", action="store_true", dest="rangeset", help="switch to RangeSet instead " \ "of NodeSet. Useful when working on numerical " \ "cluster ranges, eg. 
1,5,18-31") optgrp.add_option("-G", "--groupbase", action="store_true", dest="groupbase", help="hide group source prefix " \ "(always \"@groupname\")") optgrp.add_option("-S", "--separator", action="store", dest="separator", default=' ', help="separator string to use when " \ "expanding nodesets (default: ' ')") optgrp.add_option("-I", "--slice", action="store", dest="slice_rangeset", help="return sliced off result", type="string") optgrp.add_option("--split", action="store", dest="maxsplit", help="split result into a number of subsets", type="int") optgrp.add_option("--contiguous", action="store_true", dest="contiguous", help="split result into " \ "contiguous subsets") self.add_option_group(optgrp) clustershell-1.6/lib/ClusterShell/CLI/Error.py0000644000130500135250000000744411741571247020665 0ustar thiellgpocre#!/usr/bin/env python # # Copyright CEA/DAM/DIF (2010, 2011, 2012) # Contributor: Stephane THIELL # # This file is part of the ClusterShell library. # # This software is governed by the CeCILL-C license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/ or redistribute the software under the terms of the CeCILL-C # license as circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. # # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. 
def handle_generic_error(excobj, prog=os.path.basename(sys.argv[0])):
    """Print a user-friendly message on stderr for `excobj', one of the
    generic script exceptions listed in GENERIC_ERRORS, and return the
    process exit code to use.

    Returns 128 + SIGINT for KeyboardInterrupt, 1 (generic failure)
    for everything else (IOError is silently ignored as it usually
    means a broken pipe).

    NOTE: the `prog' default is evaluated once at import time from
    sys.argv[0], which is the intended behavior for CLI scripts.
    """
    # Dispatch on the exception type by re-raising it inside a local
    # try block; handler order matters (e.g. GroupSourceNoUpcall must
    # come before its parent class GroupSourceException).
    try:
        raise excobj
    except EngineNotSupportedError, exc:
        print >> sys.stderr, "%s: I/O events engine '%s' not supported on " \
            "this host" % (prog, exc.engineid)
    except NodeSetExternalError, exc:
        print >> sys.stderr, "%s: External error:" % prog, exc
    except (NodeSetParseError, RangeSetParseError), exc:
        print >> sys.stderr, "%s: Parse error:" % prog, exc
    except GroupResolverSourceError, exc:
        print >> sys.stderr, "%s: Unknown group source: \"%s\"" % (prog, exc)
    except GroupSourceNoUpcall, exc:
        print >> sys.stderr, "%s: No %s upcall defined for group " \
            "source \"%s\"" % (prog, exc, exc.group_source.name)
    except GroupSourceException, exc:
        print >> sys.stderr, "%s: Other group error:" % prog, exc
    except TopologyError, exc:
        print >> sys.stderr, "%s: TREE MODE:" % prog, exc
    except IOError:
        # ignore broken pipe
        pass
    except KeyboardInterrupt, exc:
        # conventional "killed by signal" exit status
        return 128 + signal.SIGINT
    except:
        # excobj was not in GENERIC_ERRORS: caller bug
        assert False, "wrong GENERIC_ERRORS"

    # Exit with error code 1 (generic failure)
    return 1
# # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-C license and that you accept its terms. """ format dsh/pdsh-like output for humans and more For help, type:: $ clubak --help """ from itertools import imap import sys from ClusterShell.MsgTree import MsgTree, MODE_DEFER, MODE_TRACE from ClusterShell.NodeSet import RESOLVER_STD_GROUP, NodeSetParseError from ClusterShell.CLI.Display import Display, THREE_CHOICES from ClusterShell.CLI.Error import GENERIC_ERRORS, handle_generic_error from ClusterShell.CLI.OptionParser import OptionParser from ClusterShell.CLI.Utils import NodeSet, nodeset_cmp def display_tree(tree, disp, out): """display sub-routine for clubak -T (msgtree trace mode)""" togh = True offset = 2 reldepth = -offset reldepths = {} line_mode = disp.line_mode for msgline, keys, depth, nchildren in tree.walk_trace(): if togh: if depth in reldepths: reldepth = reldepths[depth] else: reldepth = reldepths[depth] = reldepth + offset if line_mode: out.write("%s:\n" % NodeSet.fromlist(keys)) else: out.write("%s\n" % \ (disp.format_header(NodeSet.fromlist(keys), reldepth))) out.write("%s%s\n" % (" " * reldepth, msgline)) togh = nchildren != 1 def display(tree, disp, gather, trace_mode, enable_nodeset_key): """nicely display MsgTree instance `tree' content according to `disp' Display object and `gather' boolean flag""" out = sys.stdout try: if trace_mode: display_tree(tree, disp, out) else: if gather: if enable_nodeset_key: # lambda to create a NodeSet from keys returned by walk() ns_getter = lambda x: NodeSet.fromlist(x[1]) for nodeset in sorted(imap(ns_getter, tree.walk()), cmp=nodeset_cmp): disp.print_gather(nodeset, tree[nodeset[0]]) else: for msg, key in tree.walk(): disp.print_gather_keys(key, msg) else: if enable_nodeset_key: # nodes are automagically sorted by NodeSet for node in NodeSet.fromlist(tree.keys()).nsiter(): disp.print_gather(node, tree[str(node)]) else: for key in tree.keys(): 
def clubak():
    """script subroutine

    Parse clubak options, read "key: content" lines from standard
    input, aggregate them into a MsgTree and display the result.

    Key interpretation (--interpret-keys) works in three modes:
    never (keys kept as raw strings), always (keys parsed as NodeSet,
    parse errors fatal), auto (try NodeSet first, permanently fall
    back to raw keys on the first parse error).
    """

    # Argument management
    parser = OptionParser("%prog [options]")
    parser.install_display_options(verbose_options=True,
                                   separator_option=True,
                                   dshbak_compat=True,
                                   msgtree_mode=True)
    options = parser.parse_args()[0]

    # enable_nodeset_key: True=always, False=never, None=auto
    if options.interpret_keys == THREE_CHOICES[-1]: # auto?
        enable_nodeset_key = None # AUTO
    else:
        enable_nodeset_key = (options.interpret_keys == THREE_CHOICES[1])

    # Create new message tree
    if options.trace_mode:
        tree_mode = MODE_TRACE
    else:
        tree_mode = MODE_DEFER
    tree = MsgTree(mode=tree_mode)
    fast_mode = options.fast_mode
    if fast_mode:
        # fast mode pre-aggregates messages per key in a plain dict
        # and only supports the default (deferred, non-line) display
        if tree_mode != MODE_DEFER or options.line_mode:
            parser.error("incompatible tree options")
        preload_msgs = {}

    # Feed the tree from standard input lines
    for line in sys.stdin:
        try:
            linestripped = line.rstrip('\r\n')
            if options.verbose or options.debug:
                print "INPUT %s" % linestripped
            # split at the first separator only: content may contain it
            key, content = linestripped.split(options.separator, 1)
            key = key.strip()
            if not key:
                raise ValueError("no node found")
            if enable_nodeset_key is False: # interpret-keys=never?
                keyset = [ key ]
            else:
                try:
                    keyset = NodeSet(key)
                except NodeSetParseError:
                    if enable_nodeset_key: # interpret-keys=always?
                        raise
                    # auto mode: first parse failure disables NodeSet
                    # interpretation for the rest of the input
                    enable_nodeset_key = False # auto => switch off
                    keyset = [ key ]
            if fast_mode:
                for node in keyset:
                    preload_msgs.setdefault(node, []).append(content)
            else:
                for node in keyset:
                    tree.add(node, content)
        except ValueError, ex:
            # re-raise with the offending input line for context
            raise ValueError("%s (\"%s\")" % (ex, linestripped))

    if fast_mode:
        # Messages per node have been aggregated, now add to tree one
        # full msg per node
        for key, wholemsg in preload_msgs.iteritems():
            tree.add(key, '\n'.join(wholemsg))

    # Display results
    try:
        disp = Display(options)
        if options.debug:
            RESOLVER_STD_GROUP.set_verbosity(1)
            print >> sys.stderr, \
                "clubak: line_mode=%s gather=%s tree_depth=%d" % \
                (bool(options.line_mode), bool(disp.gather), tree._depth())
        # keys are interpreted as nodesets unless detection switched off
        display(tree, disp, disp.gather or disp.regroup, \
                options.trace_mode, enable_nodeset_key is not False)
    except ValueError, exc:
        parser.error("option mismatch (%s)" % exc)
class ClushConfigError(Exception):
    """Error raised by ClushConfig when a configuration value is
    missing or invalid.

    Carries the configuration section and option names along with a
    message (or wrapped exception) describing the problem.
    """

    def __init__(self, section, option, msg):
        """Record the faulty section/option pair and the error detail."""
        super(ClushConfigError, self).__init__()
        self.section = section
        self.option = option
        self.msg = msg

    def __str__(self):
        """Render as '(Config <section>.<option>): <detail>'."""
        return "(Config %s.%s): %s" % (self.section, self.option, self.msg)
    def _getx(self, xtype, section, option):
        """Return a value of specified type for the named option.

        `xtype' is one of 'boolean', 'float' or 'int': the call is
        dispatched to the matching ConfigParser.get<xtype>() method.
        Any parse/conversion failure is converted into a
        ClushConfigError carrying the section and option names.
        """
        try:
            # resolve e.g. ConfigParser.getint and call it unbound on self
            return getattr(ConfigParser.ConfigParser, 'get%s' % xtype)(self, \
                section, option)
        except (ConfigParser.Error, TypeError, ValueError), exc:
            raise ClushConfigError(section, option, exc)
return self.getint("Main", "fanout") @property def connect_timeout(self): """connect_timeout value as a float""" return self.getfloat("Main", "connect_timeout") @property def command_timeout(self): """command_timeout value as a float""" return self.getfloat("Main", "command_timeout") @property def ssh_user(self): """ssh_user value as a string (optional)""" return self._get_optional("Main", "ssh_user") @property def ssh_path(self): """ssh_path value as a string (optional)""" return self._get_optional("Main", "ssh_path") @property def ssh_options(self): """ssh_options value as a string (optional)""" return self._get_optional("Main", "ssh_options") @property def color(self): """color value as a string in (never, always, auto)""" whencolor = self._get_optional("Main", "color") if whencolor not in THREE_CHOICES: raise ClushConfigError("Main", "color", "choose from %s" % \ THREE_CHOICES) return whencolor @property def node_count(self): """node_count value as a boolean""" return self.getboolean("Main", "node_count") @property def fd_max(self): """max number of open files (soft rlimit)""" return self.getint("Main", "fd_max") clustershell-1.6/lib/ClusterShell/CLI/Display.py0000644000130500135250000002423211741571247021173 0ustar thiellgpocre#!/usr/bin/env python # # Copyright CEA/DAM/DIF (2010, 2011, 2012) # Contributor: Stephane THIELL # # This file is part of the ClusterShell library. # # This software is governed by the CeCILL-C license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/ or redistribute the software under the terms of the CeCILL-C # license as circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". 
# # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. # # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-C license and that you accept its terms. """ CLI results display class """ import difflib import sys from ClusterShell.NodeSet import NodeSet # Display constants VERB_QUIET = 0 VERB_STD = 1 VERB_VERB = 2 VERB_DEBUG = 3 THREE_CHOICES = ["never", "always", "auto"] WHENCOLOR_CHOICES = THREE_CHOICES # deprecated; use THREE_CHOICES class Display(object): """ Output display class for command line scripts. 
""" COLOR_RESULT_FMT = "\033[32m%s\033[0m" COLOR_STDOUT_FMT = "\033[34m%s\033[0m" COLOR_STDERR_FMT = "\033[31m%s\033[0m" COLOR_DIFFHDR_FMT = "\033[1m%s\033[0m" COLOR_DIFFHNK_FMT = "\033[36m%s\033[0m" COLOR_DIFFADD_FMT = "\033[32m%s\033[0m" COLOR_DIFFDEL_FMT = "\033[31m%s\033[0m" SEP = "-" * 15 class _KeySet(set): """Private NodeSet substition to display raw keys""" def __str__(self): return ",".join(self) def __init__(self, options, config=None, color=None): """Initialize a Display object from CLI.OptionParser options and optional CLI.ClushConfig. If `color' boolean flag is not specified, it is auto detected according to options.whencolor. """ if options.diff: self._print_buffer = self._print_diff else: self._print_buffer = self._print_content self._display = self._print_buffer self._diffref = None # diff implies at least -b self.gather = options.gatherall or options.gather or options.diff # check parameter combinaison if options.diff and options.line_mode: raise ValueError("diff not supported in line_mode") self.line_mode = options.line_mode self.label = options.label self.regroup = options.regroup self.groupsource = options.groupsource self.noprefix = options.groupbase # display may change when 'max return code' option is set self.maxrc = getattr(options, 'maxrc', False) if color is None: # Should we use ANSI colors? 
color = False if not options.whencolor or options.whencolor == "auto": color = sys.stdout.isatty() elif options.whencolor == "always": color = True self._color = color self.out = sys.stdout self.err = sys.stderr if self._color: self.color_stdout_fmt = self.COLOR_STDOUT_FMT self.color_stderr_fmt = self.COLOR_STDERR_FMT self.color_diffhdr_fmt = self.COLOR_DIFFHDR_FMT self.color_diffctx_fmt = self.COLOR_DIFFHNK_FMT self.color_diffadd_fmt = self.COLOR_DIFFADD_FMT self.color_diffdel_fmt = self.COLOR_DIFFDEL_FMT else: self.color_stdout_fmt = self.color_stderr_fmt = \ self.color_diffhdr_fmt = self.color_diffctx_fmt = \ self.color_diffadd_fmt = self.color_diffdel_fmt = "%s" # Set display verbosity if config: # config object does already apply options overrides self.node_count = config.node_count self.verbosity = config.verbosity else: self.node_count = True self.verbosity = VERB_STD if hasattr(options, 'quiet') and options.quiet: self.verbosity = VERB_QUIET if hasattr(options, 'verbose') and options.verbose: self.verbosity = VERB_VERB if hasattr(options, 'debug') and options.debug: self.verbosity = VERB_DEBUG def flush(self): """flush display object buffers""" # only used to reset diff display for now self._diffref = None def _getlmode(self): """line_mode getter""" return self._display == self._print_lines def _setlmode(self, value): """line_mode setter""" if value: self._display = self._print_lines else: self._display = self._print_buffer line_mode = property(_getlmode, _setlmode) def _format_nodeset(self, nodeset): """Sub-routine to format nodeset string.""" if self.regroup: return nodeset.regroup(self.groupsource, noprefix=self.noprefix) return str(nodeset) def format_header(self, nodeset, indent=0): """Format nodeset-based header.""" indstr = " " * indent nodecntstr = "" if self.verbosity >= VERB_STD and self.node_count and len(nodeset) > 1: nodecntstr = " (%d)" % len(nodeset) if not self.label: return "" return self.color_stdout_fmt % ("%s%s\n%s%s%s\n%s%s" % \ 
(indstr, self.SEP, indstr, self._format_nodeset(nodeset), nodecntstr, indstr, self.SEP)) def print_line(self, nodeset, line): """Display a line with optional label.""" if self.label: prefix = self.color_stdout_fmt % ("%s: " % nodeset) self.out.write("%s%s\n" % (prefix, line)) else: self.out.write("%s\n" % line) def print_line_error(self, nodeset, line): """Display an error line with optional label.""" if self.label: prefix = self.color_stderr_fmt % ("%s: " % nodeset) self.err.write("%s%s\n" % (prefix, line)) else: self.err.write("%s\n" % line) def print_gather(self, nodeset, obj): """Generic method for displaying nodeset/content according to current object settings.""" return self._display(NodeSet(nodeset), obj) def print_gather_keys(self, keys, obj): """Generic method for displaying raw keys/content according to current object settings (used by clubak).""" return self._display(self.__class__._KeySet(keys), obj) def _print_content(self, nodeset, content): """Display a dshbak-like header block and content.""" self.out.write("%s\n%s\n" % (self.format_header(nodeset), content)) def _print_diff(self, nodeset, content): """Display unified diff between remote gathered outputs.""" if self._diffref is None: self._diffref = (nodeset, content) else: nodeset_ref, content_ref = self._diffref nsstr_ref = self._format_nodeset(nodeset_ref) nsstr = self._format_nodeset(nodeset) if self.verbosity >= VERB_STD and self.node_count: if len(nodeset_ref) > 1: nsstr_ref += " (%d)" % len(nodeset_ref) if len(nodeset) > 1: nsstr += " (%d)" % len(nodeset) udiff = difflib.unified_diff(list(content_ref), list(content), \ fromfile=nsstr_ref, tofile=nsstr, \ lineterm='') output = "" for line in udiff: if line.startswith('---') or line.startswith('+++'): output += self.color_diffhdr_fmt % line.rstrip() elif line.startswith('@@'): output += self.color_diffctx_fmt % line elif line.startswith('+'): output += self.color_diffadd_fmt % line elif line.startswith('-'): output += self.color_diffdel_fmt % 
line else: output += line output += '\n' self.out.write(output) def _print_lines(self, nodeset, msg): """Display a MsgTree buffer by line with prefixed header.""" out = self.out if self.label: if self.gather: header = self.color_stdout_fmt % \ ("%s: " % self._format_nodeset(nodeset)) for line in msg: out.write("%s%s\n" % (header, line)) else: for node in nodeset: header = self.color_stdout_fmt % \ ("%s: " % self._format_nodeset(node)) for line in msg: out.write("%s%s\n" % (header, line)) else: if self.gather: for line in msg: out.write(line + '\n') else: for node in nodeset: for line in msg: out.write(line + '\n') def vprint(self, level, message): """Utility method to print a message if verbose level is high enough.""" if self.verbosity >= level: print message def vprint_err(self, level, message): """Utility method to print a message on stderr if verbose level is high enough.""" if self.verbosity >= level: print >> sys.stderr, message clustershell-1.6/lib/ClusterShell/CLI/Nodeset.py0000644000130500135250000002467211741571247021177 0ustar thiellgpocre#!/usr/bin/env python # # Copyright CEA/DAM/DIF (2008, 2009, 2010, 2011, 2012) # Contributor: Stephane THIELL # # This file is part of the ClusterShell library. # # This software is governed by the CeCILL-C license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/ or redistribute the software under the terms of the CeCILL-C # license as circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. 
# # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-C license and that you accept its terms. """ compute advanced nodeset operations The nodeset command is an utility command provided with the ClusterShell library which implements some features of the NodeSet and RangeSet classes. 
""" import sys from ClusterShell.CLI.Error import GENERIC_ERRORS, handle_generic_error from ClusterShell.CLI.OptionParser import OptionParser from ClusterShell.CLI.Utils import NodeSet # safe import from ClusterShell.NodeSet import RangeSet, grouplist, RESOLVER_STD_GROUP def process_stdin(xsetop, xsetcls, autostep): """Process standard input and operate on xset.""" # Build temporary set (stdin accumulator) tmpset = xsetcls(autostep=autostep) for line in sys.stdin.readlines(): # Support multi-lines and multi-nodesets per line line = line[0:line.find('#')].strip() for elem in line.split(): # Do explicit object creation for RangeSet tmpset.update(xsetcls(elem, autostep=autostep)) # Perform operation on xset if tmpset: xsetop(tmpset) def compute_nodeset(xset, args, autostep): """Apply operations and operands from args on xset, an initial RangeSet or NodeSet.""" class_set = xset.__class__ # Process operations while args: arg = args.pop(0) if arg in ("-i", "--intersection"): val = args.pop(0) if val == '-': process_stdin(xset.intersection_update, class_set, autostep) else: xset.intersection_update(class_set(val, autostep=autostep)) elif arg in ("-x", "--exclude"): val = args.pop(0) if val == '-': process_stdin(xset.difference_update, class_set, autostep) else: xset.difference_update(class_set(val, autostep=autostep)) elif arg in ("-X", "--xor"): val = args.pop(0) if val == '-': process_stdin(xset.symmetric_difference_update, class_set, autostep) else: xset.symmetric_difference_update(class_set(val, autostep=autostep)) elif arg == '-': process_stdin(xset.update, xset.__class__, autostep) else: xset.update(class_set(arg, autostep=autostep)) return xset def command_list(options, xset): """List (-l/-ll/-lll) command handler.""" list_level = options.list # list groups of some specified nodes? 
if options.all or xset or \ options.and_nodes or options.sub_nodes or options.xor_nodes: # When some node sets are provided as argument, the list command # retrieves node groups these nodes belong to, thanks to the # groups() method (new in 1.6). Note: stdin support is enabled # when the '-' special character is encountered. groups = xset.groups(options.groupsource, options.groupbase) for group, (gnodes, inodes) in groups.iteritems(): if list_level == 1: # -l print group elif list_level == 2: # -ll print "%s %s" % (group, inodes) else: # -lll print "%s %s %d/%d" % (group, inodes, len(inodes), \ len(gnodes)) return # "raw" group list when no argument at all for group in grouplist(options.groupsource): if options.groupsource and not options.groupbase: nsgroup = "@%s:%s" % (options.groupsource, group) else: nsgroup = "@%s" % group if list_level == 1: # -l print nsgroup else: nodes = NodeSet(nsgroup) if list_level == 2: # -ll print "%s %s" % (nsgroup, nodes) else: # -lll print "%s %s %d" % (nsgroup, nodes, len(nodes)) def nodeset(): """script subroutine""" class_set = NodeSet usage = "%prog [COMMAND] [OPTIONS] [ns1 [-ixX] ns2|...]" parser = OptionParser(usage) parser.install_nodeset_commands() parser.install_nodeset_operations() parser.install_nodeset_options() (options, args) = parser.parse_args() if options.debug: RESOLVER_STD_GROUP.set_verbosity(1) # Check for command presence cmdcount = int(options.count) + int(options.expand) + \ int(options.fold) + int(bool(options.list)) + \ int(options.regroup) + int(options.groupsources) if not cmdcount: parser.error("No command specified.") elif cmdcount > 1: parser.error("Multiple commands not allowed.") if options.rangeset: class_set = RangeSet if options.all or options.regroup: if class_set != NodeSet: parser.error("-a/-r only supported in NodeSet mode") if options.maxsplit is not None and options.contiguous: parser.error("incompatible splitting options (split, contiguous)") if options.maxsplit is None: options.maxsplit = 
1 if options.groupsource and not options.quiet and \ (class_set == RangeSet or options.groupsources): print >> sys.stderr, "WARNING: option group source \"%s\" ignored" \ % options.groupsource # The groupsources command simply lists group sources. if options.groupsources: if options.quiet: dispdefault = "" # don't show (default) if quiet is set else: dispdefault = " (default)" for src in RESOLVER_STD_GROUP.sources(): print "%s%s" % (src, dispdefault) dispdefault = "" return # We want -s to act as a substition of default groupsource # (ie. it's not necessary to prefix group names by this group source). if options.groupsource: RESOLVER_STD_GROUP.default_sourcename = options.groupsource # Instantiate RangeSet or NodeSet object xset = class_set(autostep=options.autostep) if options.all: # Include all nodes from external node groups support. xset.update(NodeSet.fromall()) # uses default_sourcename if not args and not options.all and not options.list: # No need to specify '-' to read stdin in these cases process_stdin(xset.update, xset.__class__, options.autostep) # Apply first operations (before first non-option) for nodes in options.and_nodes: if nodes == '-': process_stdin(xset.intersection_update, xset.__class__, options.autostep) else: xset.intersection_update(class_set(nodes, autostep=options.autostep)) for nodes in options.sub_nodes: if nodes == '-': process_stdin(xset.difference_update, xset.__class__, options.autostep) else: xset.difference_update(class_set(nodes, autostep=options.autostep)) for nodes in options.xor_nodes: if nodes == '-': process_stdin(xset.symmetric_difference_update, xset.__class__, options.autostep) else: xset.symmetric_difference_update(class_set(nodes, \ autostep=options.autostep)) # Finish xset computing from args compute_nodeset(xset, args, options.autostep) # The list command has a special handling if options.list > 0: return command_list(options, xset) # Interprete special characters (may raise SyntaxError) separator = eval('\'%s\'' % 
options.separator, {"__builtins__":None}, {}) if options.slice_rangeset: _xset = class_set() for sli in RangeSet(options.slice_rangeset).slices(): _xset.update(xset[sli]) xset = _xset # Display result according to command choice if options.expand: xsubres = lambda x: separator.join(x.striter()) elif options.fold: xsubres = lambda x: x elif options.regroup: xsubres = lambda x: x.regroup(options.groupsource, \ noprefix=options.groupbase) else: xsubres = len if not xset or options.maxsplit <= 1 and not options.contiguous: print xsubres(xset) else: if options.contiguous: xiterator = xset.contiguous() else: xiterator = xset.split(options.maxsplit) for xsubset in xiterator: print xsubres(xsubset) def main(): """main script function""" try: nodeset() except AssertionError, ex: print >> sys.stderr, "ERROR:", ex sys.exit(1) except IndexError: print >> sys.stderr, "ERROR: syntax error" sys.exit(1) except SyntaxError: print >> sys.stderr, "ERROR: invalid separator" sys.exit(1) except GENERIC_ERRORS, ex: sys.exit(handle_generic_error(ex)) sys.exit(0) if __name__ == '__main__': main() clustershell-1.6/lib/ClusterShell/CLI/Utils.py0000644000130500135250000000506711741571247020673 0ustar thiellgpocre#!/usr/bin/env python # # Copyright CEA/DAM/DIF (2010) # Contributor: Stephane THIELL # # This file is part of the ClusterShell library. # # This software is governed by the CeCILL-C license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/ or redistribute the software under the terms of the CeCILL-C # license as circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. 
# # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-C license and that you accept its terms. """ CLI utility functions """ import sys # CLI modules might safely import the NodeSet class from here. from ClusterShell.NodeUtils import GroupResolverConfigError try: from ClusterShell.NodeSet import NodeSet except GroupResolverConfigError, exc: print >> sys.stderr, \ "ERROR: ClusterShell node groups configuration error:\n\t%s" % exc sys.exit(1) def nodeset_cmp(ns1, ns2): """Compare 2 nodesets by their length (we want larger nodeset first) and then by first node.""" len_cmp = cmp(len(ns2), len(ns1)) if not len_cmp: smaller = NodeSet.fromlist([ns1[0], ns2[0]])[0] if smaller == ns1[0]: return -1 else: return 1 return len_cmp def bufnodeset_cmp(bn1, bn2): """Convenience function to compare 2 (buf, nodeset) tuples by their nodeset length (we want larger nodeset first) and then by first node.""" # Extract nodesets and call nodeset_cmp return nodeset_cmp(bn1[1], bn2[1]) clustershell-1.6/lib/ClusterShell/MsgTree.py0000644000130500135250000002760611741571247020535 0ustar thiellgpocre# # Copyright CEA/DAM/DIF (2007, 2008, 2009, 2010, 2011) # Contributor: Stephane THIELL # # This file is part of the ClusterShell library. 
# # This software is governed by the CeCILL-C license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/ or redistribute the software under the terms of the CeCILL-C # license as circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. # # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-C license and that you accept its terms. """ MsgTree ClusterShell message tree module. The purpose of MsgTree is to provide a shared message tree for storing message lines received from ClusterShell Workers (for example, from remote cluster commands). It should be efficient, in term of algorithm and memory consumption, especially when remote messages are the same. 
""" from itertools import ifilterfalse, imap from operator import itemgetter # MsgTree behavior modes MODE_DEFER = 0 MODE_SHIFT = 1 MODE_TRACE = 2 class MsgTreeElem(object): """ Class representing an element of the MsgTree and its associated message. Object of this class are returned by the various MsgTree methods like messages() or walk(). The object can then be used as an iterator over the message lines or casted into a string. """ def __init__(self, msgline=None, parent=None, trace=False): """ Initialize message tree element. """ # structure self.parent = parent self.children = {} if trace: # special behavior for trace mode self._shift = self._shift_trace else: self._shift = self._shift_notrace # content self.msgline = msgline self.keys = None def __len__(self): """Length of whole message string.""" return len(str(self)) def __eq__(self, other): """Comparison method compares whole message strings.""" return str(self) == str(other) def _add_key(self, key): """Add a key to this tree element.""" if self.keys is None: self.keys = set([key]) else: self.keys.add(key) def _shift_notrace(self, key, target_elem): """Shift one of our key to specified target element.""" if self.keys and len(self.keys) == 1: shifting = self.keys self.keys = None else: shifting = set([ key ]) if self.keys: self.keys.difference_update(shifting) if not target_elem.keys: target_elem.keys = shifting else: target_elem.keys.update(shifting) return target_elem def _shift_trace(self, key, target_elem): """Shift one of our key to specified target element (trace mode: keep backtrace of keys).""" if not target_elem.keys: target_elem.keys = set([ key ]) else: target_elem.keys.add(key) return target_elem def __getitem__(self, i): return list(self.lines())[i] def __iter__(self): """Iterate over message lines starting from this tree element.""" # no msgline in root element if self.msgline is None: return # trace the message path path = [self.msgline] parent = self.parent while parent.msgline is not None: 
path.append(parent.msgline) parent = parent.parent # rewind path while path: yield path.pop() def lines(self): """ Get the whole message lines iterator from this tree element. """ return iter(self) splitlines = lines def message(self): """ Get the whole message buffer from this tree element. """ # concat buffers return '\n'.join(self.lines()) __str__ = message def append(self, msgline, key=None): """ A new message is coming, append it to the tree element with optional associated source key. Called by MsgTree.add(). Return corresponding MsgTreeElem (possibly newly created). """ if key is None: # No key association, MsgTree is in MODE_DEFER return self.children.setdefault(msgline, \ self.__class__(msgline, self, self._shift == self._shift_trace)) else: # key given: get/create new child element and shift down the key return self._shift(key, self.children.setdefault(msgline, \ self.__class__(msgline, self, self._shift == self._shift_trace))) class MsgTree(object): """ A MsgTree object maps key objects to multi-lines messages. MsgTree's are mutable objects. Keys are almost arbitrary values (must be hashable). Message lines are organized as a tree internally. MsgTree provides low memory consumption especially on a cluster when all nodes return similar messages. Also, the gathering of messages is done automatically. """ def __init__(self, mode=MODE_DEFER): """MsgTree initializer The `mode' parameter should be set to one of the following constant: MODE_DEFER: all messages are processed immediately, saving memory from duplicate message lines, but keys are associated to tree elements only when needed. MODE_SHIFT: all keys and messages are processed immediately, it is more CPU time consuming as MsgTree full state is updated at each add() call. MODE_TRACE: all keys and messages and processed immediately, and keys are kept for each message element of the tree. The special method walk_trace() is then available to walk all elements of the tree. 
""" self.mode = mode # root element of MsgTree self._root = MsgTreeElem(trace=(mode == MODE_TRACE)) # dict of keys to MsgTreeElem self._keys = {} def clear(self): """Remove all items from the MsgTree.""" self._root = MsgTreeElem(trace=(self.mode == MODE_TRACE)) self._keys.clear() def __len__(self): """Return the number of keys contained in the MsgTree.""" return len(self._keys) def __getitem__(self, key): """Return the message of MsgTree with specified key. Raises a KeyError if key is not in the MsgTree.""" return self._keys[key] def get(self, key, default=None): """ Return the message for key if key is in the MsgTree, else default. If default is not given, it defaults to None, so that this method never raises a KeyError. """ return self._keys.get(key, default) def add(self, key, msgline): """ Add a message line associated with the given key to the MsgTree. """ # try to get current element in MsgTree for the given key, # defaulting to the root element e_msg = self._keys.get(key, self._root) if self.mode >= MODE_SHIFT: key_shift = key else: key_shift = None # add child msg and update keys dict self._keys[key] = e_msg.append(msgline, key_shift) def _update_keys(self): """Update keys associated to tree elements.""" for key, e_msg in self._keys.iteritems(): assert key is not None and e_msg is not None e_msg._add_key(key) def keys(self): """Return an iterator over MsgTree's keys.""" return self._keys.iterkeys() __iter__ = keys def messages(self, match=None): """Return an iterator over MsgTree's messages.""" return imap(itemgetter(0), self.walk(match)) def items(self, match=None, mapper=None): """ Return (key, message) for each key of the MsgTree. """ if mapper is None: mapper = lambda k: k for key, elem in self._keys.iteritems(): if match is None or match(key): yield mapper(key), elem def _depth(self): """ Return the depth of the MsgTree, ie. the max number of lines per message. Added for debugging. 
""" depth = 0 # stack of (element, depth) tuples used to walk the tree estack = [ (self._root, depth) ] while estack: elem, edepth = estack.pop() if len(elem.children) > 0: estack += [(v, edepth + 1) for v in elem.children.values()] depth = max(depth, edepth) return depth def walk(self, match=None, mapper=None): """ Walk the tree. Optionally filter keys on match parameter, and optionally map resulting keys with mapper function. Return an iterator over (message, keys) tuples for each different message in the tree. """ if self.mode == MODE_DEFER: self._update_keys() # stack of elements used to walk the tree (depth-first) estack = [ self._root ] while estack: elem = estack.pop() children = elem.children if len(children) > 0: estack += children.values() if elem.keys: # has some keys mkeys = filter(match, elem.keys) if len(mkeys): yield elem, map(mapper, mkeys) def walk_trace(self, match=None, mapper=None): """ Walk the tree in trace mode. Optionally filter keys on match parameter, and optionally map resulting keys with mapper function. Return an iterator over 4-length tuples (msgline, keys, depth, num_children). """ assert self.mode == MODE_TRACE, \ "walk_trace() is only callable in trace mode" # stack of (element, depth) tuples used to walk the tree estack = [ (self._root, 0) ] while estack: elem, edepth = estack.pop() children = elem.children nchildren = len(children) if nchildren > 0: estack += [(v, edepth + 1) for v in children.values()] if elem.keys: mkeys = filter(match, elem.keys) if len(mkeys): yield elem.msgline, map(mapper, mkeys), edepth, nchildren def remove(self, match=None): """ Modify the tree by removing any matching key references from the messages tree. 
Example of use: >>> msgtree.remove(lambda k: k > 3) """ estack = [ self._root ] # walk the tree to keep only matching keys while estack: elem = estack.pop() if len(elem.children) > 0: estack += elem.children.values() if elem.keys: # has some keys elem.keys = set(ifilterfalse(match, elem.keys)) # also remove key(s) from known keys dict for key in filter(match, self._keys.keys()): del self._keys[key] clustershell-1.6/lib/ClusterShell/Communication.py0000644000130500135250000003305511741571247021767 0ustar thiellgpocre#!/usr/bin/env python # # Copyright CEA/DAM/DIF (2010, 2011, 2012) # Contributor: Henri DOREAU # Contributor: Stephane THIELL # # This file is part of the ClusterShell library. # # This software is governed by the CeCILL-C license under French law and abiding # by the rules of distribution of free software. You can use, modify and/ or # redistribute the software under the terms of the CeCILL-C license as # circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, modify # and redistribute granted by the license, users are provided only with a # limited warranty and the software's author, the holder of the economic rights, # and the successive licensors have only limited liability. # # In this respect, the user's attention is drawn to the risks associated with # loading, using, modifying and/or developing or reproducing the software by the # user in light of its specific status of free software, that may mean that it # is complicated to manipulate, and that also therefore means that it is # reserved for developers and experienced professionals having in-depth computer # knowledge. Users are therefore encouraged to load and test the software's # suitability as regards their requirements in conditions enabling the security # of their systems and/or data to be ensured and, more generally, to use and # operate it in the same conditions as regards security. 
#
# The fact that you are presently reading this means that you have had knowledge
# of the CeCILL-C license and that you accept its terms.

"""
ClusterShell inter-nodes communication module

This module contains the required material for nodes to communicate between each
others within the propagation tree. At the highest level, messages are instances
of several classes. They can be converted into XML to be sent over SSH links
through a Channel instance. In the other side, XML is parsed and new message
objects are instantiated.

Communication channels have been implemented as ClusterShell events handlers.
Whenever a message chunk is read, the data is given to a SAX XML parser, that
will use it to create corresponding messages instances as a messages factory.

As soon as an instance is ready, it is then passed to a recv() method in the
channel. The recv() method of the Channel class is a stub, that requires to be
implemented in subclass to process incoming messages. So is the start() method
too.

Subclassing the Channel class allows implementing whatever logic you want on the
top of a communication channel.
"""

import cPickle
import base64
import logging
import xml.sax

from xml.sax.handler import ContentHandler
from xml.sax.saxutils import XMLGenerator
from xml.sax import SAXParseException

from collections import deque
from cStringIO import StringIO

from ClusterShell.Event import EventHandler


def strdel(s, badchars):
    """return a copy of s with every badchar occurrences removed"""
    stripped = s
    for ch in badchars:
        stripped = stripped.replace(ch, '')
    return stripped


class MessageProcessingError(Exception):
    """base exception raised when an error occurs while processing incoming or
    outgoing messages.
    """


class XMLReader(ContentHandler):
    """SAX handler for XML -> Messages instances conversion"""
    def __init__(self):
        """XMLReader initializer"""
        ContentHandler.__init__(self)

        # FIFO of fully parsed messages, consumed through pop_msg()
        self.msg_queue = deque()
        # current packet under construction
        self._draft = None
        self._sections_map = None

    def startElement(self, name, attrs):
        """read a starting xml tag"""
        if name == 'channel':
            pass
        elif name == 'message':
            self._draft_new(attrs)
        elif self._draft is not None:
            self._draft_update(name, attrs)
        else:
            raise MessageProcessingError('Invalid starting tag %s' % name)

    def endElement(self, name):
        """read an ending xml tag"""
        # end of message
        if name == 'message':
            self.msg_queue.appendleft(self._draft)
            self._draft = None
        elif name == 'channel':
            # closing the channel element marks the end of the stream
            self.msg_queue.append(EndMessage())

    def characters(self, content):
        """read content characters"""
        if self._draft is not None:
            content = content.decode('utf-8')
            #content = strdel(content, [' ', '\t', '\r', '\n'])
            if content != '':
                self._draft.data_update(content)

    def msg_available(self):
        """return whether a message is available for delivery or not"""
        return len(self.msg_queue) > 0

    def pop_msg(self):
        """pop and return the oldest message queued"""
        if len(self.msg_queue) > 0:
            return self.msg_queue.pop()

    def _draft_new(self, attributes):
        """start a new packet construction"""
        # associative array to select to correct constructor according to the
        # message type field contained in the serialized representation
        ctors_map = {
            ConfigurationMessage.ident: ConfigurationMessage,
            ControlMessage.ident: ControlMessage,
            ACKMessage.ident: ACKMessage,
            ErrorMessage.ident: ErrorMessage,
            StdOutMessage.ident: StdOutMessage,
            StdErrMessage.ident: StdErrMessage,
            RetcodeMessage.ident: RetcodeMessage,
            TimeoutMessage.ident: TimeoutMessage,
        }
        try:
            msg_type = attributes['type']
            # select the good constructor
            ctor = ctors_map[msg_type]
        except KeyError:
            raise MessageProcessingError('Unknown message type')
        self._draft = ctor()
        # obtain expected sections map for this type of messages
        self._draft_update('message', attributes)

    def _draft_update(self, name, attributes):
        """update the current message draft with a new section"""
        assert(self._draft is not None)
        if name == 'message':
            self._draft.selfbuild(attributes)
        else:
            raise MessageProcessingError('Invalid tag %s' % name)


class Channel(EventHandler):
    """Use this event handler to establish a communication channel between two
    hosts within the propagation tree.

    The endpoint's logic has to be implemented by subclassing the Channel class
    and overriding the start() and recv() methods.

    There is no default behavior for these methods apart raising a
    NotImplementedError.

    Usage:
      >> chan = MyChannel() # inherits Channel
      >> task = task_self()
      >> task.shell("uname -a", node="host2", handler=chan)
      >> task.resume()
    """
    def __init__(self):
        """Channel initializer"""
        EventHandler.__init__(self)

        self.exit = False
        self.worker = None

        # incremental SAX parsing pipeline: raw worker data is fed to the
        # parser, which fills the XMLReader's message queue
        self._xml_reader = XMLReader()
        self._parser = xml.sax.make_parser(["IncrementalParser"])
        self._parser.setContentHandler(self._xml_reader)

        self.logger = logging.getLogger(__name__)

    def _open(self):
        """open a new communication channel from src to dst"""
        generator = XMLGenerator(self.worker, encoding='UTF-8')
        generator.startDocument()
        generator.startElement('channel', {})

    def _close(self):
        """close an already opened channel"""
        generator = XMLGenerator(self.worker)
        generator.endElement('channel')
        # XXX
        self.worker.write('\n')
        self.exit = True

    def ev_start(self, worker):
        """connection established. Open higher level channel"""
        self.worker = worker
        self.start()

    def ev_written(self, worker):
        # after _close() has flushed its last bytes, tear the worker down
        if self.exit:
            self.logger.debug("aborting worker after last write")
            self.worker.abort()

    def ev_read(self, worker):
        """channel has data to read"""
        raw = worker.current_msg
        #self.logger.debug("ev_read raw=\'%s\'" % raw)
        try:
            self._parser.feed(raw + '\n')
        except SAXParseException, ex:
            raise MessageProcessingError( \
                'Invalid communication (%s): "%s"' % (ex.getMessage(), raw))

        # pass next message to the driver if ready
        if self._xml_reader.msg_available():
            msg = self._xml_reader.pop_msg()
            assert msg is not None
            self.recv(msg)

    def send(self, msg):
        """write an outgoing message as its XML representation"""
        #print '[DBG] send: %s' % str(msg)
        #self.logger.debug("SENDING to %s: \"%s\"" % (self.worker, msg.xml()))
        self.worker.write(msg.xml() + '\n')

    def start(self):
        """initialization logic"""
        raise NotImplementedError('Abstract method: subclasses must implement')

    def recv(self, msg):
        """callback: process incoming message"""
        raise NotImplementedError('Abstract method: subclasses must implement')


class Message(object):
    """base message class"""
    # class-wide counter used to give each message a unique msgid
    _inst_counter = 0
    ident = 'GEN'

    def __init__(self):
        """Message initializer"""
        # attr maps attribute names to the converter applied on selfbuild()
        self.attr = {'type': str, 'msgid': int}
        self.type = self.__class__.ident
        self.msgid = Message._inst_counter
        self.data = ''
        Message._inst_counter += 1

    def data_encode(self, inst):
        """serialize an instance and store the result"""
        # NOTE(review): pickle deserialization of untrusted peer data can
        # execute arbitrary code; peers are assumed trusted here.
        self.data = base64.encodestring(cPickle.dumps(inst))

    def data_decode(self):
        """deserialize a previously encoded instance and return it"""
        return cPickle.loads(base64.decodestring(self.data))

    def data_update(self, raw):
        """append data to the instance (used for deserialization)"""
        # TODO : bufferize and use ''.join() for performance
        #self.logger.debug("data_update raw=%s" % raw)
        self.data += raw

    def selfbuild(self, attributes):
        """self construction from a table of attributes"""
        for k, fmt in self.attr.iteritems():
            try:
                setattr(self, k, fmt(attributes[k]))
            except KeyError:
                raise MessageProcessingError(
                    'Invalid "message" attributes: missing key "%s"' % k)

    def __str__(self):
        """printable representation"""
        elts = ['%s: %s' % (k, str(self.__dict__[k]))
                for k in self.attr.keys()]
        attributes = ', '.join(elts)
        return "Message %s (%s)" % (self.type, attributes)

    def xml(self):
        """generate XML version of a configuration message"""
        out = StringIO()
        generator = XMLGenerator(out)

        # "stringify" entries for XML conversion
        state = {}
        for k in self.attr:
            state[k] = str(getattr(self, k))

        generator.startElement('message', state)
        generator.characters(self.data)
        generator.endElement('message')
        xml_msg = out.getvalue()
        out.close()
        return xml_msg


class ConfigurationMessage(Message):
    """configuration propagation container"""
    ident = 'CFG'


class RoutedMessageBase(Message):
    """abstract class for routed message (with worker source id)"""
    def __init__(self, srcid):
        Message.__init__(self)
        self.attr.update({'srcid': int})
        self.srcid = srcid


class ControlMessage(RoutedMessageBase):
    """action request"""
    ident = 'CTL'

    def __init__(self, srcid=0):
        """ControlMessage initializer"""
        RoutedMessageBase.__init__(self, srcid)
        self.attr.update({'action': str, 'target': str})
        self.action = ''
        self.target = ''


class ACKMessage(Message):
    """acknowledgement message"""
    ident = 'ACK'

    def __init__(self, ackid=0):
        """ACKMessage initializer"""
        Message.__init__(self)
        self.attr.update({'ack': int})
        self.ack = ackid

    def data_update(self, raw):
        """override method to ensure that incoming ACK messages don't
        contain unexpected payloads
        """
        raise MessageProcessingError('ACK messages have no payload')


class ErrorMessage(Message):
    """error message"""
    ident = 'ERR'

    def __init__(self, err=''):
        """ErrorMessage initializer"""
        Message.__init__(self)
        self.attr.update({'reason': str})
        self.reason = err

    def data_update(self, raw):
        """override method to ensure that incoming error messages don't
        contain unexpected payloads
        """
        raise MessageProcessingError('Error message have no payload')


class StdOutMessage(RoutedMessageBase):
    """container message for standard output"""
    ident = 'OUT'

    def __init__(self, nodes='', output='', srcid=0):
        """StdOutMessage initializer"""
        RoutedMessageBase.__init__(self, srcid)
        self.attr.update({'nodes': str})
        self.nodes = nodes
        self.data = output


class StdErrMessage(StdOutMessage):
    # same behavior as StdOutMessage, only the type identifier differs
    ident = 'SER'


class RetcodeMessage(RoutedMessageBase):
    """container message for return code"""
    ident = 'RET'

    def __init__(self, nodes='', retcode=0, srcid=0):
        """RetcodeMessage initializer"""
        RoutedMessageBase.__init__(self, srcid)
        self.attr.update({'retcode': int, 'nodes': str})
        self.retcode = retcode
        self.nodes = nodes

    def data_update(self, raw):
        """override method to ensure that incoming retcode messages don't
        contain unexpected payloads
        """
        raise MessageProcessingError('Retcode message has no payload')


class TimeoutMessage(RoutedMessageBase):
    """container message for timeout notification"""
    ident = 'TIM'

    def __init__(self, nodes='', srcid=0):
        """TimeoutMessage initializer"""
        RoutedMessageBase.__init__(self, srcid)
        self.attr.update({'nodes': str})
        self.nodes = nodes


class EndMessage(Message):
    """end of channel message"""
    ident = 'END'
clustershell-1.6/lib/ClusterShell/Engine/0000755000130500135250000000000011741572333020004 5ustar thiellgpocreclustershell-1.6/lib/ClusterShell/Engine/Select.py0000644000130500135250000001755711741571247021613 0ustar thiellgpocre#
# Copyright CEA/DAM/DIF (2009, 2010, 2011)
#  Contributors:
#   Henri DOREAU
#   Aurelien DEGREMONT
#   Stephane THIELL
#
# This file is part of the ClusterShell library.
#
# This software is governed by the CeCILL-C license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL-C
# license as circulated by CEA, CNRS and INRIA at the following URL
# "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-C license and that you accept its terms.

"""
A select() based ClusterShell Engine.

The select() system call is available on almost every UNIX-like systems.
"""

import errno
import select
import sys
import time

from ClusterShell.Engine.Engine import Engine
from ClusterShell.Engine.Engine import EngineTimeoutException
from ClusterShell.Worker.EngineClient import EngineClientEOF


class EngineSelect(Engine):
    """
    Select Engine

    ClusterShell engine using the select.select mechanism
    """

    identifier = "select"

    def __init__(self, info):
        """
        Initialize Engine.
        """
        Engine.__init__(self, info)
        # file descriptors watched for read/error and write events
        self._fds_r = []
        self._fds_w = []

    def _register_specific(self, fd, event):
        """
        Engine-specific fd registering. Called by Engine register.
        """
        # read and error events are both served by the read fd list
        if event & (Engine.E_READ | Engine.E_ERROR):
            self._fds_r.append(fd)
        elif event & Engine.E_WRITE:
            self._fds_w.append(fd)

    def _unregister_specific(self, fd, ev_is_set):
        """
        Engine-specific fd unregistering. Called by Engine unregister.
        """
        # NOTE(review): `ev_is_set or True' is always true, so the fd is
        # unconditionally removed from both lists -- confirm whether the
        # ev_is_set parameter was meant to gate this.
        if ev_is_set or True:
            if fd in self._fds_r:
                self._fds_r.remove(fd)
            if fd in self._fds_w:
                self._fds_w.remove(fd)

    def _modify_specific(self, fd, event, setvalue):
        """
        Engine-specific modifications after a interesting event change
        for a file descriptor. Called automatically by Engine
        register/unregister and set_events(). For the select() engine,
        it appends/remove the fd to/from the concerned fd_sets.
        """
        self._debug("MODSPEC fd=%d event=%x setvalue=%d" % (fd, event,
                                                            setvalue))
        if setvalue:
            self._register_specific(fd, event)
        else:
            self._unregister_specific(fd, True)

    def runloop(self, timeout):
        """
        Select engine run(): start clients and properly get replies
        """
        # 0 means no task timeout: use -1 internally (blocking select)
        if timeout == 0:
            timeout = -1

        start_time = time.time()

        # run main event loop...
        while self.evlooprefcnt > 0:
            self._debug("LOOP evlooprefcnt=%d (reg_clifds=%s) (timers=%d)" %
                        (self.evlooprefcnt, self.reg_clifds.keys(),
                         len(self.timerq)))
            try:
                # next select timeout is the earliest of the timer queue
                # deadline and the global task timeout
                timeo = self.timerq.nextfire_delay()
                if timeout > 0 and timeo >= timeout:
                    # task timeout may invalidate clients timeout
                    self.timerq.clear()
                    timeo = timeout
                elif timeo == -1:
                    timeo = timeout

                self._current_loopcnt += 1
                if timeo >= 0:
                    r_ready, w_ready, x_ready = \
                        select.select(self._fds_r, self._fds_w, [], timeo)
                else:
                    # no timeout specified, do not supply the timeout argument
                    r_ready, w_ready, x_ready = \
                        select.select(self._fds_r, self._fds_w, [])
            except select.error, (ex_errno, ex_strerror):
                # might get interrupted by a signal
                if ex_errno == errno.EINTR:
                    continue
                elif ex_errno in [errno.EINVAL, errno.EBADF, errno.ENOMEM]:
                    print >> sys.stderr, "EngineSelect: %s" % ex_strerror
                else:
                    raise

            # iterate over fd on which events occured
            for fd in set(r_ready) | set(w_ready):
                # get client instance
                client, fdev = self._fd2client(fd)
                if client is None:
                    continue

                # process this client
                # NOTE(review): this sets the attribute on `client' while the
                # post-processing below clears `self._current_client' -- looks
                # asymmetric; confirm against the base Engine class usage.
                client._current_client = client

                # check for possible unblocking read on this fd
                if fd in r_ready:
                    assert fdev & (Engine.E_READ | Engine.E_ERROR)
                    assert client._events & fdev
                    self.modify(client, 0, fdev)
                    try:
                        if fdev & Engine.E_READ:
                            client._handle_read()
                        else:
                            client._handle_error()
                    except EngineClientEOF:
                        self._debug("EngineClientEOF %s" % client)
                        # if the EOF occurs on E_READ...
                        if fdev & Engine.E_READ:
                            # and if the client is also waiting for E_ERROR
                            if client._events & Engine.E_ERROR:
                                # just clear the event for E_READ
                                self.modify(client, 0, fdev)
                            else:
                                # otherwise we can remove the client
                                self.remove(client)
                        else:
                            # same thing in the other order...
                            if client._events & Engine.E_READ:
                                self.modify(client, 0, fdev)
                            else:
                                self.remove(client)

                # check for writing
                if fd in w_ready:
                    self._debug("W_READY fd=%d %s (r%s,e%s,w%s)" %
                                (fd, client.__class__.__name__,
                                 client.reader_fileno(),
                                 client.error_fileno(),
                                 client.writer_fileno()))
                    assert fdev == Engine.E_WRITE
                    assert client._events & fdev
                    self.modify(client, 0, fdev)
                    client._handle_write()

                # post processing
                self._current_client = None

                # apply any changes occured during processing
                if client.registered:
                    self.set_events(client, client._new_events)

            # check for task runloop timeout
            if timeout > 0 and time.time() >= start_time + timeout:
                raise EngineTimeoutException()

            # process clients timeout
            self.fire_timers()

        self._debug("LOOP EXIT evlooprefcnt=%d (reg_clifds=%s) (timers=%d)" % \
                    (self.evlooprefcnt, self.reg_clifds, len(self.timerq)))
clustershell-1.6/lib/ClusterShell/Engine/Engine.py0000644000130500135250000006165411741571247021576 0ustar thiellgpocre#
# Copyright CEA/DAM/DIF (2007, 2008, 2009, 2010, 2011)
#  Contributor: Stephane THIELL
#
# This file is part of the ClusterShell library.
#
# This software is governed by the CeCILL-C license under French law and
# abiding by the rules of distribution of free software.
You can use, # modify and/ or redistribute the software under the terms of the CeCILL-C # license as circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. # # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-C license and that you accept its terms. """ Interface of underlying Task's Engine. An Engine implements a loop your thread enters and uses to call event handlers in response to incoming events (from workers, timers, etc.). """ import errno import heapq import logging import time class EngineException(Exception): """ Base engine exception. """ class EngineAbortException(EngineException): """ Raised on user abort. """ def __init__(self, kill): EngineException.__init__(self) self.kill = kill class EngineTimeoutException(EngineException): """ Raised when a timeout is encountered. """ class EngineIllegalOperationError(EngineException): """ Error raised when an illegal operation has been performed. 
""" class EngineAlreadyRunningError(EngineIllegalOperationError): """ Error raised when the engine is already running. """ class EngineNotSupportedError(EngineException): """ Error raised when the engine mechanism is not supported. """ def __init__(self, engineid): EngineException.__init__(self) self.engineid = engineid class EngineBaseTimer: """ Abstract class for ClusterShell's engine timer. Such a timer requires a relative fire time (delay) in seconds (as float), and supports an optional repeating interval in seconds (as float too). See EngineTimer for more information about ClusterShell timers. """ def __init__(self, fire_delay, interval=-1.0, autoclose=False): """ Create a base timer. """ self.fire_delay = fire_delay self.interval = interval self.autoclose = autoclose self._engine = None self._timercase = None def _set_engine(self, engine): """ Bind to engine, called by Engine. """ if self._engine: # A timer can be registered to only one engine at a time. raise EngineIllegalOperationError("Already bound to engine.") self._engine = engine def invalidate(self): """ Invalidates a timer object, stopping it from ever firing again. """ if self._engine: self._engine.timerq.invalidate(self) self._engine = None def is_valid(self): """ Returns a boolean value that indicates whether an EngineTimer object is valid and able to fire. """ return self._engine != None def set_nextfire(self, fire_delay, interval=-1): """ Set the next firing delay in seconds for an EngineTimer object. The optional paramater `interval' sets the firing interval of the timer. If not specified, the timer fires once and then is automatically invalidated. Time values are expressed in second using floating point values. Precision is implementation (and system) dependent. It is safe to call this method from the task owning this timer object, in any event handlers, anywhere. However, resetting a timer's next firing time may be a relatively expensive operation. 
It is more efficient to let timers autorepeat or to use this method from the timer's own event handler callback (ie. from its ev_timer). """ if not self.is_valid(): raise EngineIllegalOperationError("Operation on invalid timer.") self.fire_delay = fire_delay self.interval = interval self._engine.timerq.reschedule(self) def _fire(self): raise NotImplementedError("Derived classes must implement.") class EngineTimer(EngineBaseTimer): """ Concrete class EngineTimer An EngineTimer object represents a timer bound to an engine that fires at a preset time in the future. Timers can fire either only once or repeatedly at fixed time intervals. Repeating timers can also have their next firing time manually adjusted. A timer is not a real-time mechanism; it fires when the task's underlying engine to which the timer has been added is running and able to check if the timer's firing time has passed. """ def __init__(self, fire_delay, interval, autoclose, handler): EngineBaseTimer.__init__(self, fire_delay, interval, autoclose) self.eh = handler assert self.eh != None, "An event handler is needed for timer." def _fire(self): self.eh.ev_timer(self) class _EngineTimerQ: class _EngineTimerCase: """ Helper class that allows comparisons of fire times, to be easily used in an heapq. 
""" def __init__(self, client): self.client = client self.client._timercase = self # arm timer (first time) assert self.client.fire_delay > 0 self.fire_date = self.client.fire_delay + time.time() def __cmp__(self, other): return cmp(self.fire_date, other.fire_date) def arm(self, client): assert client != None self.client = client self.client._timercase = self # setup next firing date time_current = time.time() if self.client.fire_delay > 0: self.fire_date = self.client.fire_delay + time_current else: interval = float(self.client.interval) assert interval > 0 self.fire_date += interval # If the firing time is delayed so far that it passes one # or more of the scheduled firing times, reschedule the # timer for the next scheduled firing time in the future. while self.fire_date < time_current: self.fire_date += interval def disarm(self): client = self.client client._timercase = None self.client = None return client def armed(self): return self.client != None def __init__(self, engine): """ Initializer. """ self._engine = engine self.timers = [] self.armed_count = 0 def __len__(self): """ Return the number of active timers. """ return self.armed_count def schedule(self, client): """ Insert and arm a client's timer. """ # arm only if fire is set if client.fire_delay > 0: heapq.heappush(self.timers, _EngineTimerQ._EngineTimerCase(client)) self.armed_count += 1 if not client.autoclose: self._engine.evlooprefcnt += 1 def reschedule(self, client): """ Re-insert client's timer. """ if client._timercase: self.invalidate(client) self._dequeue_disarmed() self.schedule(client) def invalidate(self, client): """ Invalidate client's timer. Current implementation doesn't really remove the timer, but simply flags it as disarmed. 
""" if not client._timercase: # if timer is being fire, invalidate its values client.fire_delay = 0 client.interval = 0 return if self.armed_count <= 0: raise ValueError, "Engine client timer not found in timer queue" client._timercase.disarm() self.armed_count -= 1 if not client.autoclose: self._engine.evlooprefcnt -= 1 def _dequeue_disarmed(self): """ Dequeue disarmed timers (sort of garbage collection). """ while len(self.timers) > 0 and not self.timers[0].armed(): heapq.heappop(self.timers) def fire(self): """ Remove the smallest timer from the queue and fire its associated client. Raise IndexError if the queue is empty. """ self._dequeue_disarmed() timercase = heapq.heappop(self.timers) client = timercase.disarm() client.fire_delay = 0 client._fire() if client.fire_delay > 0 or client.interval > 0: timercase.arm(client) heapq.heappush(self.timers, timercase) else: self.armed_count -= 1 if not client.autoclose: self._engine.evlooprefcnt -= 1 def nextfire_delay(self): """ Return next timer fire delay (relative time). """ self._dequeue_disarmed() if len(self.timers) > 0: return max(0., self.timers[0].fire_date - time.time()) return -1 def expired(self): """ Has a timer expired? """ self._dequeue_disarmed() return len(self.timers) > 0 and \ (self.timers[0].fire_date - time.time()) <= 1e-2 def clear(self): """ Stop and clear all timers. """ for timer in self.timers: if timer.armed(): timer.client.invalidate() self.timers = [] self.armed_count = 0 class Engine: """ Interface for ClusterShell engine. Subclasses have to implement a runloop listening for client events. """ # Engine client I/O event interest bits E_READ = 0x1 E_ERROR = 0x2 E_WRITE = 0x4 E_ANY = E_READ | E_ERROR | E_WRITE identifier = "(none)" def __init__(self, info): """ Initialize base class. 
""" # take a reference on info dict self.info = info # and update engine id self.info['engine'] = self.identifier # keep track of all clients self._clients = set() self._ports = set() # keep track of the number of registered clients (delayable only) self.reg_clients = 0 # keep track of registered file descriptors in a dict where keys # are fileno and values are clients self.reg_clifds = {} # Current loop iteration counter. It is the number of performed engine # loops in order to keep track of client registration epoch, so we can # safely process FDs by chunk and re-use FDs (see Engine._fd2client). self._current_loopcnt = 0 # Current client being processed self._current_client = None # timer queue to handle both timers and clients timeout self.timerq = _EngineTimerQ(self) # reference count to the event loop (must include registered # clients and timers configured WITHOUT autoclose) self.evlooprefcnt = 0 # running state self.running = False # runloop-has-exited flag self._exited = False def clients(self): """ Get a copy of clients set. """ return self._clients.copy() def ports(self): """ Get a copy of ports set. """ return self._ports.copy() def _fd2client(self, fd): client, fdev = self.reg_clifds.get(fd, (None, None)) if client: if client._reg_epoch < self._current_loopcnt: return client, fdev else: self._debug("ENGINE _fd2client: ignoring just re-used FD %d" \ % fd) return (None, None) def add(self, client): """ Add a client to engine. Subclasses that override this method should call base class method. """ # bind to engine client._set_engine(self) if client.delayable: # add to regular client set self._clients.add(client) else: # add to port set (non-delayable) self._ports.add(client) if self.running: # in-fly add if running if not client.delayable: self.register(client) elif self.info["fanout"] > self.reg_clients: self.register(client._start()) def _remove(self, client, abort, did_timeout=False, force=False): """ Remove a client from engine (subroutine). 
""" # be careful to also remove ports when engine has not started yet if client.registered or not client.delayable: if client.registered: self.unregister(client) # care should be taken to ensure correct closing flags client._close(abort=abort, flush=not force, timeout=did_timeout) def remove(self, client, abort=False, did_timeout=False): """ Remove a client from engine. Subclasses that override this method should call base class method. """ self._debug("REMOVE %s" % client) if client.delayable: self._clients.remove(client) else: self._ports.remove(client) self._remove(client, abort, did_timeout) self.start_all() def clear(self, did_timeout=False, clear_ports=False): """ Remove all clients. Subclasses that override this method should call base class method. """ all_clients = [self._clients] if clear_ports: all_clients.append(self._ports) for clients in all_clients: while len(clients) > 0: client = clients.pop() self._remove(client, True, did_timeout, force=True) def register(self, client): """ Register an engine client. Subclasses that override this method should call base class method. 
""" assert client in self._clients or client in self._ports assert not client.registered efd = client.fd_error rfd = client.fd_reader wfd = client.fd_writer assert rfd is not None or wfd is not None self._debug("REG %s(e%s,r%s,w%s)(autoclose=%s)" % \ (client.__class__.__name__, efd, rfd, wfd, client.autoclose)) client._events = 0 client.registered = True client._reg_epoch = self._current_loopcnt if client.delayable: self.reg_clients += 1 if client.autoclose: refcnt_inc = 0 else: refcnt_inc = 1 if efd != None: self.reg_clifds[efd] = client, Engine.E_ERROR client._events |= Engine.E_ERROR self.evlooprefcnt += refcnt_inc self._register_specific(efd, Engine.E_ERROR) if rfd != None: self.reg_clifds[rfd] = client, Engine.E_READ client._events |= Engine.E_READ self.evlooprefcnt += refcnt_inc self._register_specific(rfd, Engine.E_READ) if wfd != None: self.reg_clifds[wfd] = client, Engine.E_WRITE client._events |= Engine.E_WRITE self.evlooprefcnt += refcnt_inc self._register_specific(wfd, Engine.E_WRITE) client._new_events = client._events # start timeout timer self.timerq.schedule(client) def unregister_writer(self, client): self._debug("UNREG WRITER r%s,w%s" % (client.reader_fileno(), \ client.writer_fileno())) if client.autoclose: refcnt_inc = 0 else: refcnt_inc = 1 wfd = client.fd_writer if wfd != None: self._unregister_specific(wfd, client._events & Engine.E_WRITE) client._events &= ~Engine.E_WRITE del self.reg_clifds[wfd] self.evlooprefcnt -= refcnt_inc def unregister(self, client): """ Unregister a client. Subclasses that override this method should call base class method. 
""" # sanity check assert client.registered self._debug("UNREG %s (r%s,e%s,w%s)" % (client.__class__.__name__, client.reader_fileno(), client.error_fileno(), client.writer_fileno())) # remove timeout timer self.timerq.invalidate(client) if client.autoclose: refcnt_inc = 0 else: refcnt_inc = 1 # clear interest events efd = client.fd_error if efd is not None: self._unregister_specific(efd, client._events & Engine.E_ERROR) client._events &= ~Engine.E_ERROR del self.reg_clifds[efd] self.evlooprefcnt -= refcnt_inc rfd = client.fd_reader if rfd is not None: self._unregister_specific(rfd, client._events & Engine.E_READ) client._events &= ~Engine.E_READ del self.reg_clifds[rfd] self.evlooprefcnt -= refcnt_inc wfd = client.fd_writer if wfd is not None: self._unregister_specific(wfd, client._events & Engine.E_WRITE) client._events &= ~Engine.E_WRITE del self.reg_clifds[wfd] self.evlooprefcnt -= refcnt_inc client._new_events = 0 client.registered = False if client.delayable: self.reg_clients -= 1 def modify(self, client, setmask, clearmask): """ Modify the next loop interest events bitset for a client. """ self._debug("MODEV set:0x%x clear:0x%x %s" % (setmask, clearmask, client)) client._new_events &= ~clearmask client._new_events |= setmask if self._current_client is not client: # modifying a non processing client, apply new_events now self.set_events(client, client._new_events) def _register_specific(self, fd, event): """Engine-specific register fd for event method.""" raise NotImplementedError("Derived classes must implement.") def _unregister_specific(self, fd, ev_is_set): """Engine-specific unregister fd method.""" raise NotImplementedError("Derived classes must implement.") def _modify_specific(self, fd, event, setvalue): """Engine-specific modify fd for event method.""" raise NotImplementedError("Derived classes must implement.") def set_events(self, client, new_events): """ Set the active interest events bitset for a client. 
""" self._debug("SETEV new_events:0x%x events:0x%x %s" % (new_events, client._events, client)) if not client.registered: logging.getLogger(__name__).debug( \ "set_events: client %s not registered" % self) return chgbits = new_events ^ client._events if chgbits == 0: return # configure interest events as appropriate efd = client.fd_error if efd is not None: if chgbits & Engine.E_ERROR: status = new_events & Engine.E_ERROR self._modify_specific(efd, Engine.E_ERROR, status) if status: client._events |= Engine.E_ERROR else: client._events &= ~Engine.E_ERROR rfd = client.fd_reader if rfd is not None: if chgbits & Engine.E_READ: status = new_events & Engine.E_READ self._modify_specific(rfd, Engine.E_READ, status) if status: client._events |= Engine.E_READ else: client._events &= ~Engine.E_READ wfd = client.fd_writer if wfd is not None: if chgbits & Engine.E_WRITE: status = new_events & Engine.E_WRITE self._modify_specific(wfd, Engine.E_WRITE, status) if status: client._events |= Engine.E_WRITE else: client._events &= ~Engine.E_WRITE client._new_events = client._events def set_reading(self, client): """ Set client reading state. """ # listen for readable events self.modify(client, Engine.E_READ, 0) def set_reading_error(self, client): """ Set client reading error state. """ # listen for readable events self.modify(client, Engine.E_ERROR, 0) def set_writing(self, client): """ Set client writing state. """ # listen for writable events self.modify(client, Engine.E_WRITE, 0) def add_timer(self, timer): """ Add engine timer. """ timer._set_engine(self) self.timerq.schedule(timer) def remove_timer(self, timer): """ Remove engine timer. """ self.timerq.invalidate(timer) def fire_timers(self): """ Fire expired timers for processing. """ while self.timerq.expired(): self.timerq.fire() def start_ports(self): """ Start and register all port clients. 
""" # Ports are special, non-delayable engine clients for port in self._ports: if not port.registered: self._debug("START PORT %s" % port) self.register(port) def start_all(self): """ Start and register all other possible clients, in respect of task fanout. """ # Get current fanout value fanout = self.info["fanout"] assert fanout > 0 if fanout <= self.reg_clients: return # Register regular engine clients within the fanout limit for client in self._clients: if not client.registered: self._debug("START CLIENT %s" % client.__class__.__name__) self.register(client._start()) if fanout <= self.reg_clients: break def run(self, timeout): """ Run engine in calling thread. """ # change to running state if self.running: raise EngineAlreadyRunningError() self.running = True # start port clients self.start_ports() # peek in ports for early pending messages self.snoop_ports() # start all other clients self.start_all() # note: try-except-finally not supported before python 2.5 try: try: self.runloop(timeout) except Exception, e: # any exceptions invalidate clients self.clear(isinstance(e, EngineTimeoutException)) raise finally: # cleanup self.timerq.clear() self.running = False def snoop_ports(self): """ Peek in ports for possible early pending messages. This method simply tries to read port pipes in non- blocking mode. """ # make a copy so that early messages on installed ports may # lead to new ports ports = self._ports.copy() for port in ports: try: port._handle_read() except (IOError, OSError), (err, strerr): if err == errno.EAGAIN or err == errno.EWOULDBLOCK: # no pending message return # raise any other error raise def runloop(self, timeout): """ Engine specific run loop. Derived classes must implement. """ raise NotImplementedError("Derived classes must implement.") def abort(self, kill): """ Abort runloop. """ if self.running: raise EngineAbortException(kill) self.clear(clear_ports=kill) def exited(self): """ Returns True if the engine has exited the runloop once. 
""" return not self.running and self._exited def _debug(self, s): # library engine debugging hook #import sys #print >>sys.stderr, s pass clustershell-1.6/lib/ClusterShell/Engine/__init__.py0000644000130500135250000000000011741571247022106 0ustar thiellgpocreclustershell-1.6/lib/ClusterShell/Engine/Poll.py0000644000130500135250000002044411741571247021273 0ustar thiellgpocre# # Copyright CEA/DAM/DIF (2007, 2008, 2009, 2010, 2011) # Contributor: Stephane THIELL # # This file is part of the ClusterShell library. # # This software is governed by the CeCILL-C license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/ or redistribute the software under the terms of the CeCILL-C # license as circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. # # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-C license and that you accept its terms. """ A poll() based ClusterShell Engine. 
The poll() system call is available on Linux and BSD. """ import errno import os import select import sys import time from ClusterShell.Engine.Engine import Engine from ClusterShell.Engine.Engine import EngineException from ClusterShell.Engine.Engine import EngineNotSupportedError from ClusterShell.Engine.Engine import EngineTimeoutException from ClusterShell.Worker.EngineClient import EngineClientEOF class EnginePoll(Engine): """ Poll Engine ClusterShell engine using the select.poll mechanism (Linux poll() syscall). """ identifier = "poll" def __init__(self, info): """ Initialize Engine. """ Engine.__init__(self, info) try: # get a polling object self.polling = select.poll() except AttributeError: raise EngineNotSupportedError(EnginePoll.identifier) def _register_specific(self, fd, event): if event & (Engine.E_READ | Engine.E_ERROR): eventmask = select.POLLIN elif event == Engine.E_WRITE: eventmask = select.POLLOUT self.polling.register(fd, eventmask) def _unregister_specific(self, fd, ev_is_set): if ev_is_set: self.polling.unregister(fd) def _modify_specific(self, fd, event, setvalue): """ Engine-specific modifications after a interesting event change for a file descriptor. Called automatically by Engine register/unregister and set_events(). For the poll() engine, it reg/unreg or modifies the event mask associated to a file descriptor. """ self._debug("MODSPEC fd=%d event=%x setvalue=%d" % (fd, event, setvalue)) if setvalue: eventmask = 0 if event & (Engine.E_READ | Engine.E_ERROR): eventmask = select.POLLIN elif event == Engine.E_WRITE: eventmask = select.POLLOUT self.polling.register(fd, eventmask) else: self.polling.unregister(fd) def runloop(self, timeout): """ Poll engine run(): start clients and properly get replies """ if timeout == 0: timeout = -1 start_time = time.time() # run main event loop... 
while self.evlooprefcnt > 0: self._debug("LOOP evlooprefcnt=%d (reg_clifds=%s) (timers=%d)" \ % (self.evlooprefcnt, self.reg_clifds.keys(), \ len(self.timerq))) try: timeo = self.timerq.nextfire_delay() if timeout > 0 and timeo >= timeout: # task timeout may invalidate clients timeout self.timerq.clear() timeo = timeout elif timeo == -1: timeo = timeout self._current_loopcnt += 1 evlist = self.polling.poll(timeo * 1000.0 + 1.0) except select.error, (ex_errno, ex_strerror): # might get interrupted by a signal if ex_errno == errno.EINTR: continue elif ex_errno == errno.EINVAL: print >> sys.stderr, \ "EnginePoll: please increase RLIMIT_NOFILE" raise for fd, event in evlist: if event & select.POLLNVAL: raise EngineException("Caught POLLNVAL on fd %d" % fd) # get client instance client, fdev = self._fd2client(fd) if client is None: continue # process this client client._current_client = client # check for poll error condition of some sort if event & select.POLLERR: self._debug("POLLERR %s" % client) self.unregister_writer(client) os.close(client.fd_writer) client.fd_writer = None client._current_client = None continue # check for data to read if event & select.POLLIN: assert fdev & (Engine.E_READ | Engine.E_ERROR) assert client._events & fdev self.modify(client, 0, fdev) try: if fdev & Engine.E_READ: client._handle_read() else: client._handle_error() except EngineClientEOF: self._debug("EngineClientEOF %s" % client) if fdev & Engine.E_READ: self.remove(client) client._current_client = None continue # or check for end of stream (do not handle both at the same # time because handle_read() may perform a partial read) elif event & select.POLLHUP: self._debug("POLLHUP fd=%d %s (r%s,e%s,w%s)" % (fd, client.__class__.__name__, client.fd_reader, client.fd_error, client.fd_writer)) if fdev & Engine.E_READ: if client._events & Engine.E_ERROR: self.modify(client, 0, fdev) else: self.remove(client) else: if client._events & Engine.E_READ: self.modify(client, 0, fdev) else: 
self.remove(client) # check for writing if event & select.POLLOUT: self._debug("POLLOUT fd=%d %s (r%s,e%s,w%s)" % (fd, client.__class__.__name__, client.fd_reader, client.fd_error, client.fd_writer)) assert fdev == Engine.E_WRITE assert client._events & fdev self.modify(client, 0, fdev) client._handle_write() client._current_client = None # apply any changes occured during processing if client.registered: self.set_events(client, client._new_events) # check for task runloop timeout if timeout > 0 and time.time() >= start_time + timeout: raise EngineTimeoutException() # process clients timeout self.fire_timers() self._debug("LOOP EXIT evlooprefcnt=%d (reg_clifds=%s) (timers=%d)" % \ (self.evlooprefcnt, self.reg_clifds, len(self.timerq))) clustershell-1.6/lib/ClusterShell/Engine/Factory.py0000644000130500135250000000574111741571247021777 0ustar thiellgpocre# # Copyright CEA/DAM/DIF (2009, 2010, 2011) # Contributor: Stephane THIELL # # This file is part of the ClusterShell library. # # This software is governed by the CeCILL-C license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/ or redistribute the software under the terms of the CeCILL-C # license as circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. 
# # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-C license and that you accept its terms. """ Engine Factory to select the best working event engine for the current version of Python and Operating System. """ import sys from ClusterShell.Engine.Engine import EngineNotSupportedError # Available event engines from ClusterShell.Engine.EPoll import EngineEPoll from ClusterShell.Engine.Poll import EnginePoll from ClusterShell.Engine.Select import EngineSelect class PreferredEngine(object): """ Preferred Engine selection metaclass (DP Abstract Factory). """ engines = { EngineEPoll.identifier: EngineEPoll, EnginePoll.identifier: EnginePoll, EngineSelect.identifier: EngineSelect } def __new__(cls, hint, info): """ Create a new preferred Engine. 
""" if not hint or hint == 'auto': # in order or preference for engine_class in [ EngineEPoll, EnginePoll, EngineSelect ]: try: return engine_class(info) except EngineNotSupportedError: pass raise RuntimeError("FATAL: No supported ClusterShell.Engine found") else: # User overriding engine selection try: # constructor may raise EngineNotSupportedError return cls.engines[hint](info) except KeyError, exc: print >> sys.stderr, "Invalid engine identifier", exc raise clustershell-1.6/lib/ClusterShell/Engine/EPoll.py0000644000130500135250000002014711741571247021400 0ustar thiellgpocre# # Copyright CEA/DAM/DIF (2009, 2010, 2011) # Contributor: Stephane THIELL # # This file is part of the ClusterShell library. # # This software is governed by the CeCILL-C license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/ or redistribute the software under the terms of the CeCILL-C # license as circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. # # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. 
Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-C license and that you accept its terms. """ A ClusterShell Engine using epoll, an I/O event notification facility. The epoll event distribution interface is available on Linux 2.6, and has been included in Python 2.6. """ import errno import select import time from ClusterShell.Engine.Engine import Engine from ClusterShell.Engine.Engine import EngineNotSupportedError from ClusterShell.Engine.Engine import EngineTimeoutException from ClusterShell.Worker.EngineClient import EngineClientEOF class EngineEPoll(Engine): """ EPoll Engine ClusterShell Engine class using the select.epoll mechanism. """ identifier = "epoll" def __init__(self, info): """ Initialize Engine. """ Engine.__init__(self, info) try: # get an epoll object self.epolling = select.epoll() except AttributeError: raise EngineNotSupportedError(EngineEPoll.identifier) def _register_specific(self, fd, event): """ Engine-specific fd registering. Called by Engine register. """ if event & (Engine.E_READ | Engine.E_ERROR): eventmask = select.EPOLLIN elif event == Engine.E_WRITE: eventmask = select.EPOLLOUT self.epolling.register(fd, eventmask) def _unregister_specific(self, fd, ev_is_set): """ Engine-specific fd unregistering. Called by Engine unregister. """ self._debug("UNREGSPEC fd=%d ev_is_set=%x"% (fd, ev_is_set)) if ev_is_set: self.epolling.unregister(fd) def _modify_specific(self, fd, event, setvalue): """ Engine-specific modifications after a interesting event change for a file descriptor. Called automatically by Engine set_events(). For the epoll engine, it modifies the event mask associated to a file descriptor. 
""" self._debug("MODSPEC fd=%d event=%x setvalue=%d" % (fd, event, setvalue)) eventmask = 0 if setvalue: if event & (Engine.E_READ | Engine.E_ERROR): eventmask = select.EPOLLIN elif event == Engine.E_WRITE: eventmask = select.EPOLLOUT self.epolling.register(fd, eventmask) else: self.epolling.unregister(fd) def runloop(self, timeout): """ Run epoll main loop. """ if timeout == 0: timeout = -1 start_time = time.time() # run main event loop... while self.evlooprefcnt > 0: self._debug("LOOP evlooprefcnt=%d (reg_clifds=%s) (timers=%d)" % \ (self.evlooprefcnt, self.reg_clifds.keys(), len(self.timerq))) try: timeo = self.timerq.nextfire_delay() if timeout > 0 and timeo >= timeout: # task timeout may invalidate clients timeout self.timerq.clear() timeo = timeout elif timeo == -1: timeo = timeout self._current_loopcnt += 1 evlist = self.epolling.poll(timeo + 0.001) except IOError, ex: # might get interrupted by a signal if ex.errno == errno.EINTR: continue for fd, event in evlist: # get client instance client, fdev = self._fd2client(fd) if client is None: continue # set as current processed client self._current_client = client # check for poll error condition of some sort if event & select.EPOLLERR: self._debug("EPOLLERR %s" % client) client._close_writer() self._current_client = None continue # check for data to read if event & select.EPOLLIN: #self._debug("EPOLLIN fd=%d %s" % (fd, client)) assert fdev & (Engine.E_READ | Engine.E_ERROR) assert client._events & fdev self.modify(client, 0, fdev) try: if fdev & Engine.E_READ: client._handle_read() else: client._handle_error() except EngineClientEOF: self._debug("EngineClientEOF %s" % client) if fdev & Engine.E_READ: self.remove(client) self._current_client = None continue # or check for end of stream (do not handle both at the same # time because handle_read() may perform a partial read) elif event & select.EPOLLHUP: self._debug("EPOLLHUP fd=%d %s (r%s,e%s,w%s)" % (fd, client.__class__.__name__, client.fd_reader, 
client.fd_error, client.fd_writer)) if fdev & Engine.E_READ: if client._events & Engine.E_ERROR: self.modify(client, 0, fdev) else: self.remove(client) else: if client._events & Engine.E_READ: self.modify(client, 0, fdev) else: self.remove(client) # check for writing if event & select.EPOLLOUT: self._debug("EPOLLOUT fd=%d %s (r%s,e%s,w%s)" % (fd, client.__class__.__name__, client.fd_reader, client.fd_error, client.fd_writer)) assert fdev == Engine.E_WRITE assert client._events & fdev self.modify(client, 0, fdev) client._handle_write() self._current_client = None # apply any changes occured during processing if client.registered: self.set_events(client, client._new_events) # check for task runloop timeout if timeout > 0 and time.time() >= start_time + timeout: raise EngineTimeoutException() # process clients timeout self.fire_timers() self._debug("LOOP EXIT evlooprefcnt=%d (reg_clifds=%s) (timers=%d)" % \ (self.evlooprefcnt, self.reg_clifds, len(self.timerq))) clustershell-1.6/lib/ClusterShell/Gateway.py0000644000130500135250000002515311741571247020563 0ustar thiellgpocre#!/usr/bin/env python # # Copyright CEA/DAM/DIF (2010, 2011, 2012) # Contributor: Henri DOREAU # Contributor: Stephane THIELL # # This file is part of the ClusterShell library. # # This software is governed by the CeCILL-C license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/ or redistribute the software under the terms of the CeCILL-C # license as circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. 
# # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-C license and that you accept its terms. """ ClusterShell agent launched on remote gateway nodes. This script reads messages on stdin via the SSH connexion, interprets them, takes decisions, and prints out replies on stdout. 
""" import logging import os import sys from ClusterShell.Event import EventHandler from ClusterShell.NodeSet import NodeSet from ClusterShell.Task import task_self, _getshorthostname from ClusterShell.Engine.Engine import EngineAbortException from ClusterShell.Worker.fastsubprocess import set_nonblock_flag from ClusterShell.Worker.Worker import WorkerSimple from ClusterShell.Worker.Tree import WorkerTree from ClusterShell.Communication import Channel, ConfigurationMessage, \ ControlMessage, ACKMessage, ErrorMessage, EndMessage, StdOutMessage, \ StdErrMessage, RetcodeMessage, TimeoutMessage class WorkerTreeResponder(EventHandler): """Gateway WorkerTree handler""" def __init__(self, task, gwchan, srcwkr): EventHandler.__init__(self) self.gwchan = gwchan # gateway channel self.srcwkr = srcwkr # id of distant parent WorkerTree self.worker = None # local WorkerTree instance # For messages grooming qdelay = task.info("grooming_delay") self.timer = task.timer(qdelay, self, qdelay, autoclose=True) self.logger = logging.getLogger(__name__) self.logger.debug("WorkerTreeResponder: initialized") def ev_start(self, worker): self.logger.debug("WorkerTreeResponder: ev_start") self.worker = worker def ev_timer(self, timer): """perform gateway traffic grooming""" if not self.worker: return logger = self.logger # check for grooming opportunities for msg_elem, nodes in self.worker.iter_errors(): logger.debug("iter(stderr): %s: %d bytes" % \ (nodes, len(msg_elem.message()))) self.gwchan.send(StdErrMessage(nodes, msg_elem.message(), \ self.srcwkr)) for msg_elem, nodes in self.worker.iter_buffers(): logger.debug("iter(stdout): %s: %d bytes" % \ (nodes, len(msg_elem.message()))) self.gwchan.send(StdOutMessage(nodes, msg_elem.message(), \ self.srcwkr)) self.worker.flush_buffers() def ev_error(self, worker): self.logger.debug("WorkerTreeResponder: ev_error %s" % \ worker.current_errmsg) def ev_timeout(self, worker): """Received timeout event: some nodes did timeout""" 
self.gwchan.send(TimeoutMessage( \ NodeSet._fromlist1(worker.iter_keys_timeout()), self.srcwkr)) def ev_close(self, worker): """End of responder""" self.logger.debug("WorkerTreeResponder: ev_close") # finalize grooming self.ev_timer(None) # send retcodes for rc, nodes in self.worker.iter_retcodes(): self.logger.debug("iter(rc): %s: rc=%d" % (nodes, rc)) self.gwchan.send(RetcodeMessage(nodes, rc, self.srcwkr)) self.timer.invalidate() # clean channel closing ####self.gwchan.close() class GatewayChannel(Channel): """high level logic for gateways""" def __init__(self, task, hostname): """ """ Channel.__init__(self) self.task = task self.hostname = hostname self.topology = None self.propagation = None self.logger = logging.getLogger(__name__) self.current_state = None self.states = { 'CFG': self._state_cfg, 'CTL': self._state_ctl, 'GTR': self._state_gtr, } def start(self): """initialization""" self._open() # prepare to receive topology configuration self.current_state = self.states['CFG'] self.logger.debug('entering config state') def close(self): """close gw channel""" self.logger.debug('closing gw channel') self._close() self.current_state = None def recv(self, msg): """handle incoming message""" try: self.logger.debug('handling incoming message: %s', str(msg)) if msg.ident == EndMessage.ident: self.logger.debug('recv: got EndMessage') self.worker.abort() else: self.current_state(msg) except Exception, ex: self.logger.exception('on recv(): %s', str(ex)) self.send(ErrorMessage(str(ex))) def _state_cfg(self, msg): """receive topology configuration""" if msg.type == ConfigurationMessage.ident: self.topology = msg.data_decode() task_self().topology = self.topology self.logger.debug('decoded propagation tree') self.logger.debug('%s' % str(self.topology)) self._ack(msg) self.current_state = self.states['CTL'] self.logger.debug('entering control state') else: logging.error('unexpected message: %s', str(msg)) def _state_ctl(self, msg): """receive control message with actions 
to perform""" if msg.type == ControlMessage.ident: self.logger.debug('GatewayChannel._state_ctl') self._ack(msg) if msg.action == 'shell': data = msg.data_decode() cmd = data['cmd'] stderr = data['stderr'] timeout = data['timeout'] #self.propagation.invoke_gateway = data['invoke_gateway'] self.logger.debug('decoded gw invoke (%s)', \ data['invoke_gateway']) taskinfo = data['taskinfo'] task = task_self() task._info = taskinfo task._engine.info = taskinfo #logging.setLevel(logging.DEBUG) self.logger.debug('assigning task infos (%s)' % \ str(data['taskinfo'])) self.logger.debug('inherited fanout value=%d', \ task.info("fanout")) #self.current_state = self.states['GTR'] self.logger.debug('launching execution/enter gathering state') responder = WorkerTreeResponder(task, self, msg.srcid) self.propagation = WorkerTree(msg.target, responder, timeout, command=cmd, topology=self.topology, newroot=self.hostname, stderr=stderr) responder.worker = self.propagation # FIXME ev_start-not-called workaround self.propagation.upchannel = self task.schedule(self.propagation) self.logger.debug("WorkerTree scheduled") else: logging.error('unexpected message: %s', str(msg)) def _state_gtr(self, msg): """gather outputs""" # FIXME self.logger.debug('GatewayChannel._state_gtr') self.logger.debug('incoming output msg: %s' % str(msg)) def _ack(self, msg): """acknowledge a received message""" self.send(ACKMessage(msg.msgid)) def gateway_main(): """ClusterShell gateway entry point""" host = _getshorthostname() # configure root logger logdir = os.path.expanduser(os.environ.get('CLUSTERSHELL_GW_LOG_DIR', \ '/tmp')) loglevel = os.environ.get('CLUSTERSHELL_GW_LOG_LEVEL', 'INFO') logging.basicConfig(level=getattr(logging, loglevel.upper(), logging.INFO), format='%(asctime)s %(name)s %(levelname)s %(message)s', filename=os.path.join(logdir, "%s.gw.log" % host)) logger = logging.getLogger(__name__) logger.debug('Starting gateway on %s', host) logger.debug("environ=%s" % os.environ) 
set_nonblock_flag(sys.stdin.fileno()) set_nonblock_flag(sys.stdout.fileno()) set_nonblock_flag(sys.stderr.fileno()) task = task_self() # Pre-enable MsgTree buffering on gateway (not available at runtime - #181) task.set_default("stdout_msgtree", True) task.set_default("stderr_msgtree", True) if sys.stdin.isatty(): logger.critical('Gateway failure: sys.stdin.isatty() is True') sys.exit(1) worker = WorkerSimple(sys.stdin, sys.stdout, sys.stderr, None, handler=GatewayChannel(task, host)) task.schedule(worker) logger.debug('Starting task') try: task.resume() logger.debug('Task performed') except EngineAbortException, exc: pass except IOError, exc: logger.debug('Broken pipe (%s)' % exc) raise except Exception, exc: logger.exception('Gateway failure: %s' % exc) logger.debug('The End') if __name__ == '__main__': __name__ = 'ClusterShell.Gateway' # To enable gateway profiling: #import cProfile #cProfile.run('gateway_main()', '/tmp/gwprof') gateway_main() clustershell-1.6/lib/ClusterShell/Topology.py0000644000130500135250000003706711741571247021005 0ustar thiellgpocre#!/usr/bin/env python # # Copyright CEA/DAM/DIF (2010, 2011, 2012) # Contributor: Henri DOREAU # Contributor: Stephane THIELL # # This file is part of the ClusterShell library. # # This software is governed by the CeCILL-C license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/ or redistribute the software under the terms of the CeCILL-C # license as circulated by CEA, CNRS and INRIA at the following URL # "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. 
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-C license and that you accept its terms.

"""
ClusterShell topology module

This module contains the network topology parser and its related
classes. These classes are used to build a topology tree of nodegroups
according to the configuration file.

This file must be written using the following syntax:

# for now only [Main] tree is taken in account:
[Main]
admin: first_level_gateways[0-10]
first_level_gateways[0-10]: second_level_gateways[0-100]
second_level_gateways[0-100]: nodes[0-2000]
...
"""

import ConfigParser

from ClusterShell.NodeSet import NodeSet


class TopologyError(Exception):
    """topology parser error to report invalid configurations or parsing
    errors
    """

class TopologyNodeGroup(object):
    """Base element for in-memory representation of the propagation tree.
    Contains a nodeset, with parent-children relationships with other
    instances.
    """
    def __init__(self, nodeset=None):
        """Initialize with an optional NodeSet."""
        # Base nodeset
        self.nodeset = nodeset
        # Parent TopologyNodeGroup (TNG) instance
        self.parent = None
        # List of children TNG instances
        self._children = []
        self._children_len = 0
        # provided for convenience: aggregated NodeSet of all children
        self._children_ns = None

    def printable_subtree(self, prefix=''):
        """recursive method that returns a printable version the subtree from
        the current node with a nice presentation
        """
        res = ''
        # For now, it is ok to use a recursive method here as we consider that
        # tree depth is relatively small.
        if self.parent is None:
            # root
            res = '%s\n' % str(self.nodeset)
        elif self.parent.parent is None:
            # first level
            if not self._is_last():
                res = '|- %s\n' % str(self.nodeset)
            else:
                res = '`- %s\n' % str(self.nodeset)
        else:
            # deepest levels...
            if not self.parent._is_last():
                prefix += '| '
            else:
                # fix last line
                prefix += '  '
            if not self._is_last():
                res = '%s|- %s\n' % (prefix, str(self.nodeset))
            else:
                res = '%s`- %s\n' % (prefix, str(self.nodeset))
        # perform recursive calls to print out every node
        for child in self._children:
            res += child.printable_subtree(prefix)
        return res

    def add_child(self, child):
        """add a child to the children list and define the current instance as
        its parent
        """
        assert isinstance(child, TopologyNodeGroup)

        if child in self._children:
            return
        child.parent = self
        self._children.append(child)
        # keep the children nodeset aggregation up to date
        if self._children_ns is None:
            self._children_ns = NodeSet()
        self._children_ns.add(child.nodeset)

    def clear_child(self, child, strict=False):
        """remove a child

        With strict=True, raise ValueError if the child is unknown;
        otherwise removal of an unknown child is a no-op.
        """
        try:
            self._children.remove(child)
            self._children_ns.difference_update(child.nodeset)
            if len(self._children_ns) == 0:
                self._children_ns = None
        except ValueError:
            if strict:
                raise

    def clear_children(self):
        """delete all children"""
        self._children = []
        self._children_ns = None

    def children(self):
        """get the children list"""
        return self._children

    def children_ns(self):
        """return the children as a nodeset"""
        return self._children_ns

    def children_len(self):
        """returns the number of children as the sum of the size of the
        children's nodeset
        """
        if self._children_ns is None:
            return 0
        else:
            return len(self._children_ns)

    def _is_last(self):
        """used to display the subtree: we won't prefix the line the same way
        if the current instance is the last child of the children list of its
        parent.
        """
        return self.parent._children[-1::][0] == self

    def __str__(self):
        """printable representation of the nodegroup"""
        # NOTE(review): as written this evaluates to '' -- it looks like
        # angle-bracketed text (e.g. '<TopologyNodeGroup %s>') was lost
        # during extraction; confirm against upstream before relying on it.
        return '' % str(self.nodeset)

class TopologyTree(object):
    """represent a simplified network topology as a tree of machines to use to
    connect to other ones
    """
    class TreeIterator:
        """efficient tool for tree-traversal"""
        def __init__(self, tree):
            """we do simply manage a stack with the remaining nodes"""
            self._stack = [tree.root]

        def next(self):
            """return the next node in the stack or raise a StopIteration
            exception if the stack is empty
            """
            if len(self._stack) > 0 and self._stack[0] is not None:
                node = self._stack.pop()
                # depth-first traversal: push children on the stack
                self._stack += node.children()
                return node
            else:
                raise StopIteration()

    def __init__(self):
        # root TopologyNodeGroup and flat list of all groups
        self.root = None
        self.groups = []

    def load(self, rootnode):
        """load topology tree"""
        self.root = rootnode

        # iterative depth-first walk filling self.groups
        stack = [rootnode]
        while len(stack) > 0:
            curr = stack.pop()
            self.groups.append(curr)
            if curr.children_len() > 0:
                stack += curr.children()

    def __iter__(self):
        """provide an iterator on the tree's elements"""
        return TopologyTree.TreeIterator(self)

    def __str__(self):
        """printable representation of the tree"""
        if self.root is None:
            return ''
        return self.root.printable_subtree()

class TopologyRoute(object):
    """A single route between two nodesets"""
    def __init__(self, src_ns, dst_ns):
        """both src_ns and dst_ns are expected to be non-empty NodeSet
        instances

        Raises TopologyError if the two nodesets overlap.
        """
        self.src = src_ns
        self.dst = dst_ns
        if len(src_ns & dst_ns) != 0:
            raise TopologyError(
                'Source and destination nodesets overlap')

    def dest(self, nodeset=None):
        """get the route's destination. The optionnal argument serves for
        convenience and provides a way to use the method for a subset of the
        whole source nodeset
        """
        if nodeset is None or nodeset in self.src:
            return self.dst
        else:
            return None

    def __str__(self):
        """printable representation"""
        return '%s -> %s' % (str(self.src), str(self.dst))

class TopologyRoutingTable(object):
    """This class provides a convenient way to store and manage topology
    routes
    """
    def __init__(self):
        """Initialize an empty routing table."""
        self._routes = []
        # aggregated views of every route source/destination, used for
        # validation and leaf detection
        self.aggregated_src = NodeSet()
        self.aggregated_dst = NodeSet()

    def add_route(self, route):
        """add a new route to the table. The route argument is expected to be
        a TopologyRoute instance

        Raises TopologyError when the route would introduce a loop or
        convergent paths.
        """
        if self._introduce_circular_reference(route):
            raise TopologyError(
                'Loop detected! Cannot add route %s' % str(route))
        if self._introduce_convergent_paths(route):
            raise TopologyError(
                'Convergent path detected! Cannot add route %s' % str(route))

        self._routes.append(route)

        self.aggregated_src.add(route.src)
        self.aggregated_dst.add(route.dst)

    def connected(self, src_ns):
        """find out and return the aggregation of directly connected children
        from src_ns.
        Argument src_ns is expected to be a NodeSet instance. Result is
        returned as a NodeSet instance (or None when nothing is connected)
        """
        next_hop = NodeSet.fromlist([dst for dst in \
            [route.dest(src_ns) for route in self._routes] if dst is not None])
        if len(next_hop) == 0:
            return None
        return next_hop

    def __str__(self):
        """printable representation"""
        return '\n'.join([str(route) for route in self._routes])

    def __iter__(self):
        """return an iterator over the list of routes"""
        return iter(self._routes)

    def _introduce_circular_reference(self, route):
        """check whether the last added route adds a topology loop or not"""
        current_ns = route.dst
        # iterate over the destinations until we find None or we come back on
        # the src
        while True:
            _dest = self.connected(current_ns)
            if _dest is None or len(_dest) == 0:
                return False
            if len(_dest & route.src) != 0:
                return True
            current_ns = _dest

    def _introduce_convergent_paths(self, route):
        """check for undesired convergent paths"""
        for known_route in self._routes:
            # source cannot be a superset of an already known destination
            if route.src > known_route.dst:
                return True
            # same thing...
            if route.dst < known_route.src:
                return True
            # two different nodegroups cannot point to the same one
            if len(route.dst & known_route.dst) != 0 \
                and route.src != known_route.src:
                return True
        return False

class TopologyGraph(object):
    """represent a complete network topology by storing every "can reach"
    relations between nodes.
    """
    def __init__(self):
        """Initialize an empty topology graph."""
        self._routing = TopologyRoutingTable()
        # mapping: nodeset string -> TopologyNodeGroup
        self._nodegroups = {}
        self._root = ''

    def add_route(self, src_ns, dst_ns):
        """add a new route from src nodeset to dst nodeset. The destination
        nodeset must not overlap with already known destination nodesets
        (otherwise a TopologyError is raised)
        """
        assert isinstance(src_ns, NodeSet)
        assert isinstance(dst_ns, NodeSet)

        #print 'adding %s -> %s' % (str(src_ns), str(dst_ns))
        self._routing.add_route(TopologyRoute(src_ns, dst_ns))

    def dest(self, from_nodeset):
        """return the aggregation of the destinations for a given nodeset"""
        return self._routing.connected(from_nodeset)

    def to_tree(self, root):
        """convert the routing table to a topology tree of nodegroups"""
        # convert the routing table into a table of linked TopologyNodeGroup's
        self._routes_to_tng()
        # ensure this is a valid pseudo-tree
        self._validate(root)
        tree = TopologyTree()
        tree.load(self._nodegroups[self._root])
        return tree

    def __str__(self):
        """printable representation of the graph"""
        res = '\n'
        res += '\n'.join(['%s: %s' % (str(k), str(v)) for k, v in \
            self._nodegroups.iteritems()])
        return res

    def _routes_to_tng(self):
        """convert the routing table into a graph of TopologyNodeGroup
        instances. Loops are not very expensive here as the number of routes
        will always be much lower than the number of nodes.
        """
        # instanciate nodegroups as biggest groups of nodes sharing both
        # parent and destination
        aggregated_src = self._routing.aggregated_src
        for route in self._routing:
            self._nodegroups[str(route.src)] = TopologyNodeGroup(route.src)
            # create a nodegroup for the destination if it is a leaf group.
            # Otherwise, it will be created as src for another route
            leaf = route.dst - aggregated_src
            if len(leaf) > 0:
                self._nodegroups[str(leaf)] = TopologyNodeGroup(leaf)

        # add the parent <--> children relationships
        for group in self._nodegroups.itervalues():
            dst_ns = self._routing.connected(group.nodeset)
            if dst_ns is not None:
                for child in self._nodegroups.itervalues():
                    if child.nodeset in dst_ns:
                        group.add_child(child)

    def _validate(self, root):
        """ensure that the graph is valid for conversion to tree"""
        if len(self._nodegroups) == 0:
            raise TopologyError("No route found in topology definition!")

        # ensure that every node is reachable
        # NOTE(review): src_all/dst_all are computed but never used below;
        # the reachability check appears unfinished.
        src_all = self._routing.aggregated_src
        dst_all = self._routing.aggregated_dst

        # rename the key of the group containing the root node so that
        # lookups by root name succeed
        res = [(k, v) for k, v in self._nodegroups.items() if root in v.nodeset]
        if len(res) > 0:
            kgroup, group = res[0]
            del self._nodegroups[kgroup]
            self._nodegroups[root] = group
        else:
            raise TopologyError('"%s" is not a valid root node!' % root)

        self._root = root

class TopologyParser(ConfigParser.ConfigParser):
    """This class offers a way to interpret network topologies supplied under
    the form :

    # Comment
    : 

    NOTE(review): the syntax example above was damaged during extraction
    (angle-bracketed placeholders stripped); it presumably read
    "src_nodeset: dst_nodeset" -- confirm against upstream documentation.
    """
    def __init__(self):
        """instance wide variables initialization"""
        ConfigParser.ConfigParser.__init__(self)
        self.optionxform = str # case sensitive parser

        self._topology = {}
        self.graph = None
        self._tree = None

    def load(self, filename):
        """read a given topology configuration file and store the results in
        self._routes. Then build a propagation tree.
        """
        try:
            self.read(filename)
            self._topology = self.items("Main")
        except ConfigParser.Error:
            raise TopologyError(
                'Invalid configuration file: %s' % filename)
        self._build_graph()

    def _build_graph(self):
        """build a network topology graph according to the information we got
        from the configuration file.
        """
        self.graph = TopologyGraph()
        for src, dst in self._topology:
            self.graph.add_route(NodeSet(src), NodeSet(dst))

    def tree(self, root, force_rebuild=False):
        """Return a previously generated propagation tree or build it if
        required. As rebuilding tree can be quite expensive, once built,
        the propagation tree is cached. you can force a re-generation
        using the optional `force_rebuild' parameter.
        """
        if self._tree is None or force_rebuild:
            self._tree = self.graph.to_tree(root)
        return self._tree
clustershell-1.6/lib/ClusterShell/Task.py0000644000130500135250000014345711741571247020074 0ustar  thiellgpocre#
# Copyright CEA/DAM/DIF (2007, 2008, 2009, 2010, 2011, 2012)
# Contributor: Stephane THIELL
#
# This file is part of the ClusterShell library.
#
# This software is governed by the CeCILL-C license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL-C
# license as circulated by CEA, CNRS and INRIA at the following URL
# "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge.
Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-C license and that you accept its terms. """ ClusterShell Task module. Simple example of use: >>> from ClusterShell.Task import task_self >>> >>> # get task associated with calling thread ... task = task_self() >>> >>> # add a command to execute on distant nodes ... task.shell("/bin/uname -r", nodes="tiger[1-30,35]") >>> >>> # run task in calling thread ... task.resume() >>> >>> # get results ... for buf, nodelist in task.iter_buffers(): ... print NodeSet.fromlist(nodelist), buf ... """ from itertools import imap import logging from operator import itemgetter import socket import sys import threading from time import sleep import traceback from ClusterShell.Engine.Engine import EngineAbortException from ClusterShell.Engine.Engine import EngineTimeoutException from ClusterShell.Engine.Engine import EngineAlreadyRunningError from ClusterShell.Engine.Engine import EngineTimer from ClusterShell.Engine.Factory import PreferredEngine from ClusterShell.Worker.EngineClient import EnginePort from ClusterShell.Worker.Ssh import WorkerSsh from ClusterShell.Worker.Popen import WorkerPopen from ClusterShell.Worker.Tree import WorkerTree from ClusterShell.Event import EventHandler from ClusterShell.MsgTree import MsgTree from ClusterShell.NodeSet import NodeSet from ClusterShell.Topology import TopologyParser, TopologyError from ClusterShell.Propagation import PropagationTreeRouter, PropagationChannel class TaskException(Exception): """Base task exception.""" class TaskError(TaskException): """Base task error exception.""" class TimeoutError(TaskError): """Raised when the task timed out.""" class 
AlreadyRunningError(TaskError): """Raised when trying to resume an already running task.""" class TaskMsgTreeError(TaskError): """Raised when trying to access disabled MsgTree.""" def _getshorthostname(): """Get short hostname (host name cut at the first dot)""" return socket.gethostname().split('.')[0] def _task_print_debug(task, s): """ Default task debug printing function. Cannot provide 'print' directly as it is not a function (will be in Py3k!). """ print s class Task(object): """ The Task class defines an essential ClusterShell object which aims to execute commands in parallel and easily get their results. More precisely, a Task object manages a coordinated (ie. with respect of its current parameters) collection of independent parallel Worker objects. See ClusterShell.Worker.Worker for further details on ClusterShell Workers. Always bound to a specific thread, a Task object acts like a "thread singleton". So most of the time, and even more for single-threaded applications, you can get the current task object with the following top-level Task module function: >>> task = task_self() However, if you want to create a task in a new thread, use: >>> task = Task() To create or get the instance of the task associated with the thread object thr (threading.Thread): >>> task = Task(thread=thr) To submit a command to execute locally within task, use: >>> task.shell("/bin/hostname") To submit a command to execute to some distant nodes in parallel, use: >>> task.shell("/bin/hostname", nodes="tiger[1-20]") The previous examples submit commands to execute but do not allow result interaction during their execution. For your program to interact during command execution, it has to define event handlers that will listen for local or remote events. These handlers are based on the EventHandler class, defined in ClusterShell.Event. 
The following example shows how to submit a command on a cluster with a registered event handler: >>> task.shell("uname -r", nodes="node[1-9]", handler=MyEventHandler()) Run task in its associated thread (will block only if the calling thread is the task associated thread): >>> task.resume() or >>> task.run() You can also pass arguments to task.run() to schedule a command exactly like in task.shell(), and run it: >>> task.run("hostname", nodes="tiger[1-20]", handler=MyEventHandler()) A common need is to set a maximum delay for command execution, especially when the command time is not known. Doing this with ClusterShell Task is very straighforward. To limit the execution time on each node, use the timeout parameter of shell() or run() methods to set a delay in seconds, like: >>> task.run("check_network.sh", nodes="tiger[1-20]", timeout=30) You can then either use Task's iter_keys_timeout() method after execution to see on what nodes the command has timed out, or listen for ev_timeout() events in your event handler. To get command result, you can either use Task's iter_buffers() method for standard output, iter_errors() for standard error after command execution (common output contents are automatically gathered), or you can listen for ev_read() and ev_error() events in your event handler and get live command output. To get command return codes, you can either use Task's iter_retcodes(), node_retcode() and max_retcode() methods after command execution, or listen for ev_hup() events in your event handler. 
""" _std_default = { "stderr" : False, "stdout_msgtree" : True, "stderr_msgtree" : True, "engine" : 'auto', "port_qlimit" : 100, "auto_tree" : False, "topology_file" : "/etc/clustershell/topology.conf" } _std_info = { "debug" : False, "print_debug" : _task_print_debug, "fanout" : 64, "grooming_delay" : 0.25, "connect_timeout" : 10, "command_timeout" : 0 } _tasks = {} _taskid_max = 0 _task_lock = threading.Lock() class _SyncMsgHandler(EventHandler): """Special task control port event handler. When a message is received on the port, call appropriate task method.""" def ev_msg(self, port, msg): """Message received: call appropriate task method.""" # pull out function and its arguments from message func, (args, kwargs) = msg[0], msg[1:] # call task method func(port.task, *args, **kwargs) class tasksyncmethod(object): """Class encapsulating a function that checks if the calling task is running or is the current task, and allowing it to be used as a decorator making the wrapped task method thread-safe.""" def __call__(self, f): def taskfunc(*args, **kwargs): # pull out the class instance task, fargs = args[0], args[1:] # check if the calling task is the current thread task if task._is_task_self(): return f(task, *fargs, **kwargs) elif task._dispatch_port: # no, safely call the task method by message # through the task special dispatch port task._dispatch_port.msg_send((f, fargs, kwargs)) else: task.info("print_debug")(task, "%s: dropped call: %s" % \ (task, str(fargs))) # modify the decorator meta-data for pydoc # Note: should be later replaced by @wraps (functools) # as of Python 2.5 taskfunc.__name__ = f.__name__ taskfunc.__doc__ = f.__doc__ taskfunc.__dict__ = f.__dict__ taskfunc.__module__ = f.__module__ return taskfunc class _SuspendCondition(object): """Special class to manage task suspend condition.""" def __init__(self, lock=threading.RLock(), initial=0): self._cond = threading.Condition(lock) self.suspend_count = initial def atomic_inc(self): """Increase suspend 
count.""" self._cond.acquire() self.suspend_count += 1 self._cond.release() def atomic_dec(self): """Decrease suspend count.""" self._cond.acquire() self.suspend_count -= 1 self._cond.release() def wait_check(self, release_lock=None): """Wait for condition if needed.""" self._cond.acquire() try: if self.suspend_count > 0: if release_lock: release_lock.release() self._cond.wait() finally: self._cond.release() def notify_all(self): """Signal all threads waiting for condition.""" self._cond.acquire() try: self.suspend_count = min(self.suspend_count, 0) self._cond.notifyAll() finally: self._cond.release() def __new__(cls, thread=None): """ For task bound to a specific thread, this class acts like a "thread singleton", so new style class is used and new object are only instantiated if needed. """ if thread: if thread not in cls._tasks: cls._tasks[thread] = object.__new__(cls) return cls._tasks[thread] return object.__new__(cls) def __init__(self, thread=None): """ Initialize a Task, creating a new thread if needed. 
""" if not getattr(self, "_engine", None): # first time called self._default_lock = threading.Lock() self._default = self.__class__._std_default.copy() self._info = self.__class__._std_info.copy() # use factory class PreferredEngine that gives the proper # engine instance self._engine = PreferredEngine(self.default("engine"), self._info) self.timeout = 0 # task synchronization objects self._run_lock = threading.Lock() # primitive lock self._suspend_lock = threading.RLock() # reentrant lock # both join and suspend conditions share the same underlying lock self._suspend_cond = Task._SuspendCondition(self._suspend_lock, 1) self._join_cond = threading.Condition(self._suspend_lock) self._suspended = False self._quit = False # Default router self.topology = None self.router = None self.pwrks = {} self.pmwkrs = {} # STDIN tree self._msgtree = None # STDERR tree self._errtree = None # dict of sources to return codes self._d_source_rc = {} # dict of return codes to sources self._d_rc_sources = {} # keep max rc self._max_rc = 0 # keep timeout'd sources self._timeout_sources = set() # allow no-op call to getters before resume() self._reset() # special engine port for task method dispatching self._dispatch_port = EnginePort(self, handler=Task._SyncMsgHandler(), autoclose=True) self._engine.add(self._dispatch_port) # set taskid used as Thread name Task._task_lock.acquire() Task._taskid_max += 1 self._taskid = Task._taskid_max Task._task_lock.release() # create new thread if needed self._thread_foreign = bool(thread) if self._thread_foreign: self.thread = thread else: self.thread = thread = \ threading.Thread(None, Task._thread_start, "Task-%d" % self._taskid, args=(self,)) Task._tasks[thread] = self thread.start() def _is_task_self(self): """Private method used by the library to check if the task is task_self(), but do not create any task_self() instance.""" return self.thread == threading.currentThread() def default_excepthook(self, exc_type, exc_value, tb): """Default 
excepthook for a newly Task. When an exception is raised and uncaught on Task thread, excepthook is called, which is default_excepthook by default. Once excepthook overriden, you can still call default_excepthook if needed.""" print >> sys.stderr, 'Exception in thread %s:' % self.thread traceback.print_exception(exc_type, exc_value, tb, file=sys.stderr) _excepthook = default_excepthook def _getexcepthook(self): return self._excepthook def _setexcepthook(self, hook): self._excepthook = hook # If thread has not been created by us, install sys.excepthook which # might handle uncaught exception. if self._thread_foreign: sys.excepthook = self._excepthook # When an exception is raised and uncaught on Task's thread, # excepthook is called. You may want to override this three # arguments method (very similar of what you can do with # sys.excepthook).""" excepthook = property(_getexcepthook, _setexcepthook) def _thread_start(self): """Task-managed thread entry point""" while not self._quit: self._suspend_cond.wait_check() if self._quit: break try: self._resume() except: self.excepthook(*sys.exc_info()) self._quit = True self._terminate(kill=True) def _run(self, timeout): """Run task (always called from its self thread).""" # check if task is already running if self._run_lock.locked(): raise AlreadyRunningError("task is already running") # use with statement later try: self._run_lock.acquire() self._engine.run(timeout) finally: self._run_lock.release() def set_topology(self, topology_file): """Set new propagation topology from provided file.""" self.set_default("topology_file", topology_file) self.topology = self._default_topology() def _default_topology(self): try: parser = TopologyParser() parser.load(self.default("topology_file")) return parser.tree(_getshorthostname()) except TopologyError, exc: logging.getLogger(__name__).exception("_default_topology(): %s", \ str(exc)) raise return None def _default_router(self): if self.router is None: topology = self.topology 
self.router = PropagationTreeRouter(str(topology.root.nodeset), \ topology) return self.router def default(self, default_key, def_val=None): """ Return per-task value for key from the "default" dictionary. See set_default() for a list of reserved task default_keys. """ self._default_lock.acquire() try: return self._default.get(default_key, def_val) finally: self._default_lock.release() def set_default(self, default_key, value): """ Set task value for specified key in the dictionary "default". Users may store their own task-specific key, value pairs using this method and retrieve them with default(). Task default_keys are: - "stderr": Boolean value indicating whether to enable stdout/stderr separation when using task.shell(), if not specified explicitly (default: False). - "stdout_msgtree": Whether to enable standard output MsgTree for automatic internal gathering of result messages (default: True). - "stderr_msgtree": Same for stderr (default: True). - "engine": Used to specify an underlying Engine explicitly (default: "auto"). - "port_qlimit": Size of port messages queue (default: 32). Threading considerations ======================== Unlike set_info(), when called from the task's thread or not, set_default() immediately updates the underlying dictionary in a thread-safe manner. This method doesn't wake up the engine when called. """ self._default_lock.acquire() try: self._default[default_key] = value finally: self._default_lock.release() def info(self, info_key, def_val=None): """ Return per-task information. See set_info() for a list of reserved task info_keys. """ return self._info.get(info_key, def_val) @tasksyncmethod() def set_info(self, info_key, value): """ Set task value for a specific key information. Key, value pairs can be passed to the engine and/or workers. Users may store their own task-specific info key, value pairs using this method and retrieve them with info(). 
The following example changes the fanout value to 128: >>> task.set_info('fanout', 128) The following example enables debug messages: >>> task.set_info('debug', True) Task info_keys are: - "debug": Boolean value indicating whether to enable library debugging messages (default: False). - "print_debug": Debug messages processing function. This function takes 2 arguments: the task instance and the message string (default: an internal function doing standard print). - "fanout": Max number of registered clients in Engine at a time (default: 64). - "grooming_delay": Message maximum end-to-end delay requirement used for traffic grooming, in seconds as float (default: 0.5). - "connect_timeout": Time in seconds to wait for connecting to remote host before aborting (default: 10). - "command_timeout": Time in seconds to wait for a command to complete before aborting (default: 0, which means unlimited). Threading considerations ======================== Unlike set_default(), the underlying info dictionary is only modified from the task's thread. So calling set_info() from another thread leads to queueing the request for late apply (at run time) using the task dispatch port. When received, the request wakes up the engine when the task is running and the info dictionary is then updated. """ self._info[info_key] = value def shell(self, command, **kwargs): """ Schedule a shell command for local or distant parallel execution. This essential method creates a local or remote Worker (depending on the presence of the nodes parameter) and immediately schedules it for execution in task's runloop. So, if the task is already running (ie. called from an event handler), the command is started immediately, assuming current execution contraintes are met (eg. fanout value). If the task is not running, the command is not started but scheduled for late execution. See resume() to start task runloop. 
The following optional parameters are passed to the underlying local or remote Worker constructor: - handler: EventHandler instance to notify (on event) -- default is no handler (None) - timeout: command timeout delay expressed in second using a floating point value -- default is unlimited (None) - autoclose: if set to True, the underlying Worker is automatically aborted as soon as all other non-autoclosing task objects (workers, ports, timers) have finished -- default is False - stderr: separate stdout/stderr if set to True -- default is False. Local usage:: task.shell(command [, key=key] [, handler=handler] [, timeout=secs] [, autoclose=enable_autoclose] [, stderr=enable_stderr]) Distant usage:: task.shell(command, nodes=nodeset [, handler=handler] [, timeout=secs], [, autoclose=enable_autoclose] [, strderr=enable_stderr], [tree=None|False|True]) Example: >>> task = task_self() >>> task.shell("/bin/date", nodes="node[1-2345]") >>> task.resume() """ handler = kwargs.get("handler", None) timeo = kwargs.get("timeout", None) autoclose = kwargs.get("autoclose", False) stderr = kwargs.get("stderr", self.default("stderr")) if kwargs.get("nodes", None): assert kwargs.get("key", None) is None, \ "'key' argument not supported for distant command" tree = kwargs.get("tree") if tree and self.topology is None: raise TaskError("tree mode required for distant shell command" \ " with unknown topology!") if tree is None: # means auto tree = self.default("auto_tree") and (self.topology is not None) if tree: # create tree of ssh worker worker = WorkerTree(NodeSet(kwargs["nodes"]), command=command, handler=handler, stderr=stderr, timeout=timeo, autoclose=autoclose) else: # create ssh-based worker worker = WorkerSsh(NodeSet(kwargs["nodes"]), command=command, handler=handler, stderr=stderr, timeout=timeo, autoclose=autoclose) else: # create (local) worker worker = WorkerPopen(command, key=kwargs.get("key", None), handler=handler, stderr=stderr, timeout=timeo, autoclose=autoclose) # 
schedule worker for execution in this task self.schedule(worker) return worker def copy(self, source, dest, nodes, **kwargs): """ Copy local file to distant nodes. """ assert nodes != None, "local copy not supported" handler = kwargs.get("handler", None) stderr = kwargs.get("stderr", self.default("stderr")) timeo = kwargs.get("timeout", None) preserve = kwargs.get("preserve", None) reverse = kwargs.get("reverse", False) # create a new copy worker worker = WorkerSsh(nodes, source=source, dest=dest, handler=handler, stderr=stderr, timeout=timeo, preserve=preserve, reverse=reverse) self.schedule(worker) return worker def rcopy(self, source, dest, nodes, **kwargs): """ Copy distant file or directory to local node. """ kwargs['reverse'] = True return self.copy(source, dest, nodes, **kwargs) @tasksyncmethod() def _add_port(self, port): """Add an EnginePort instance to Engine (private method).""" self._engine.add(port) @tasksyncmethod() def _remove_port(self, port): """Remove a port from Engine (private method).""" self._engine.remove(port) def port(self, handler=None, autoclose=False): """ Create a new task port. A task port is an abstraction object to deliver messages reliably between tasks. Basic rules: - A task can send messages to another task port (thread safe). - A task can receive messages from an acquired port either by setting up a notification mechanism or using a polling mechanism that may block the task waiting for a message sent on the port. - A port can be acquired by one task only. If handler is set to a valid EventHandler object, the port is a send-once port, ie. a message sent to this port generates an ev_msg event notification issued the port's task. If handler is not set, the task can only receive messages on the port by calling port.msg_recv(). 
""" port = EnginePort(self, handler, autoclose) self._add_port(port) return port def timer(self, fire, handler, interval=-1.0, autoclose=False): """ Create a timer bound to this task that fires at a preset time in the future by invoking the ev_timer() method of `handler' (provided EventHandler object). Timers can fire either only once or repeatedly at fixed time intervals. Repeating timers can also have their next firing time manually adjusted. The mandatory parameter `fire' sets the firing delay in seconds. The optional parameter `interval' sets the firing interval of the timer. If not specified, the timer fires once and then is automatically invalidated. Time values are expressed in second using floating point values. Precision is implementation (and system) dependent. The optional parameter `autoclose', if set to True, creates an "autoclosing" timer: it will be automatically invalidated as soon as all other non-autoclosing task's objects (workers, ports, timers) have finished. Default value is False, which means the timer will retain task's runloop until it is invalidated. Return a new EngineTimer instance. See ClusterShell.Engine.Engine.EngineTimer for more details. """ assert fire >= 0.0, \ "timer's relative fire time must be a positive floating number" timer = EngineTimer(fire, interval, autoclose, handler) # The following method may be sent through msg port (async # call) if called from another task. self._add_timer(timer) # always return new timer (sync) return timer @tasksyncmethod() def _add_timer(self, timer): """Add a timer to task engine (thread-safe).""" self._engine.add_timer(timer) @tasksyncmethod() def schedule(self, worker): """ Schedule a worker for execution, ie. add worker in task running loop. Worker will start processing immediately if the task is running (eg. called from an event handler) or as soon as the task is started otherwise. 
Only useful for manually instantiated workers, for example: >>> task = task_self() >>> worker = WorkerSsh("node[2-3]", None, 10, command="/bin/ls") >>> task.schedule(worker) >>> task.resume() """ assert self in Task._tasks.values(), "deleted task" # bind worker to task self worker._set_task(self) # add worker clients to engine for client in worker._engine_clients(): self._engine.add(client) def _resume_thread(self): """Resume task - called from another thread.""" self._suspend_cond.notify_all() def _resume(self): """Resume task - called from self thread.""" assert self.thread == threading.currentThread() try: try: self._reset() self._run(self.timeout) except EngineTimeoutException: raise TimeoutError() except EngineAbortException, exc: self._terminate(exc.kill) except EngineAlreadyRunningError: raise AlreadyRunningError("task engine is already running") finally: # task becomes joinable self._join_cond.acquire() self._suspend_cond.suspend_count += 1 self._join_cond.notifyAll() self._join_cond.release() def resume(self, timeout=0): """ Resume task. If task is task_self(), workers are executed in the calling thread so this method will block until all (non-autoclosing) workers have finished. This is always the case for a single-threaded application (eg. which doesn't create other Task() instance than task_self()). Otherwise, the current thread doesn't block. In that case, you may then want to call task_wait() to wait for completion. Warning: the timeout parameter can be used to set an hard limit of task execution time (in seconds). In that case, a TimeoutError exception is raised if this delay is reached. Its value is 0 by default, which means no task time limit (TimeoutError is never raised). In order to set a maximum delay for individual command execution, you should use Task.shell()'s timeout parameter instead. """ # If you change options here, check Task.run() compatibility. 
self.timeout = timeout self._suspend_cond.atomic_dec() if self._is_task_self(): self._resume() else: self._resume_thread() def run(self, command=None, **kwargs): """ With arguments, it will schedule a command exactly like a Task.shell() would have done it and run it. This is the easiest way to simply run a command. >>> task.run("hostname", nodes="foo") Without argument, it starts all outstanding actions. It behaves like Task.resume(). >>> task.shell("hostname", nodes="foo") >>> task.shell("hostname", nodes="bar") >>> task.run() When used with a command, you can set a maximum delay of individual command execution with the help of the timeout parameter (see Task.shell's parameters). You can then listen for ev_timeout() events in your Worker event handlers, or use num_timeout() or iter_keys_timeout() afterwards. But, when used as an alias to Task.resume(), the timeout parameter sets an hard limit of task execution time. In that case, a TimeoutError exception is raised if this delay is reached. """ worker = None timeout = 0 # Both resume() and shell() support a 'timeout' parameter. We need a # trick to behave correctly for both cases. # # Here, we mock: task.resume(10) if type(command) in (int, float): timeout = command command = None # Here, we mock: task.resume(timeout=10) elif 'timeout' in kwargs and command is None: timeout = kwargs.pop('timeout') # All other cases mean a classical: shell(...) 
# we mock: task.shell("mycommand", [timeout=..., ...]) elif command is not None: worker = self.shell(command, **kwargs) self.resume(timeout) return worker @tasksyncmethod() def _suspend_wait(self): """Suspend request received.""" assert task_self() == self # atomically set suspend state self._suspend_lock.acquire() self._suspended = True self._suspend_lock.release() # wait for special suspend condition, while releasing l_run self._suspend_cond.wait_check(self._run_lock) # waking up, atomically unset suspend state self._suspend_lock.acquire() self._suspended = False self._suspend_lock.release() def suspend(self): """ Suspend task execution. This method may be called from another task (thread-safe). The function returns False if the task cannot be suspended (eg. it's not running), or returns True if the task has been successfully suspended. To resume a suspended task, use task.resume(). """ # first of all, increase suspend count self._suspend_cond.atomic_inc() # call synchronized suspend method self._suspend_wait() # wait for stopped task self._run_lock.acquire() # run_lock ownership transfer # get result: are we really suspended or just stopped? result = True self._suspend_lock.acquire() if not self._suspended: # not acknowledging suspend state, task is stopped result = False self._run_lock.release() self._suspend_lock.release() return result @tasksyncmethod() def _abort(self, kill=False): """Abort request received.""" assert task_self() == self # raise an EngineAbortException when task is running self._engine.abort(kill) def abort(self, kill=False): """ Abort a task. Aborting a task removes (and stops when needed) all workers. If optional parameter kill is True, the task object is unbound from the current thread, so calling task_self() creates a new Task object. 
""" if self._run_lock.acquire(0): self._quit = True self._run_lock.release() if self._is_task_self(): self._terminate(kill) else: # abort on stopped/suspended task self.resume() else: # self._run_lock is locked, call synchronized method self._abort(kill) def _terminate(self, kill): """ Abort completion subroutine. """ if kill: # invalidate dispatch port self._dispatch_port = None # clear engine self._engine.clear(clear_ports=kill) # clear result objects self._reset() # destroy task if needed if kill: Task._task_lock.acquire() try: del Task._tasks[threading.currentThread()] finally: Task._task_lock.release() def join(self): """ Suspend execution of the calling thread until the target task terminates, unless the target task has already terminated. """ self._join_cond.acquire() try: if self._suspend_cond.suspend_count > 0: if not self._suspended: # ignore stopped task return self._join_cond.wait() finally: self._join_cond.release() def running(self): """ Return True if the task is running. """ return self._engine.running def _reset(self): """ Reset buffers and retcodes management variables. """ # check and reset stdout MsgTree if self.default("stdout_msgtree"): if not self._msgtree: self._msgtree = MsgTree() self._msgtree.clear() else: self._msgtree = None # check and reset stderr MsgTree if self.default("stderr_msgtree"): if not self._errtree: self._errtree = MsgTree() self._errtree.clear() else: self._errtree = None # other re-init's self._d_source_rc = {} self._d_rc_sources = {} self._max_rc = 0 self._timeout_sources.clear() def _msg_add(self, source, msg): """ Add a worker message associated with a source. """ msgtree = self._msgtree if msgtree is not None: msgtree.add(source, msg) def _errmsg_add(self, source, msg): """ Add a worker error message associated with a source. """ errtree = self._errtree if errtree is not None: errtree.add(source, msg) def _rc_set(self, source, rc, override=True): """ Add a worker return code associated with a source. 
""" if not override and self._d_source_rc.has_key(source): return # store rc by source self._d_source_rc[source] = rc # store source by rc self._d_rc_sources.setdefault(rc, set()).add(source) # update max rc if rc > self._max_rc: self._max_rc = rc def _timeout_add(self, source): """ Add a worker timeout associated with a source. """ # store source in timeout set self._timeout_sources.add(source) def _msg_by_source(self, source): """ Get a message by its source (worker, key). """ if self._msgtree is None: raise TaskMsgTreeError("stdout_msgtree not set") s = self._msgtree.get(source) if s is None: return None return str(s) def _errmsg_by_source(self, source): """ Get an error message by its source (worker, key). """ if self._errtree is None: raise TaskMsgTreeError("stderr_msgtree not set") s = self._errtree.get(source) if s is None: return None return str(s) def _call_tree_matcher(self, tree_match_func, match_keys=None, worker=None): """Call identified tree matcher (items, walk) method with options.""" # filter by worker and optionally by matching keys if worker and not match_keys: match = lambda k: k[0] is worker elif worker and match_keys: match = lambda k: k[0] is worker and k[1] in match_keys elif match_keys: match = lambda k: k[1] in match_keys else: match = None # Call tree matcher function (items or walk) return tree_match_func(match, itemgetter(1)) def _rc_by_source(self, source): """ Get a return code by its source (worker, key). """ return self._d_source_rc[source] def _rc_iter_by_key(self, key): """ Return an iterator over return codes for the given key. """ for (w, k), rc in self._d_source_rc.iteritems(): if k == key: yield rc def _rc_iter_by_worker(self, worker, match_keys=None): """ Return an iterator over return codes and keys list for a specific worker and optional matching keys. """ if match_keys: # Use the items iterator for the underlying dict. 
for rc, src in self._d_rc_sources.iteritems(): keys = [t[1] for t in src if t[0] is worker and \ t[1] in match_keys] if len(keys) > 0: yield rc, keys else: for rc, src in self._d_rc_sources.iteritems(): keys = [t[1] for t in src if t[0] is worker] if len(keys) > 0: yield rc, keys def _krc_iter_by_worker(self, worker): """ Return an iterator over key, rc for a specific worker. """ for rc, src in self._d_rc_sources.iteritems(): for w, k in src: if w is worker: yield k, rc def _num_timeout_by_worker(self, worker): """ Return the number of timed out "keys" for a specific worker. """ cnt = 0 for (w, k) in self._timeout_sources: if w is worker: cnt += 1 return cnt def _iter_keys_timeout_by_worker(self, worker): """ Iterate over timed out keys (ie. nodes) for a specific worker. """ for (w, k) in self._timeout_sources: if w is worker: yield k def _flush_buffers_by_worker(self, worker): """ Remove any messages from specified worker. """ if self._msgtree is not None: self._msgtree.remove(lambda k: k[0] == worker) def _flush_errors_by_worker(self, worker): """ Remove any error messages from specified worker. """ if self._errtree is not None: self._errtree.remove(lambda k: k[0] == worker) def key_buffer(self, key): """ Get buffer for a specific key. When the key is associated to multiple workers, the resulting buffer will contain all workers content that may overlap. This method returns an empty buffer if key is not found in any workers. """ msgtree = self._msgtree if msgtree is None: raise TaskMsgTreeError("stdout_msgtree not set") select_key = lambda k: k[1] == key return "".join(imap(str, msgtree.messages(select_key))) node_buffer = key_buffer def key_error(self, key): """ Get error buffer for a specific key. When the key is associated to multiple workers, the resulting buffer will contain all workers content that may overlap. This method returns an empty error buffer if key is not found in any workers. 
""" errtree = self._errtree if errtree is None: raise TaskMsgTreeError("stderr_msgtree not set") select_key = lambda k: k[1] == key return "".join(imap(str, errtree.messages(select_key))) node_error = key_error def key_retcode(self, key): """ Return return code for a specific key. When the key is associated to multiple workers, return the max return code from these workers. Raises a KeyError if key is not found in any finished workers. """ codes = list(self._rc_iter_by_key(key)) if not codes: raise KeyError(key) return max(codes) node_retcode = key_retcode def max_retcode(self): """ Get max return code encountered during last run. How retcodes work ================= If the process exits normally, the return code is its exit status. If the process is terminated by a signal, the return code is 128 + signal number. """ return self._max_rc def iter_buffers(self, match_keys=None): """ Iterate over buffers, returns a tuple (buffer, keys). For remote workers (Ssh), keys are list of nodes. In that case, you should use NodeSet.fromlist(keys) to get a NodeSet instance (which is more convenient and efficient): Optional parameter match_keys add filtering on these keys. Usage example: >>> for buffer, nodelist in task.iter_buffers(): ... print NodeSet.fromlist(nodelist) ... print buffer """ msgtree = self._msgtree if msgtree is None: raise TaskMsgTreeError("stdout_msgtree not set") return self._call_tree_matcher(msgtree.walk, match_keys) def iter_errors(self, match_keys=None): """ Iterate over error buffers, returns a tuple (buffer, keys). See iter_buffers(). """ errtree = self._errtree if errtree is None: raise TaskMsgTreeError("stderr_msgtree not set") return self._call_tree_matcher(errtree.walk, match_keys) def iter_retcodes(self, match_keys=None): """ Iterate over return codes, returns a tuple (rc, keys). Optional parameter match_keys add filtering on these keys. How retcodes work ================= If the process exits normally, the return code is its exit status. 
If the process is terminated by a signal, the return code is 128 + signal number. """ if match_keys: # Use the items iterator for the underlying dict. for rc, src in self._d_rc_sources.iteritems(): keys = [t[1] for t in src if t[1] in match_keys] yield rc, keys else: for rc, src in self._d_rc_sources.iteritems(): yield rc, [t[1] for t in src] def num_timeout(self): """ Return the number of timed out "keys" (ie. nodes). """ return len(self._timeout_sources) def iter_keys_timeout(self): """ Iterate over timed out keys (ie. nodes). """ for (w, k) in self._timeout_sources: yield k def flush_buffers(self): """ Flush all task messages (from all task workers). """ if self._msgtree is not None: self._msgtree.clear() def flush_errors(self): """ Flush all task error messages (from all task workers). """ if self._errtree is not None: self._errtree.clear() @classmethod def wait(cls, from_thread): """ Class method that blocks calling thread until all tasks have finished (from a ClusterShell point of view, for instance, their task.resume() return). It doesn't necessarly mean that associated threads have finished. 
""" Task._task_lock.acquire() try: tasks = Task._tasks.copy() finally: Task._task_lock.release() for thread, task in tasks.iteritems(): if thread != from_thread: task.join() def pchannel(self, gateway, metaworker): #gw_invoke_cmd): """Get propagation channel for gateway (create one if needed)""" # create channel if needed if gateway not in self.pwrks: chan = PropagationChannel(self) # invoke gateway timeout = 0 worker = self.shell(metaworker.invoke_gateway, nodes=gateway, handler=chan, timeout=timeout, tree=False) self.pwrks[gateway] = worker else: worker = self.pwrks[gateway] chan = worker.eh if metaworker not in self.pmwkrs: mw = self.pmwkrs[metaworker] = set() else: mw = self.pmwkrs[metaworker] if worker not in mw: #print >>sys.stderr, "pchannel++" worker.metarefcnt += 1 mw.add(worker) return chan def _pchannel_release(self, metaworker): """Release propagation channel""" if metaworker in self.pmwkrs: for worker in self.pmwkrs[metaworker]: #print >>sys.stderr, "pchannel_release2 %s" % worker worker.metarefcnt -= 1 if worker.metarefcnt == 0: #print >>sys.stderr, "worker abort" worker.eh._close() #worker.abort() def task_self(): """ Return the current Task object, corresponding to the caller's thread of control (a Task object is always bound to a specific thread). This function provided as a convenience is available in the top-level ClusterShell.Task package namespace. """ return Task(thread=threading.currentThread()) def task_wait(): """ Suspend execution of the calling thread until all tasks terminate, unless all tasks have already terminated. This function is provided as a convenience and is available in the top-level ClusterShell.Task package namespace. """ Task.wait(threading.currentThread()) def task_terminate(): """ Destroy the Task instance bound to the current thread. A next call to task_self() will create a new Task object. This function provided as a convenience is available in the top-level ClusterShell.Task package namespace. 
""" task_self().abort(kill=True) def task_cleanup(): """ Cleanup routine to destroy all created tasks. This function provided as a convenience is available in the top-level ClusterShell.Task package namespace. This is mainly used for testing purposes and should be avoided otherwise. task_cleanup() may be called from any threads. """ # be sure to return to a clean state (no task at all) while True: Task._task_lock.acquire() try: tasks = Task._tasks.copy() if len(tasks) == 0: break finally: Task._task_lock.release() # send abort to all known tasks (it's needed to retry as we may have # missed the engine notification window (it was just exiting, which is # quite a common case if we didn't task_join() previously), or we may # have lost some task's dispatcher port messages. for task in tasks.itervalues(): task.abort(kill=True) # also, for other task than self, task.abort() is async and performed # through an EngineAbortException, so tell the Python scheduler to give # up control to raise this exception (handled by task._terminate())... sleep(0.001)