lava-coordinator-0.1.7/0000755000175000017500000000000012546502022014661 5ustar neilneil00000000000000lava-coordinator-0.1.7/setup.cfg0000644000175000017500000000007312546502022016502 0ustar neilneil00000000000000[egg_info] tag_build = tag_date = 0 tag_svn_revision = 0 lava-coordinator-0.1.7/PKG-INFO0000644000175000017500000000045112546502022015756 0ustar neilneil00000000000000Metadata-Version: 1.0 Name: lava-coordinator Version: 0.1.7 Summary: LAVA Coordinator daemon for MultiNode Home-page: http://www.linaro.org/engineering/engineering-groups/validation Author: Neil Williams Author-email: neil.williams@linaro.org License: GPL2+ Description: UNKNOWN Platform: UNKNOWN lava-coordinator-0.1.7/etc/0000755000175000017500000000000012546502022015434 5ustar neilneil00000000000000lava-coordinator-0.1.7/etc/lava-coordinator.conf0000644000175000017500000000015212213320555021545 0ustar neilneil00000000000000{ "port": 3079, "blocksize": 4096, "poll_delay": 3, "coordinator_hostname": "localhost" } lava-coordinator-0.1.7/etc/lava-coordinator.service0000644000175000017500000000034312545245263022274 0ustar neilneil00000000000000[Unit] Description=Coordinator daemon for LAVA MultiNode messaging API After=remote-fs.target [Service] ExecStart=/usr/bin/lava-coordinator Type=forking PIDFile=/var/run/lava-coordinator.pid [Install] WantedBy=network.target lava-coordinator-0.1.7/etc/lavacoordinatorlog0000644000175000017500000000017312472351711021256 0ustar neilneil00000000000000/var/log/lava-coordinator.log { weekly rotate 12 compress delaycompress missingok notifempty create 644 root root } lava-coordinator-0.1.7/etc/lava-coordinator.init0000755000175000017500000001026512213320555021574 0ustar neilneil00000000000000#!/bin/sh ### BEGIN INIT INFO # Provides: lava-coordinator # Required-Start: $network $remote_fs # Required-Stop: $remote_fs # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: LAVA MultiNode Coordinator singleton # Description: Coordinator daemon for LAVA MultiNode messaging API ### END INIT INFO # Author: Neil Williams LOGFILE="--logfile /var/log/lava-server/lava-coordinator.log" LOGLEVEL="--loglevel=DEBUG" # PATH should only include /usr/* if it runs after the mountnfs.sh script PATH=/sbin:/usr/sbin:/bin:/usr/bin DESC="lava-coordinator" # short description NAME=lava-coordinato # short server's name (truncated for 15 chars) DAEMON=/usr/bin/lava-coordinator # server's location DAEMON_ARGS="$LOGLEVEL" # Arguments to run the daemon with PIDFILE=/var/run/$DESC.pid SCRIPTNAME=/etc/init.d/$NAME # Exit if the package is not installed [ -x $DAEMON ] || exit 0 # Read configuration variable file if it is present [ -r /etc/default/$NAME ] && . /etc/default/$NAME # Load the VERBOSE setting and other rcS variables #. /lib/init/vars.sh # Define LSB log_* functions. # Depend on lsb-base (>= 3.0-6) to ensure that this file is present. . /lib/lsb/init-functions # # Function that starts the daemon/service # do_start() { # Return # 0 if daemon has been started # 1 if daemon was already running # 2 if daemon could not be started start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \ || return 1 start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON -- \ $DAEMON_ARGS \ || return 2 # Add code here, if necessary, that waits for the process to be ready # to handle requests from services started subsequently which depend # on this one. As a last resort, sleep for some time. 
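	# An illustrative sketch only (not part of the original script): poll
	# for the pidfile written by the daemon rather than sleeping blindly.
	# The 5 second limit here is an assumption, not a value taken from
	# this package:
	#
	#   i=0
	#   while [ ! -s "$PIDFILE" ] && [ "$i" -lt 5 ]; do
	#       sleep 1
	#       i=$((i+1))
	#   done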
} # # Function that stops the daemon/service # do_stop() { # Return # 0 if daemon has been stopped # 1 if daemon was already stopped # 2 if daemon could not be stopped # other if a failure occurred start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME RETVAL="$?" [ "$RETVAL" = 2 ] && return 2 # Wait for children to finish too if this is a daemon that forks # and if the daemon is only ever run from this initscript. # If the above conditions are not satisfied then add some other code # that waits for the process to drop all resources that could be # needed by services started subsequently. A last resort is to # sleep for some time. # Many daemons don't delete their pidfiles when they exit. rm -f $PIDFILE return "$RETVAL" } # # Function that sends a SIGHUP to the daemon/service # do_reload() { # # If the daemon can reload its configuration without # restarting (for example, when it is sent a SIGHUP), # then implement that here. # start-stop-daemon --stop --signal 1 --quiet --pidfile $PIDFILE --name $NAME return 0 } case "$1" in start) [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC " "$NAME" do_start case "$?" in 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;; 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;; esac ;; stop) [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME" do_stop case "$?" in 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;; 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;; esac ;; status) status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $? ;; #reload|force-reload) # # If do_reload() is not implemented then leave this commented out # and leave 'force-reload' as an alias for 'restart'. # #log_daemon_msg "Reloading $DESC" "$NAME" #do_reload #log_end_msg $? #;; restart|force-reload) # # If the "reload" option is implemented then remove the # 'force-reload' alias # log_daemon_msg "Restarting $DESC" "$NAME" do_stop case "$?" in 0|1) do_start case "$?" in 0) log_end_msg 0 ;; 1) log_end_msg 1 ;; # Old process is still running *) log_end_msg 1 ;; # Failed to start esac ;; *) # Failed to stop log_end_msg 1 ;; esac ;; *) #echo "Usage: $SCRIPTNAME {start|stop|restart|reload|force-reload}" >&2 echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2 exit 3 ;; esac lava-coordinator-0.1.7/setup.py0000644000175000017500000000316612546501761016412 0ustar neilneil00000000000000#! /usr/bin/env python # # Copyright (C) 2013 Linaro Limited # # Author: Neil Williams # # This file is part of LAVA Coordinator. # # LAVA Coordinator is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # LAVA Coordinator is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see . 
from setuptools import setup, find_packages setup( name='lava-coordinator', version="0.1.7", author="Neil Williams", author_email="neil.williams@linaro.org", license="GPL2+", description="LAVA Coordinator daemon for MultiNode", url='http://www.linaro.org/engineering/engineering-groups/validation', packages=find_packages(), install_requires=[ "daemon", "lockfile", ], data_files=[ ("/etc/init.d/", ["etc/lava-coordinator.init"]), ("/usr/share/lava-coordinator/", [ "status.py", "etc/lava-coordinator.service" ]), ("/etc/lava-coordinator/", ["etc/lava-coordinator.conf"]), ("/etc/logrotate.d/", ["etc/lavacoordinatorlog"]) ], scripts=[ 'lava-coordinator' ], zip_safe=False, include_package_data=True) lava-coordinator-0.1.7/MANIFEST.in0000644000175000017500000000033112545245263016426 0ustar neilneil00000000000000include etc/lava-coordinator.init include etc/lava-coordinator.conf include etc/lava-coordinator.service include etc/lavacoordinatorlog include status.py include tests/* #include coordinator.py #include testpoller.py lava-coordinator-0.1.7/lava-coordinator0000644000175000017500000001211012546501774020060 0ustar neilneil00000000000000#! /usr/bin/python # Copyright 2013 Linaro Limited # Author Neil Williams # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301, USA. import logging import json import os import sys import optparse import daemon try: import daemon.pidlockfile as pidlockfile except ImportError: from lockfile import pidlockfile from logging.handlers import WatchedFileHandler from lava.coordinator import LavaCoordinator def getDaemonLogger(filePath, log_format=None, loglevel=logging.INFO): logger = logging.getLogger() logger.setLevel(loglevel) try: watchedHandler = WatchedFileHandler(filePath) except Exception as e: return e watchedHandler.setFormatter(logging.Formatter(log_format or '%(asctime)s %(msg)s')) logger.addHandler(watchedHandler) return logger, watchedHandler def readSettings(filename): """ NodeDispatchers need to use the same port and blocksize as the Coordinator, so read the same conffile. """ settings = {"port": 3079, "coordinator_hostname": "localhost", "blocksize": 4 * 1024} with open(filename) as stream: jobdata = stream.read() json_default = json.loads(jobdata) if "port" in json_default: settings['port'] = json_default['port'] if "blocksize" in json_default: settings['blocksize'] = json_default["blocksize"] if "coordinator_hostname" in json_default: settings['coordinator_hostname'] = json_default['coordinator_hostname'] return settings if __name__ == '__main__': # instance settings come from django - the coordinator doesn't use django and is # not necessarily per-instance, so use the command line and a default conf file. 
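    # For reference, the conf file read by readSettings() is flat JSON; the
    # copy shipped in this package (etc/lava-coordinator.conf) reads:
    #
    #   {
    #     "port": 3079,
    #     "blocksize": 4096,
    #     "poll_delay": 3,
    #     "coordinator_hostname": "localhost"
    #   }
    #
    # Any key missing from the file falls back to the defaults hard-coded in
    # readSettings() above; "poll_delay" is carried in the file but not read here.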
pidfile = "/var/run/lava-coordinator.pid" logfile = "/var/log/lava-coordinator.log" conffile = "/etc/lava-coordinator/lava-coordinator.conf" settings = readSettings(conffile) usage = "Usage: %prog [--logfile] --[loglevel]" description = "LAVA Coordinator singleton for LAVA (MultiNode support). The singleton " \ "can support multiple instances. If more than one " \ "Coordinator exists on one machine, each must use a unique port " \ "and should probably use a unique log-file. The directory specified for " \ "the logfile must exist or the default will be used instead." \ "Port number and blocksize are handled in %s" % conffile parser = optparse.OptionParser(usage=usage, description=description) parser.add_option("--logfile", dest="logfile", action="store", type="string", help="log file for the LAVA Coordinator daemon [%s]" % logfile) parser.add_option("--loglevel", dest="loglevel", action="store", type="string", help="logging level [INFO]") (options, args) = parser.parse_args() if options.logfile: if os.path.exists(os.path.dirname(options.logfile)): logfile = options.logfile else: print "No such directory for specified logfile '%s'" % logfile open(logfile, 'w').close() level = logging.INFO if options.loglevel == "DEBUG": level = logging.DEBUG if options.loglevel == "WARNING": level = logging.WARNING if options.loglevel == "ERROR": level = logging.ERROR client_logger, watched_file_handler = getDaemonLogger(logfile, loglevel=level) if isinstance(client_logger, Exception): print("Fatal error creating client_logger: " + str(client_logger)) sys.exit(os.EX_OSERR) # noinspection PyArgumentList lockfile = pidlockfile.PIDLockFile(pidfile) if lockfile.is_locked(): logging.error("PIDFile %s already locked" % pidfile) sys.exit(os.EX_OSERR) context = daemon.DaemonContext( detach_process=True, working_directory=os.getcwd(), pidfile=lockfile, files_preserve=[watched_file_handler.stream], stderr=watched_file_handler.stream, stdout=watched_file_handler.stream) starter = {"coordinator": True, "logging_level": options.loglevel, "host": settings['coordinator_hostname'], "port": settings['port'], "blocksize": settings['blocksize']} with context: logging.info("Running LAVA Coordinator %s %s %d." % (logfile, settings['coordinator_hostname'], settings['port'])) LavaCoordinator(starter).run() lava-coordinator-0.1.7/status.py0000755000175000017500000001212412510166415016564 0ustar neilneil00000000000000#! /usr/bin/python """ Status check for lava-coordinator """ # Copyright 2015 Linaro Limited # Author Neil Williams # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301, USA. import os import sys import socket import json import time import errno from socket import gethostname HOST = 'localhost' # The coordinator hostname default def read_settings(filename): """ NodeDispatchers need to use the same port and blocksize as the Coordinator, so read the same conffile. 
""" settings = {"port": 3079, "coordinator_hostname": "localhost", "blocksize": 4 * 1024} if not os.path.exists(filename): # unknown as there is no usable configuration print "No lava-coordinator configuration file found!" sys.exit(3) with open(filename) as stream: jobdata = stream.read() json_default = json.loads(jobdata) if "port" in json_default: settings['port'] = json_default['port'] if "blocksize" in json_default: settings['blocksize'] = json_default["blocksize"] if "coordinator_hostname" in json_default: settings['coordinator_hostname'] = json_default['coordinator_hostname'] return settings # pylint: disable=too-many-branches,too-many-statements,too-many-locals def lava_poll(port, host, name, request): """ Modified poll equivalent """ errors = [] warnings = [] while True: sock = None count = 0 while count < 5: try: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) sock.connect((host, port)) break except socket.error as exc: if exc.errno == errno.ECONNRESET: warnings.append("connection reset by peer: bug 1020") errors.append("not connected, sleeping for 1 second") time.sleep(1) sock = None count += 1 warnings.append("retrying port %s on %s" % (port, host)) if count >= 5: break msg = { "group_name": "group1", "group_size": 2, "hostname": gethostname(), "role": "client", "client_name": name, "request": request, "message": None } msg_str = json.dumps(msg) msg_len = len(msg_str) try: # send the length as 32bit hexadecimal ret_bytes = sock.send("%08X" % msg_len) if ret_bytes == 0: warnings.append( "zero bytes sent for length - connection closed?") continue ret_bytes = sock.send(msg_str) if ret_bytes == 0: warnings.append( "zero bytes sent for message - connection closed?") continue except socket.error as exc: errors.append("socket error '%d' on send" % exc.message) sock.close() continue try: data = str(sock.recv(8)) # 32bit limit data = sock.recv(1024) except socket.error as exc: errors.append("Exception on receive: %s" % exc) continue try: json_data = json.loads(data) except ValueError: warnings.append("data not JSON %s" % data) break if 'response' not in json_data: errors.append("no response field in data") break if json_data['response'] != 'wait': break else: break sock.shutdown(socket.SHUT_RDWR) sock.close() ret = 0 if errors: ret = 2 elif warnings: ret = 1 if errors or warnings: print "E:%s W:%s" % (errors, warnings) return ret else: return ret def main(): """ Run a simple check on the API """ port = 3079 # The same port as used by the server host = 'localhost' conffile = "/etc/lava-coordinator/lava-coordinator.conf" settings = read_settings(conffile) port = settings['port'] host = settings['coordinator_hostname'] ret1 = lava_poll(port, host, 'status', 'group_data') ret2 = lava_poll(port, host, 'status', 'clear_group') if not ret1 and not ret2: print "status check complete. 
No errors" if ret1 and ret1 >= ret2: sys.exit(ret1) if ret2: sys.exit(ret2) if __name__ == '__main__': main() lava-coordinator-0.1.7/lava_coordinator.egg-info/0000755000175000017500000000000012546502022021701 5ustar neilneil00000000000000lava-coordinator-0.1.7/lava_coordinator.egg-info/not-zip-safe0000644000175000017500000000000112446055652024142 0ustar neilneil00000000000000 lava-coordinator-0.1.7/lava_coordinator.egg-info/PKG-INFO0000644000175000017500000000045112546502022022776 0ustar neilneil00000000000000Metadata-Version: 1.0 Name: lava-coordinator Version: 0.1.7 Summary: LAVA Coordinator daemon for MultiNode Home-page: http://www.linaro.org/engineering/engineering-groups/validation Author: Neil Williams Author-email: neil.williams@linaro.org License: GPL2+ Description: UNKNOWN Platform: UNKNOWN lava-coordinator-0.1.7/lava_coordinator.egg-info/SOURCES.txt0000644000175000017500000000072312546502022023567 0ustar neilneil00000000000000MANIFEST.in lava-coordinator setup.py status.py etc/lava-coordinator.conf etc/lava-coordinator.init etc/lava-coordinator.service etc/lavacoordinatorlog lava/__init__.py lava/coordinator.py lava_coordinator.egg-info/PKG-INFO lava_coordinator.egg-info/SOURCES.txt lava_coordinator.egg-info/dependency_links.txt lava_coordinator.egg-info/not-zip-safe lava_coordinator.egg-info/requires.txt lava_coordinator.egg-info/top_level.txt tests/testpoller.py tests/testpoller.pyclava-coordinator-0.1.7/lava_coordinator.egg-info/top_level.txt0000644000175000017500000000000512546502022024426 0ustar neilneil00000000000000lava lava-coordinator-0.1.7/lava_coordinator.egg-info/dependency_links.txt0000644000175000017500000000000112546502022025747 0ustar neilneil00000000000000 lava-coordinator-0.1.7/lava_coordinator.egg-info/requires.txt0000644000175000017500000000002012546502022024271 0ustar neilneil00000000000000daemon lockfile lava-coordinator-0.1.7/tests/0000755000175000017500000000000012546502022016023 5ustar neilneil00000000000000lava-coordinator-0.1.7/tests/testpoller.py0000644000175000017500000010047212546501774020613 0ustar neilneil00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # testpoller.py # # Copyright 2013 Neil Williams # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301, USA. 
# # import unittest import logging import sys import uuid import json from lava.coordinator import LavaCoordinator bundle_sample = { "test_runs": [ {"software_context": { "sources": [ {"branch_url": "git://git.linaro.org/people/neilwilliams/multinode-yaml.git", "branch_vcs": "git", "project_name": "multinode-yaml", "branch_revision": "f61e707c6d3da75d90735a75e6dc6aca55f1142b"}], "image": {"name": ""}, "packages": [ {"version": "1:2.0.16-1+deb7u1", "name": "acpid"}, {"version": "1:1.2.7.dfsg-13", "name": "zlib1g"}]}, "attachments": [ {"content": "", "pathname": "stdout.log", "mime_type": "text/plain"}, {"content": "", "pathname": "testdef.yaml", "mime_type": "text/plain"}, {"content": "MAo=", "pathname": "return_code", "mime_type": "text/plain"}, {"content": "", "pathname": "run.sh", "mime_type": "text/plain"}], "analyzer_assigned_date": "2013-08-13T19:27:41Z", "time_check_performed": False, "test_results": [ {"result": "pass", "attributes": {}, "attachments": [], "test_case_id": "linux-linaro-ubuntu-pwd"}, {"result": "pass", "attributes": {}, "attachments": [], "test_case_id": "multinode-role-output"}, {"result": "pass", "attributes": {}, "attachments": [], "test_case_id": "multinode-lava-network"}], "testdef_metadata": { "description": "Basic MultiNode test commands for Linux Linaro ubuntu Images", "format": "Lava-Test Test Definition 1.0", "url": "git://git.linaro.org/people/neilwilliams/multinode-yaml.git", "version": "f61e707c6d3da75d90735a75e6dc6aca55f1142b", "location": "GIT"}, "hardware_context": { "devices": [ {"attributes": {"power management": "", "cpuid level": "4", "model": "2", "wp": "yes"}, "description": "Processor #0"}]}, "analyzer_assigned_uuid": "2eb4898b-ba33-42e1-ab71-025f18feef81", "attributes": {"target_group": "da4ed985-80e9-43bf-acd5-4e18d03300b9", "target": "multinode-kvm01", "target.hostname": "multinode-kvm01", "target.device_version": "1.0", "role": "felix", "target.device_type": "kvm", "logging_level": "DEBUG", "group_size": "2"}, "test_id": "smoke-tests-multinode"}], "format": "Dashboard Bundle Format 1.6"} class TestSignals(object): message_str = '' def formatString(self, reply): if type(reply) is dict: for target, messages in reply.items(): for key, value in messages.items(): self.message_str += " %s:%s=%s" % (target, key, value) return self.message_str def checkMessage(self, reply): if reply is not None: self.log = logging.getLogger("testCase") self.log.info("\t" % self.formatString(reply)) class TestSocket(object): response = None header = True log = None message = None passes = 0 signalHandler = None def __init__(self): self.log = logging.getLogger("testCase") self.signalHandler = TestSignals() def send(self, data): if self.header: self.header = False assert(int(data, 16) < 0xFFFE) self.log.info("\tCoordinator header: %d bytes" % int(data, 16)) else: try: json_data = json.loads(data) except ValueError: assert False if not self.response: assert(json_data['response'] == "nack") self.header = True return assert 'response' in json_data self.log.info("\tCoordinator response: '%s'" % json_data['response']) self.log.info("\tdebug: %s" % json.dumps(json_data)) assert(json_data['response'] == self.response) self.passes += 1 if self.message: # we are expecting a message back. 
assert 'message' in json_data self.log.info("\tCoordinator received a message: '%s'" % (json.dumps(json_data['message']))) assert(json_data['message'] == self.message) self.passes += 1 else: # actual calls will discriminate between dict and string replies # according to the call prototype itself if "message" in json_data: if type(json_data['message']) is dict: self.log.info("\tCould have expected a message: '%s'" % json.dumps(json_data['message'])) else: self.log.info("\t" % json_data['message']) self.passes += 1 self.header = True def close(self): self.log.info("\tCoordinator closing.") def clearPasses(self): self.passes = 0 def logPasses(self): if self.passes == 1: self.log.info("\tCoordinator: %d socket test passed" % self.passes) else: self.log.info("\tCoordinator: %d socket tests passed" % self.passes) def prepare(self, name): self.response = name if self.response: self.log.info("\tCoordinator: expecting a response: '%s'" % self.response) def validate(self, message): self.message = message if self.message: self.log.info("\tCoordinator: expecting a message: '%s'" % json.dumps(self.message)) self.signalHandler.checkMessage(self.message) class TestCoordinator(LavaCoordinator): running = True json_data = None group_name = None group_size = 0 client_name = None conn = None log = None def __init__(self): super(LavaCoordinator, self).__init__() self.group_name = str(uuid.uuid4()) self.conn = TestSocket() self.log = logging.getLogger("testCase") self.log.info("") self.json_data = {"request": "testing"} self.client_name = "testpoller" self.log.info("\tStarting test with %s %d %d %s" % (json.dumps(self.json_data), self.rpc_delay, self.blocksize, self.host)) self.expectResponse(None) def newGroup(self, size): self.group_name = str(uuid.uuid4()) self.group_size = size self.log = logging.getLogger("testCase") self.log.info("\tGroup name %s" % self.group_name) # sets up TestSocket for the correct assertions def expectResponse(self, test_name): self.conn.prepare(test_name) def expectMessage(self, message): self.conn.validate(message) def addClient(self, client_name): self.conn.response = "ack" self.client_name = client_name self.log = logging.getLogger("testCase") ret = self._updateData({"client_name": client_name, "group_size": self.group_size, "role": "tester", "hostname": "localhost", "group_name": self.group_name}) self.log.info("\tAdded client_name '%s'. group size now: %d" % (client_name, len(self.group['clients']))) self.log.info("\tCurrent client_name: '%s'" % self.client_name) return ret def addClientRole(self, client_name, role): self.conn.response = "ack" self.client_name = client_name self.log = logging.getLogger("testCase") ret = self._updateData({"client_name": client_name, "group_size": self.group_size, "role": role, "hostname": "localhost", "group_name": self.group_name}) self.log.info("\tAdded client_name '%s' with role '%s'. 
group size now: %d" % (client_name, role, len(self.group['clients']))) self.log.info("\tCurrent client_name: '%s'" % self.client_name) return ret class TestPoller(unittest.TestCase): coord = None role = None def setUp(self): self.coord = TestCoordinator() def _wrapMessage(self, message, role): base_msg = { "timeout": 90, "client_name": self.coord.client_name, "group_name": self.coord.group_name, "role": role, } base_msg.update(message) # uncomment to get verbose output # self.log = logging.getLogger("testCase") # self.log.info("\tmessage content: '%s'" % json.dumps(base_msg)) return base_msg def _switch_client(self, name): self.coord.client_name = name def _cleanup(self): self.log = logging.getLogger("testCase") self.log.info("\tClearing group %s after test" % self.coord.group_name) old_name = self.coord.group_name self.coord.expectResponse("ack") self.coord.expectMessage(None) while self.coord.group_size > 0: self.coord._clearGroupData({"group_name": old_name}) self.coord.group_size -= 1 # clear the group name and data self.assertTrue(self.coord.group['group'] != old_name) self.assertTrue(self.coord.group['group'] == '') self.log.info("\tGroup %s cleared correctly." % old_name) self.coord.conn.clearPasses() def test_01_poll(self): """ Check that an empty message gives an empty response """ self.coord.dataReceived({}) def test_02_receive(self): """ Explicitly expect an empty response with an empty message """ self.coord.expectResponse(None) self.coord.dataReceived({}) def test_03_missing_client_name(self): """ Send a malformed message with no client_name, expect a warning """ self.log = logging.getLogger("testCase") self.log.info("\tExpect warning of a missing client name in request") ret = self.coord._updateData({"group_name": self.coord.group_name}) self.assertTrue(ret is None) def test_04_missing_group_size(self): """ Send a malformed message with no group_size, expect a warning. """ self.log = logging.getLogger("testCase") self.log.info("\tExpect warning of new group without specifying the size of the group") ret = self.coord._updateData({ "client_name": self.coord.client_name, "group_name": self.coord.group_name }) self.assertTrue(ret is None) def test_05_start_group_incomplete(self): """ Create a group but fail to populate it with enough devices and cleanup """ self.coord.group_name = str(uuid.uuid4()) self.coord.group_size = 2 self.coord.conn.response = "ack" self.coord.client_name = "incomplete" self.log = logging.getLogger("testCase") ret = self.coord._updateData( {"client_name": self.coord.client_name, "group_size": self.coord.group_size, "role": "tester", "hostname": "localhost", "group_name": self.coord.group_name}) self.log.info("\tAdded client_name '%s'. group size now: %d" % (self.coord.client_name, len(self.coord.group['clients']))) self.log.info("\tCurrent client_name: '%s'" % self.coord.client_name) self.coord.group_size = 1 self.assertTrue(ret == "incomplete") self._cleanup() def test_06_start_group_complete(self): """ Create a group with enough devices and check for no errors. """ self.coord.newGroup(2) ret = self.coord.addClient("completing") self.assertTrue(ret == "completing") ret = self.coord.addClient("completed") self.assertTrue(ret == "completed") self._cleanup() def test_07_lava_send_check(self): """ Create a deliberate typo of an API call and check for a warning. 
""" self.coord.newGroup(2) self.coord.addClient("node_one") self.coord.addClient("node_two") self.log = logging.getLogger("testCase") self.log.info("\tExpect warning of an unrecognised request due to deliberate typo.") self.coord.expectResponse("nack") send_msg = {"request": "lava-send", "messageID": "sending_test", "message": None} self.coord.dataReceived(self._wrapMessage(send_msg, "tester")) self.coord.expectResponse("ack") send_msg = {"request": "lava_send", "messageID": "sending_test", "message": None} self.coord.dataReceived(self._wrapMessage(send_msg, "tester")) self._cleanup() def test_08_lava_send_keypair(self): """ lava-send key=value - expect an ack """ self.coord.newGroup(2) self.coord.addClient("node one") self.coord.addClient("node two") send_msg = {"request": "lava_send", "messageID": "keyvalue_test", "message": { "key": "value" }} self.log = logging.getLogger("testCase") self.log.info("\tINF: simply send a message and check for ack") self.coord.expectResponse("ack") self.coord.dataReceived(self._wrapMessage(send_msg, "tester")) self._cleanup() def test_09_lava_wait_check(self): """ lava-wait check without key value pairs """ self.coord.newGroup(2) self.coord.addClient("node_one") self.coord.addClient("node_two") self.coord.expectResponse("ack") send_msg = {"request": "lava_send", "messageID": "sending_test", "message": None} self.coord.dataReceived(self._wrapMessage(send_msg, "tester")) wait_msg = {"request": "lava_wait", "messageID": "missing message", "message": None} self.log = logging.getLogger("testCase") self.log.info("\tINF: wait for a message not already sent.") self.coord.expectResponse("wait") self.coord.dataReceived(self._wrapMessage(wait_msg, "tester")) self.coord.expectResponse("ack") self.log.info("\tINF: wait for a message which has already been sent.") self.coord.expectResponse("ack") self.coord.dataReceived(self._wrapMessage(send_msg, "tester")) self.coord.expectResponse("ack") self._cleanup() def test_10_lava_wait_keypair(self): """ lava-wait check with key=value """ self.coord.newGroup(2) self.coord.addClient("node_one") self.coord.addClient("node_two") self.coord.expectResponse("ack") message = {"key": "value"} send_msg = {"request": "lava_send", "messageID": "keyvalue_test", "message": message} self.coord.dataReceived(self._wrapMessage(send_msg, "tester")) self.coord.expectResponse("ack") message = {self.coord.client_name: {"key": "value"}} self.coord.expectMessage(message) wait_msg = {"request": "lava_wait", "messageID": "keyvalue_test", "message": None} self.coord.dataReceived(self._wrapMessage(wait_msg, "tester")) self.coord.expectMessage(None) self._cleanup() def test_11_lava_wait_all(self): """ lava-wait-all check """ self.coord.newGroup(2) self.coord.addClient("node_one") self.coord.addClient("node_two") self.coord.expectResponse("ack") send_msg = {"request": "lava_send", "messageID": "waitall_test", "message": None} self.coord.dataReceived(self._wrapMessage(send_msg, "tester")) self.log = logging.getLogger("testCase") self.log.info("\tINF: send from node_two first, expect wait") wait_msg = {"request": "lava_wait_all", "messageID": "waitall_test", "message": None} self.coord.expectResponse("wait") self.coord.dataReceived(self._wrapMessage(wait_msg, "tester")) self.log.info("\tINF: test node_one waiting before sending a message itself") # FIXME: this may need to become a "nack" with the node outputting a warning self._switch_client("node_one") self.coord.expectResponse("wait") self.coord.dataReceived(self._wrapMessage(wait_msg, "tester")) 
self.log.info("\tINF: now allow node_one to send the right message") self.coord.expectResponse("ack") self.coord.dataReceived(self._wrapMessage(send_msg, "tester")) self.log.info("\tINF: test node_one after sending a message") self._switch_client("node_one") self.coord.expectResponse("ack") message = {"node_one": {}, "node_two": {}} self.coord.expectMessage(message) self.coord.dataReceived(self._wrapMessage(wait_msg, "tester")) self._cleanup() def test_12_lava_sync(self): """ lava-sync check """ self.coord.newGroup(2) self.coord.addClient("node_one") self.coord.addClient("node_two") self.coord.expectResponse("wait") self.log = logging.getLogger("testCase") self.log.info("\tINF: %s requests a sync" % self.coord.client_name) sync_msg = {"request": "lava_sync", "messageID": "waitall_test", "message": None} self.coord.dataReceived(self._wrapMessage(sync_msg, "tester")) self._switch_client("node_one") self.log.info("\tINF: %s requests a sync" % self.coord.client_name) self.coord.expectResponse("wait") self.coord.dataReceived(self._wrapMessage(sync_msg, "tester")) self.coord.expectResponse("ack") self.coord.dataReceived(self._wrapMessage(sync_msg, "tester")) self._switch_client("node_two") self.log.info("\tINF: %s requests a sync" % self.coord.client_name) self.coord.expectResponse("ack") self.coord.dataReceived(self._wrapMessage(sync_msg, "tester")) self._cleanup() def test_13_lava_wait_all_role(self): """ lava-wait-all check with role limitation. """ self.coord.newGroup(3) self.coord.addClientRole("client_one", "client") self.coord.addClientRole("client_two", "client") self.coord.addClientRole("server", "server") self.log = logging.getLogger("testCase") self._switch_client("client_two") self.log.info("\tINF: one client waiting before lava_send on any client") self.coord.expectResponse("nack") wait_msg = {"request": "lava_wait_all", "messageID": "wait-all-role", "message": None} self.coord.dataReceived(self._wrapMessage(wait_msg, "client")) self.log.info("\tINF: Send a message to this group") send_msg = {"request": "lava_send", "messageID": "wait-all-role", "message": None} self.coord.expectResponse("ack") self.coord.dataReceived(self._wrapMessage(send_msg, "client")) self.log.info("\tINF:one client waiting before lava_send on the other client") self.coord.expectResponse("wait") wait_msg = {"request": "lava_wait_all", "messageID": "wait-all-role", "message": None} self.coord.dataReceived(self._wrapMessage(wait_msg, "client")) self._switch_client("server") # FIXME: this may need to become a "nack" with the node outputting a warning self.log.info("\tINF:server waiting before lava_send on the other client") self.coord.expectResponse("wait") wait_msg = {"request": "lava_wait_all", "messageID": "wait-all-role", "waitrole": "client", "message": None} self.coord.dataReceived(self._wrapMessage(wait_msg, "server")) self._switch_client("client_one") self.log.info("\tINF:Send a message to this group") send_msg = {"request": "lava_send", "messageID": "wait-all-role", "message": None} self.coord.expectResponse("ack") self.coord.dataReceived(self._wrapMessage(send_msg, "client")) wait_msg = {"request": "lava_wait_all", "messageID": "wait-all-role", "waitrole": "client", "message": None} self.coord.expectResponse("ack") message = {"client_two": {}, "client_one": {}} self.coord.expectMessage(message) self.coord.dataReceived(self._wrapMessage(wait_msg, "client")) self._cleanup() def test_14_lava_wait_all_keypair(self): """ lava-wait-all with key value pairs """ self.coord.newGroup(3) 
self.coord.addClientRole("client_one", "client") self.coord.addClientRole("client_two", "client") self.coord.addClientRole("server", "server") self.log = logging.getLogger("testCase") self._switch_client("client_two") self.coord.expectResponse("ack") message = {"key": "value"} send_msg = {"request": "lava_send", "messageID": "keyvalue_test", "message": message} self.coord.dataReceived(self._wrapMessage(send_msg, "client")) self.coord.expectResponse("wait") wait_msg = {"request": "lava_wait_all", "messageID": "keyvalue_test", "message": None} self.coord.dataReceived(self._wrapMessage(wait_msg, "client")) self.coord.dataReceived(self._wrapMessage(wait_msg, "client")) self.coord.dataReceived(self._wrapMessage(wait_msg, "client")) self.log.info("\tINF: wait_all - so other clients need to send before we get the message") self._switch_client("client_one") self.coord.expectResponse("ack") self.coord.dataReceived(self._wrapMessage(send_msg, "client")) self.coord.expectResponse("wait") self.coord.dataReceived(self._wrapMessage(wait_msg, "client")) self.coord.dataReceived(self._wrapMessage(wait_msg, "client")) self.coord.dataReceived(self._wrapMessage(wait_msg, "client")) self.log.info("\tINF: this is a wait_all without a role - so server must send too.") self._switch_client("server") self.coord.expectResponse("ack") self.coord.dataReceived(self._wrapMessage(send_msg, "server")) message = {"client_two": {"key": "value"}, "client_one": {"key": "value"}, "server": {"key": "value"}} self.coord.expectResponse("ack") self.coord.expectMessage(message) self.coord.dataReceived(self._wrapMessage(wait_msg, "server")) self._cleanup() def test_15_lava_wait_all_role_keypair(self): """ lava-wait-all with key value pairs and role limitation. """ self.coord.newGroup(3) self.coord.addClientRole("client_one", "client") self.coord.addClientRole("client_two", "client") self.coord.addClientRole("server", "server") self.log = logging.getLogger("testCase") self._switch_client("client_two") self.coord.expectResponse("ack") message = {"key": "value"} send_msg = {"request": "lava_send", "messageID": "keyvalue_test", "message": message} self.coord.dataReceived(self._wrapMessage(send_msg, "client")) self.coord.expectResponse("wait") wait_msg = {"request": "lava_wait_all", "messageID": "keyvalue_test", "waitrole": "client", "message": None} self.coord.dataReceived(self._wrapMessage(wait_msg, "client")) self.coord.dataReceived(self._wrapMessage(wait_msg, "client")) self.coord.dataReceived(self._wrapMessage(wait_msg, "client")) self.log.info("\tINF: wait_all - so other clients need to send before we get the message") self._switch_client("client_one") self.coord.expectResponse("ack") self.coord.dataReceived(self._wrapMessage(send_msg, "client")) message = {"client_two": {"key": "value"}, "client_one": {"key": "value"}} self.coord.expectResponse("ack") self.coord.expectMessage(message) self.coord.dataReceived(self._wrapMessage(wait_msg, "client")) self.log.info("\tINF: this is a wait_all with a role - so server will be ignored.") self._switch_client("server") self.coord.expectMessage(None) self.coord.expectResponse("ack") self.coord.dataReceived(self._wrapMessage(send_msg, "server")) self.log.info("\tINF: call to wait by the server was ignored.") self.coord.expectResponse("ack") self.log.info("\tINF: checking that the messageID is persistent.") message = {"client_two": {"key": "value"}, "client_one": {"key": "value"}, "server": {"key": "value"}} self.coord.expectMessage(message) self.coord.dataReceived(self._wrapMessage(wait_msg, 
"client")) self._cleanup() def test_16_lava_network(self): """ Simulate calls to lava-network using real data from multinode.validation.linaro.org at the node & coordinator level. """ msg02 = {"message": { "hostname-full": "imx53-02.localdomain", "hostname": "imx53-02", "netmask": "Mask:255.255.0.0", "dns_1": "192.168.1.32", "default-gateway": "192.168.1.1", "ipv6": "addr:", "ipv4": "addr:192.168.106.189"}, "request": "lava_send", "messageID": "network_info"} msg04 = {"message": { "hostname-full": "imx53-04.localdomain", "hostname": "imx53-04", "netmask": "Mask:255.255.0.0", "dns_1": "192.168.1.32", "default-gateway": "192.168.1.1", "ipv6": "addr:", "ipv4": "addr:192.168.106.180"}, "request": "lava_send", "messageID": "network_info"} reply = {"imx53-02": {"hostname-full": "imx53-02.localdomain", "hostname": "imx53-02", "netmask": "Mask:255.255.0.0", "dns_1": "192.168.1.32", "default-gateway": "192.168.1.1", "ipv6": "addr:", "ipv4": "addr:192.168.106.189"}, "imx53-04": {"hostname-full": "imx53-04.localdomain", "hostname": "imx53-04", "netmask": "Mask:255.255.0.0", "dns_1": "192.168.1.32", "default-gateway": "192.168.1.1", "ipv6": "addr:", "ipv4": "addr:192.168.106.180"}} self.coord.newGroup(2) self.coord.addClientRole("imx53-02", "network") self.coord.addClientRole("imx53-04", "network") self.log = logging.getLogger("testCase") self.log = logging.getLogger("testCase") self.log.info("\tINF: Start by sending data for imx53-02 (broadcast)") self._switch_client("imx53-02") self.coord.expectResponse("ack") self.coord.dataReceived(self._wrapMessage(msg02, "network")) self.log.info("\tINF: collect should wait until the other client sends.") wait_msg = {"request": "lava_wait_all", "messageID": "network_info", "message": None} self.coord.expectResponse("wait") self.coord.dataReceived(self._wrapMessage(wait_msg, "network")) self.coord.expectResponse("wait") self.coord.dataReceived(self._wrapMessage(wait_msg, "network")) self.coord.expectResponse("wait") self.coord.dataReceived(self._wrapMessage(wait_msg, "network")) self.log.info("\tINF: Send data for imx53-04") self._switch_client("imx53-04") self.coord.expectResponse("ack") self.coord.dataReceived(self._wrapMessage(msg04, "network")) wait_msg = {"request": "lava_wait_all", "messageID": "network_info", "message": None} self.coord.expectResponse("ack") self.coord.expectMessage(reply) self.coord.dataReceived(self._wrapMessage(wait_msg, "network")) self._cleanup() def test_17_nack_check(self): """ Create a deliberate nack messageID and check for a warning. 
""" self.coord.newGroup(2) self.coord.addClient("node_one") self.coord.addClient("node_two") self.log = logging.getLogger("testCase") self.coord.expectResponse("ack") send_msg = {"request": "lava_send", "messageID": "nack", "message": None} self.coord.dataReceived(self._wrapMessage(send_msg, "tester")) wait_msg = {"request": "lava_wait", "messageID": "nack", "message": None} self.coord.expectMessage({"node_two": {}}) self.coord.dataReceived(self._wrapMessage(wait_msg, "tester")) self._cleanup() def test_18_aggregation(self): """ Check that a syb_id zero waits for all pending result bundles """ self.coord.newGroup(3) self.coord.addClient("controller") self.coord.addClient("node_one") self.coord.addClient("node_two") self.log = logging.getLogger("testCase") self._switch_client("controller") self.coord.expectResponse("nack") send_msg = {"request": "aggregate"} self.coord.dataReceived(self._wrapMessage(send_msg, "tester")) send_msg["bundle"] = None self.coord.expectResponse("nack") self.coord.dataReceived(self._wrapMessage(send_msg, "tester")) send_msg["sub_id"] = None self.coord.expectResponse("nack") self.coord.dataReceived(self._wrapMessage(send_msg, "tester")) # It is OK to reuse the same bundle - only the database cares about duplicate assigned_uuid fields etc. send_msg['bundle'] = bundle_sample self.coord.expectResponse("nack") self.coord.dataReceived(self._wrapMessage(send_msg, "tester")) self.log.info("Setting a zero sub_id - expect wait") send_msg["sub_id"] = "10.0" for _ in range(6): self.coord.expectResponse("wait") self.coord.dataReceived(self._wrapMessage(send_msg, "tester")) self._switch_client("node_one") send_msg["sub_id"] = "10.1" self.coord.expectResponse("ack") self.coord.dataReceived(self._wrapMessage(send_msg, "tester")) self._switch_client("controller") send_msg["sub_id"] = "10.0" for _ in range(6): self.coord.expectResponse("wait") self.coord.dataReceived(self._wrapMessage(send_msg, "tester")) self._switch_client("node_two") send_msg["sub_id"] = "10.2" self.coord.expectResponse("ack") self.coord.dataReceived(self._wrapMessage(send_msg, "tester")) self._switch_client("controller") send_msg["sub_id"] = "10.0" for _ in range(self.coord.rpc_delay): self.coord.expectResponse("wait") self.coord.dataReceived(self._wrapMessage(send_msg, "tester")) self.coord.expectResponse("ack") self.coord.dataReceived(self._wrapMessage(send_msg, "tester")) self._cleanup() def main(): FORMAT = '%(msg)s' logging.basicConfig(format=FORMAT) logging.basicConfig(stream=sys.stderr) logging.getLogger("testCase").setLevel(logging.DEBUG) suite = unittest.TestLoader().loadTestsFromTestCase(TestPoller) runner = unittest.TextTestRunner(verbosity=2) res = runner.run(suite) if not res.wasSuccessful(): sys.exit(1) return 0 if __name__ == '__main__': main() lava-coordinator-0.1.7/tests/testpoller.pyc0000644000175000017500000006535412472342133020755 0ustar neilneil00000000000000ó ĹéTc@sÐddlZddlZddlZddlZddlZddlmZii iidd6dd6dd6d d 6gd 6id d 6d6idd6dd 6idd6dd 6gd6d6id d6dd6dd6id d6dd6dd6idd6dd6dd6id d6dd6dd6gd6d d!6ed"6id#d$6id%6gd6d&d'6id#d$6id%6gd6d(d'6id#d$6id%6gd6d)d'6gd*6id+d,6d-d.6dd/6d d6d0d16d26iiid d36d4d56d6d76d8d96d%6d:d,6gd;6d<6d=d>6id?d@6dAdB6dAdC6dDdE6dFdG6dHdI6dJdK6d6dL6d%6dMdN6gdO6dPd.6ZdQe fdR„ƒYZ dSe fdT„ƒYZ dUefdV„ƒYZ dWej fdX„ƒYZdY„ZedZkrÌeƒndS([iÿÿÿÿN(tLavaCoordinators;git://git.linaro.org/people/neilwilliams/multinode-yaml.gitt branch_urltgitt branch_vcssmultinode-yamlt 
lava-coordinator-0.1.7/lava/0000755000175000017500000000000012546502022015604 5ustar neilneil00000000000000
lava-coordinator-0.1.7/lava/coordinator.py0000644000175000017500000006001012546501774020513 0ustar neilneil00000000000000
import time
import socket
import logging
import json

# Copyright 2013 Linaro Limited
# Author Neil Williams
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.


class LavaCoordinator(object):

    running = False
    delay = 1
    rpc_delay = 2
    blocksize = 4 * 1024
    all_groups = {}
    # All data handling for each connection happens on this local reference
    # into the all_groups dict with a new group looked up each time.
    group = None
    conn = None
    host = "localhost"
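    # A sketch of the per-group state managed below, assembled from
    # _clear_group() and the request handlers (illustrative, not a schema
    # from the original source):
    #
    #   all_groups = {
    #       "<group_name>": {
    #           "group": "<group_name>",
    #           "count": <group_size>,    # expected number of clients
    #           "complete": <int>,        # clients which have sent clear_group
    #           "rpc_delay": <int>,       # remaining grace polls for sub_id zero
    #           "clients": {client_name: hostname},
    #           "roles": {role: [client_name, ...]},
    #           "syncs": {messageID: {client_name: 0 | 1}},
    #           "messages": {client_name: {messageID: {client_name: message}}},
    #           "waits": {messageID: {client_name: {}, "data": {client_name: message}}},
    #           "bundles": {client_name: bundle},
    #       },
    #   }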
    def __init__(self, json_data):
        """
        Initialises the LAVA Coordinator singleton.
        A single Coordinator serves all groups managed by a lava-server or
        lab, including supporting groups across different instances, if that
        is desired. Different coordinators on one machine must run on
        different ports.

        :param json_data: incoming target_group based data used to determine
            the port
        """
        self.group_port = 3079
        if 'port' in json_data:
            self.group_port = json_data['port']
        if 'blocksize' in json_data:
            self.blocksize = json_data['blocksize']
        if 'host' in json_data:
            self.host = json_data['host']

    def run(self):
        s = None
        while True:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            try:
                logging.debug("binding to %s:%s" % (self.host, self.group_port))
                s.bind(('0.0.0.0', self.group_port))
                break
            except socket.error as e:
                logging.warn("Unable to bind, trying again with delay=%d msg=%s" % (self.delay, e))
                time.sleep(self.delay)
                self.delay *= 2
        s.listen(1)
        self.running = True
        while self.running:
            logging.info("Ready to accept new connections")
            self.conn, addr = s.accept()
            # read the header to get the size of the message to follow
            data = str(self.conn.recv(8))  # 32bit limit
            try:
                count = int(data, 16)
            except ValueError:
                logging.debug("Invalid message: %s from %s" % (data, self.conn.getpeername()[0]))
                self.conn.close()
                continue
            # get the message itself. recv() may return fewer bytes than
            # blocksize, so track the bytes actually received rather than
            # counting loop iterations.
            data = ''
            while len(data) < count:
                chunk = self.conn.recv(self.blocksize)
                if not chunk:
                    break
                data += chunk
            try:
                json_data = json.loads(data)
            except ValueError:
                logging.warn("JSON error for '%s'" % data[:100])
                self.conn.close()
                continue
            self.dataReceived(json_data)
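    # A minimal client-side sketch of the framing run() expects: an
    # 8-character hexadecimal length header followed by the JSON payload,
    # with the reply framed the same way by _formatMessage() below. The
    # helper name is illustrative; it is not part of this package:
    #
    #   def send_request(request, host="localhost", port=3079):
    #       msgstr = json.dumps(request)
    #       sock = socket.create_connection((host, port))
    #       sock.send("%08X" % len(msgstr))
    #       sock.send(msgstr)
    #       header = sock.recv(8)
    #       reply = sock.recv(int(header, 16))  # single recv: sketch only
    #       sock.close()
    #       return json.loads(reply)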
    def _updateData(self, json_data):
        """
        Sanity checks the JSON data and retrieves the data for the group
        specified.

        :param json_data: JSON request
        :return: the client_name specified in the JSON, to allow the message
            handler to look up the correct messages within this group.
        """
        self._clear_group()
        if 'client_name' in json_data:
            client_name = json_data['client_name']
        else:
            logging.error("Missing client_name in request: %s" % json_data)
            return None
        if json_data['group_name'] not in self.all_groups:
            if "group_size" not in json_data or json_data["group_size"] == 0:
                logging.error('%s asked for a new group %s without specifying the size of the group'
                              % (client_name, json_data['group_name']))
                return None
            # auto register a new group
            self.group["count"] = int(json_data["group_size"])
            self.group["group"] = json_data["group_name"]
            self.all_groups[json_data["group_name"]] = self.group
            logging.info("The %s group will contain %d nodes." % (self.group["group"], self.group["count"]))
        self.group = self.all_groups[json_data['group_name']]
        # now add this client to the registered data for this group
        if client_name not in self.group['clients']:
            self.group['clients'][client_name] = json_data['hostname']
            if json_data['role'] not in self.group['roles']:
                self.group['roles'][json_data['role']] = []
            self.group['roles'][json_data['role']].append(client_name)
        return client_name

    def _clear_group(self):
        self.group = {
            'group': '',
            'count': 0,
            'complete': 0,
            'rpc_delay': self.rpc_delay,
            'clients': {},
            'roles': {},
            'syncs': {},
            'messages': {},
            'waits': {},
            'bundles': {}
        }

    def _clearGroupData(self, json_data):
        """
        Clears the group data once all nodes have finished.
        Nodes do *not* wait for other nodes to finish.

        :param json_data: incoming JSON request
        """
        if 'group_name' not in json_data:
            self._badRequest()
            return
        if json_data['group_name'] not in self.all_groups:
            self._badRequest()
            return
        self.group['complete'] += 1
        logging.debug("clear Group Data: %d of %d" % (self.group['complete'], len(self.group['clients'])))
        self._ackResponse()
        if len(self.group['clients']) > self.group['complete']:
            return
        logging.debug("Clearing group data for %s" % json_data['group_name'])
        del self.all_groups[json_data['group_name']]
        self._clear_group()

    def _setGroupData(self, json_data):
        """
        Implements the wait until all clients in this group have connected.

        :rtype : None
        :param json_data: incoming JSON request
        """
        if len(self.group['clients']) != self.group['count']:
            logging.info("Waiting for %d more clients to connect to %s group" %
                         (self.group['count'] - len(self.group['clients']), json_data['group_name']))
            # group_data is not complete yet.
            self._waitResponse()
            return
        logging.info("Group complete, starting tests")
        # client_name must be unique because it's the DB index & conf file name
        group_data = {}
        for role in self.group['roles']:
            for client in self.group['roles'][role]:
                group_data[client] = role
        msg = {"response": "group_data", "roles": group_data}
        msgdata = self._formatMessage(msg)
        if msgdata:
            self.conn.send(msgdata[0])
            self.conn.send(msgdata[1])
        self.conn.close()

    def _formatMessage(self, message):
        """
        Prepares the LAVA Coordinator header and a JSON string of the message
        ready for transmission. Currently, the header is just the length of
        the JSON string as a hexadecimal string padded to 8 characters
        (not including 0x).

        :param message: Python object suitable for conversion into JSON
        :rtype : a tuple - the first value is the header, the second value is
            the data to send; returns None if the message could not be
            formatted.
        """
        try:
            msgstr = json.dumps(message)
        except (TypeError, ValueError):
            # json.dumps raises TypeError for unserialisable objects
            return None
        # "header" calculation
        msglen = "%08X" % len(msgstr)
        if int(msglen, 16) > 0xFFFFFFFF:
            logging.error("Message was too long to send! %d > %d" %
                          (int(msglen, 16), 0xFFFFFFFF))
            return None
        return msglen, msgstr

    def _sendMessage(self, client_name, messageID):
        """
        Sends a message to the currently connected client.
        (the "connection name" or hostname of the connected client does not
        necessarily match the name of the client registered with the group.)

        :param client_name: the client_name to lookup for the message
        :param messageID: the message index set by lavaSend
        :rtype : None
        """
        if client_name not in self.group['messages'] or messageID not in self.group['messages'][client_name]:
            logging.error("Unable to find messageID %s for client %s" % (messageID, client_name))
            self._badRequest()
            return
        logging.info("Sending messageID '%s' to %s in group %s: %s" %
                     (messageID, client_name, self.group['group'],
                      json.dumps(self.group['messages'][client_name][messageID])))
        msg = {"response": "ack", "message": self.group['messages'][client_name][messageID]}
        msgdata = self._formatMessage(msg)
        if msgdata:
            logging.info("Sending response to %s in group %s: %s" %
                         (client_name, self.group['group'], json.dumps(msg)))
            self.conn.send(msgdata[0])
            self.conn.send(msgdata[1])
        self.conn.close()
    def _sendWaitMessage(self, client_name, messageID):
        """
        Sends a wait message to the currently connected client.
        (the "connection name" or hostname of the connected client does not
        necessarily match the name of the client registered with the group.)

        :param client_name: the client_name to lookup for the message
        :param messageID: the message index set by lavaSend
        :rtype : None
        """
        if messageID not in self.group['waits'] or client_name not in self.group['waits'][messageID]:
            logging.error("Unable to find messageID %s for client %s" % (messageID, client_name))
            self._badRequest()
            return
        logging.info("Sending wait messageID '%s' to %s in group %s: %s" %
                     (messageID, client_name, self.group['group'],
                      json.dumps(self.group['waits'][messageID]['data'])))
        msg = {"response": "ack", "message": self.group['waits'][messageID]['data']}
        msgdata = self._formatMessage(msg)
        if msgdata:
            logging.info("Sending wait response to %s in group %s: %s" %
                         (client_name, self.group['group'], json.dumps(msg)))
            self.conn.send(msgdata[0])
            self.conn.send(msgdata[1])
        self.conn.close()

    def _getMessage(self, json_data):
        # message value is allowed to be None as long as the message key exists.
        if 'message' not in json_data:
            return {}
        if 'messageID' not in json_data:
            logging.error("No 'messageID' key found in request %s when looking for message." % json.dumps(json_data))
            return {}
        if json_data['message'] is None:
            return {}
        return json_data['message']

    def _getMessageID(self, json_data):
        if 'messageID' not in json_data:
            logging.error("No 'messageID' key found in request %s when looking for ID" % json.dumps(json_data))
            return None
        return json_data['messageID']

    def _badRequest(self):
        msgdata = self._formatMessage({"response": "nack"})
        if msgdata:
            self.conn.send(msgdata[0])
            self.conn.send(msgdata[1])
        self.conn.close()

    def _ackResponse(self):
        msgdata = self._formatMessage({"response": "ack"})
        if msgdata:
            self.conn.send(msgdata[0])
            self.conn.send(msgdata[1])
        self.conn.close()

    def _waitResponse(self):
        msgdata = self._formatMessage({"response": "wait"})
        if msgdata:
            self.conn.send(msgdata[0])
            self.conn.send(msgdata[1])
        self.conn.close()

    def _aggregateBundle(self, json_data, client_name):
        """
        *All* nodes must call aggregate, even if there is no bundle
        to submit from this board.

        :param json_data: the request header and the bundle itself
        :param client_name: the board identifier in the group data
        """
        if "bundle" not in json_data:
            logging.debug("Aggregate called without a bundle in the JSON")
            self._badRequest()
            return
        if "sub_id" not in json_data or json_data["sub_id"] is None:
            logging.debug("Aggregation called without a valid sub_id in the JSON")
            self._badRequest()
            return
        self.group['bundles'][client_name] = json_data["bundle"]
        if json_data["sub_id"].endswith(".0"):
            logging.info("len:%d count:%d" % (len(self.group['bundles']), self.group['count']))
            if len(self.group['bundles']) < self.group['count']:
                logging.info("Waiting for the rest of the group to complete the job.")
                self._waitResponse()
                self.group['rpc_delay'] = self.rpc_delay
            else:
                # xmlrpc can take time, so allow the last node to submit
                # before finishing the group
                if self.group['rpc_delay'] > 0:
                    logging.debug("Asking sub_id zero to pause while a pending XMLRPC call is made.")
                    self._waitResponse()
                    self.group['rpc_delay'] -= 1
                    return
                logging.debug("Sending bundle list to sub_id zero")
                msg = {"response": "ack", "message": {"bundle": self.group['bundles']}}
                msgdata = self._formatMessage(msg)
                if msgdata:
                    self.conn.send(msgdata[0])
                    self.conn.send(msgdata[1])
                    self.group['rpc_delay'] = self.rpc_delay
                    self.conn.close()
        else:
            logging.debug("not sub_id zero")
            self._ackResponse()
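    # Illustrative note: MultiNode sub_id values are assumed to take the form
    # "<job_id>.<index>", e.g. "1234.0" and "1234.1" for a two-node group, so
    # the endswith(".0") test above selects the coordinating node. A sketch of
    # the final reply that node receives once every client has aggregated:
    #
    #   {"response": "ack",
    #    "message": {"bundle": {"node_one": <bundle>, "node_two": <bundle>}}}
    #
    # where the keys are the client_names which submitted each bundle.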
""" logging.debug("Coordinator:lavaSync %s from %s in group %s" % (json.dumps(json_data), client_name, self.group['group'])) messageID = self._getMessageID(json_data) message = self._getMessage(json_data) # send the messageID as the message if message is empty if not message: message = messageID logging.info("LavaSync request for '%s' at stage '%s' in group '%s'" % (client_name, messageID, self.group['group'])) self.group['syncs'].setdefault(messageID, {}) self.group['messages'].setdefault(client_name, {}).setdefault(messageID, {}) if len(self.group['syncs'][messageID]) >= self.group['count']: self.group['messages'][client_name][messageID] = message self._sendMessage(client_name, messageID) # mark this client as having picked up the message self.group['syncs'][messageID][client_name] = 0 else: logging.info("waiting for '%s': not all clients in group '%s' have been seen yet %d < %d" % (messageID, self.group['group'], len(self.group['syncs'][messageID]), self.group['count'])) self.group['messages'][client_name][messageID] = message self.group['syncs'][messageID][client_name] = 1 self._waitResponse() return # clear the sync data for this messageID when the last client connects to # allow the message to be re-used later for another sync clear_syncs = True for pending in self.group['syncs'][messageID]: if self.group['syncs'][messageID][pending]: clear_syncs = False if clear_syncs: logging.debug("Clearing all sync messages for '%s' in group '%s'" % (messageID, self.group['group'])) self.group['syncs'][messageID].clear() def lavaWaitAll(self, json_data, client_name): """ Waits until all other devices in the group send a message with the given message ID. IF is passed, only wait until all devices with that given role send a message. """ messageID = self._getMessageID(json_data) if 'waitrole' in json_data: expected = self.group['roles'][json_data['waitrole']] expected = expected[0] if type(expected) == list else None logging.debug( "lavaWaitAll waiting for role:%s from %s" % ( json_data['waitrole'], expected) ) for client in self.group['roles'][json_data['role']]: logging.debug("checking %s for wait message" % client) if messageID not in self.group['waits']: logging.debug("messageID %s not yet seen" % messageID) self._waitResponse() return if expected and expected in self.group['waits'][messageID]: logging.debug("Replying that %s has sent %s" % (client_name, messageID)) self._sendWaitMessage(client_name, messageID) return if client not in self.group['waits'][messageID]: logging.debug("FIXME: %s not in waits for %s: %s" % ( client, messageID, self.group['waits'][messageID])) # FIXME: bug? if this client has not sent the messageID yet, # causing it to wait will simply force a timeout. node needs # to output a warning, so maybe send a "nack" ? self._waitResponse() return if client in self.group['waits']: logging.debug("Replying: %s for %s" % (messageID, client_name)) if client_name in self.group['waits']: logging.debug("lavaWaitAll message: %s" % json.dumps(self.group['waits'][client_name][messageID])) else: logging.debug("lavaWaitAll: no role.") for client in self.group['clients']: logging.debug("checking %s for wait message" % client) if messageID not in self.group['waits']: self._badRequest() return if client not in self.group['waits'][messageID]: logging.debug("setting waiting for %s" % client) self._waitResponse() return self._sendWaitMessage(client_name, messageID) def lavaWait(self, json_data, client_name): """ Waits until any other device in the group sends a message with the given ID. 
    def lavaWait(self, json_data, client_name):
        """
        Waits until any other device in the group sends a message with the
        given ID. This call will block the client until such a message is
        sent; the server continues.

        :param json_data: the JSON request
        :param client_name: the client_name to receive the message
        """
        messageID = self._getMessageID(json_data)
        if client_name not in self.group['messages'] or messageID not in self.group['messages'][client_name]:
            logging.debug("MessageID %s not yet seen for %s" % (messageID, client_name))
            self._waitResponse()
            return
        self._sendMessage(client_name, messageID)

    def lavaSend(self, json_data, client_name):
        """
        A message list won't be seen by the destination until the destination
        calls lava_wait or lava_wait_all with the messageID.
        If lava_wait is called first, the message will be sent when the client
        reconnects.
        Messages are broadcast - picked up by lava-wait or lava-sync; any call
        to lava-wait will pick up the complete message.
        Waits are not broadcast - only picked up by lava-wait-all; all calls
        to lava-wait-all will wait until all clients have used lava-send for
        the same messageID.
        """
        message = self._getMessage(json_data)
        messageID = self._getMessageID(json_data)
        logging.info("lavaSend handler in Coordinator received a messageID '%s' for group '%s' from %s" %
                     (messageID, self.group['group'], client_name))
        if client_name not in self.group['messages']:
            self.group['messages'][client_name] = {}
        # construct the message hash which stores the data from each client separately
        # but which gets returned as a complete hash upon request
        msg_hash = {}
        msg_hash.update({client_name: message})
        # always set this client data if the call is made to update the broadcast
        if messageID not in self.group['messages'][client_name]:
            self.group['messages'][client_name][messageID] = {}
        self.group['messages'][client_name][messageID].update(msg_hash)
        logging.debug("message %s for %s" %
                      (json.dumps(self.group['messages'][client_name][messageID]), client_name))
        # now broadcast the message into the other clients in this group
        for client in self.group['clients']:
            if client not in self.group['messages']:
                self.group['messages'][client] = {}
            if messageID not in self.group['messages'][client]:
                self.group['messages'][client][messageID] = {}
            self.group['messages'][client][messageID].update(msg_hash)
            logging.debug("broadcast %s for %s" %
                          (json.dumps(self.group['messages'][client][messageID]), client))
        # separate the waits from the messages for wait-all support
        if messageID not in self.group['waits']:
            self.group['waits'][messageID] = {}
        if client_name not in self.group['waits'][messageID]:
            self.group['waits'][messageID][client_name] = {}
        if 'data' not in self.group['waits'][messageID]:
            self.group['waits'][messageID]['data'] = {}
        self.group['waits'][messageID]['data'].update(msg_hash)
        self._ackResponse()

    def dataReceived(self, json_data):
        """
        Handles all incoming data for the singleton LAVA Coordinator.

        :param json_data: the incoming data stream - expected to be JSON
        """
        if 'request' not in json_data:
            logging.debug("bad data=%s" % json.dumps(json_data))
            self._badRequest()
            return
        request = json_data['request']
        # retrieve the group data for the group which contains this client
        # and get the client name; self-register using the group_size, if necessary
        client_name = self._updateData(json_data)
        if not client_name or not self.group['group']:
            logging.info("no client_name or group found")
            self._badRequest()
            return
        if request == 'group_data':
            self._setGroupData(json_data)
        elif request == "clear_group":
            self._clearGroupData(json_data)
        elif request == "aggregate":
            logging.debug("Aggregate called")
            self._aggregateBundle(json_data, client_name)
        elif request == "lava_sync":
            logging.debug("lava_sync: %s request made by '%s' in group '%s'" %
                          (json.dumps(json_data), client_name, self.group['group']))
            self.lavaSync(json_data, client_name)
        elif request == 'lava_wait_all':
            logging.debug("lava_wait_all: %s" % json_data)
            self.lavaWaitAll(json_data, client_name)
        elif request == 'lava_wait':
            logging.debug("lava_wait: %s" % json_data)
            self.lavaWait(json_data, client_name)
        elif request == 'lava_send':
            logging.info("lava_send: %s" % json_data)
            self.lavaSend(json_data, client_name)
        elif request == "complete":
            logging.info("coordinator communication for '%s' in group '%s' is complete, closing." %
                         (client_name, self.group['group']))
            self.conn.close()
        else:
            logging.error("Unrecognised request %s. Closed connection." % json_data)
            self._badRequest()
lava-coordinator-0.1.7/lava/__init__.py0000644000175000017500000000002312472352601017714 0ustar neilneil00000000000000
import coordinator