landscape-client-14.01/0000755000175000017500000000000012301414317014535 5ustar andreasandreaslandscape-client-14.01/icons/0000755000175000017500000000000012301414317015650 5ustar andreasandreaslandscape-client-14.01/icons/preferences-management-service.svg0000644000175000017500000000462012301414317024444 0ustar andreasandreas image/svg+xml landscape-client-14.01/pqm-tests.sh0000755000175000017500000000256412301414317017040 0ustar andreasandreas#!/bin/sh ERROR=False echo echo $(date) "==> About to run client test suite on Dapper" echo ssh landscape@durian.canonical.com "/srv/landscape-client-testing/scripts/run_tests.sh ${1}" if [ "$?" != 0 ] then ERROR=True echo $(date) "ERROR running client test suite on Dapper" else echo $(date) "Successfully ran client test suite on Dapper" fi echo echo $(date) "==> About to run client test suite on Feisty" echo ssh landscape@lapsi.canonical.com "/srv/landscape-client-testing/scripts/run_tests.sh ${1}" if [ "$?" != 0 ] then ERROR=True echo $(date) "ERROR running client test suite on Feisty" else echo $(date) "Successfully ran client test suite on Feisty" fi echo echo $(date) "==> About to run client test suite on Gutsy" echo ssh landscape@goumi.canonical.com "/srv/landscape-client-testing/scripts/run_tests.sh ${1}" if [ "$?" != 0 ] then ERROR=True echo $(date) "ERROR running client test suite on Gutsy" else echo $(date) "Successfully ran client test suite on Gutsy" fi echo echo $(date) "==> About to run client test suite on Hardy" echo ssh landscape@arhat.canonical.com "/srv/landscape-client-testing/scripts/run_tests.sh ${1}" if [ "$?" != 0 ] then ERROR=True echo $(date) "ERROR running client test suite on Hardy" else echo $(date) "Successfully ran client test suite on Hardy" fi if [ "$ERROR" = "True" ] then exit 1 fi landscape-client-14.01/Makefile0000644000175000017500000001064112301414317016177 0ustar andreasandreasPYDOCTOR ?= pydoctor TXT2MAN ?= txt2man PYTHON ?= python TRIAL_ARGS ?= TEST_COMMAND = trial --unclean-warnings $(TRIAL_ARGS) landscape UBUNTU_RELEASE := $(shell lsb_release -cs) # version in the code is authoritative # Use := here, not =; it's really important, otherwise UPSTREAM_VERSION # will be updated behind your back with the current result of that # command every time it is mentioned/used.
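# An illustrative sketch of the difference (not used by the build): # LAZY = $(shell date +%s) would re-run the shell command at every # reference, so two uses could disagree, while # EAGER := $(shell date +%s) runs the command exactly once, at the # point of definition.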
UPSTREAM_VERSION := $(shell python -c "from landscape import UPSTREAM_VERSION; print UPSTREAM_VERSION") CHANGELOG_VERSION := $(shell dpkg-parsechangelog | grep ^Version | cut -f 2 -d " " | cut -f 1 -d '-') BZR_REVNO := $(shell bzr revno) ifeq (+bzr,$(findstring +bzr,$(UPSTREAM_VERSION))) TARBALL_VERSION := $(UPSTREAM_VERSION) else TARBALL_VERSION := $(UPSTREAM_VERSION)+bzr$(BZR_REVNO) endif all: build build: $(PYTHON) setup.py build_ext -i check: build @if [ -z "$$DBUS_SESSION_BUS_ADDRESS" ]; then \ OUTPUT=`dbus-daemon --print-address=1 --print-pid=1 --session --fork`; \ export DBUS_SESSION_BUS_ADDRESS=`echo $$OUTPUT | cut -f1 -d ' '`; \ DBUS_PID=`echo $$OUTPUT | cut -f2 -d ' '`; \ trap "kill $$DBUS_PID" EXIT; \ fi; \ if [ -z "$$DISPLAY" ]; then \ xvfb-run $(TEST_COMMAND); \ else \ $(TEST_COMMAND); \ fi lint: bzr ls-lint pyflakes: -pyflakes `find landscape -name \*py|grep -v twisted_amp\.py|grep -v configobj\.py|grep -v mocker\.py` clean: -find landscape -name \*.pyc -exec rm {} \; -rm tags -rm _trial_temp -rf -rm docs/api -rf; -rm man/\*.1 -rf -rm sdist -rf doc: docs/api/twisted/pickle mkdir -p docs/api ${PYDOCTOR} --make-html --html-output docs/api --add-package landscape --extra-system=docs/api/twisted/pickle:twisted/ docs/api/twisted/pickle: mkdir -p docs/api/twisted -${PYDOCTOR} --make-html --html-output docs/api/twisted --add-package /usr/share/pyshared/twisted -o docs/api/twisted/pickle manpages: LC_ALL=C ${TXT2MAN} -P Landscape -s 1 -t landscape-client < man/landscape-client.txt > man/landscape-client.1 LC_ALL=C ${TXT2MAN} -P Landscape -s 1 -t landscape-config < man/landscape-config.txt > man/landscape-config.1 LC_ALL=C ${TXT2MAN} -P Landscape -s 1 -t landscape-message < man/landscape-message.txt > man/landscape-message.1 LC_ALL=C ${TXT2MAN} -P Landscape -s 1 -t landscape-sysinfo < man/landscape-sysinfo.txt > man/landscape-sysinfo.1 origtarball: sdist cp -f sdist/landscape-client-$(TARBALL_VERSION).tar.gz \ ../landscape-client_$(TARBALL_VERSION).orig.tar.gz prepchangelog: # add a temporary entry for a local build if needed ifeq (,$(findstring +bzr,$(CHANGELOG_VERSION))) dch -v $(TARBALL_VERSION)-0ubuntu0 "New local test build" --distribution $(UBUNTU_RELEASE) else # just update the timestamp dch --distribution $(UBUNTU_RELEASE) --release $(UBUNTU_RELEASE) endif updateversion: sed -i -e "s/^UPSTREAM_VERSION.*/UPSTREAM_VERSION = \"$(TARBALL_VERSION)\"/g" \ landscape/__init__.py package: clean prepchangelog updateversion debuild -b $(DEBUILD_OPTS) sourcepackage: clean origtarball prepchangelog updateversion # need to remove sdist here because it doesn't exist in the # orig tarball rm -rf sdist debuild -S $(DEBUILD_OPTS) MESSAGE_DIR = `pwd`/runclient-messages LOG_FILE = `pwd`/runclient.log reinstall: -sudo dpkg -P landscape-client -sudo rm -rf /var/log/landscape /etc/landscape /var/lib/landscape /etc/default/landscape-client -sudo apt-get install landscape-client freshdata: -sudo rm -rf $(MESSAGE_DIR) -sudo mkdir $(MESSAGE_DIR) run: -sudo ./landscape-client \ -a onward -t "John's PC" \ -u http://localhost:8080/message-system \ -d $(MESSAGE_DIR) \ --urgent-exchange-interval=5 \ --log-level=debug \ --ping-url=http://localhost:8081/ping \ freshrun: freshdata run tags: -ctags --languages=python -R . etags: -etags --languages=python -R . 
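# Typical local invocations (illustrative): "make check" runs the whole # suite, spawning a private D-Bus session and falling back to xvfb-run # when no DISPLAY is available (see the check target above); extra # flags reach trial through TRIAL_ARGS, e.g. make check TRIAL_ARGS="--reporter=text".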
sdist: clean mkdir -p sdist # --uncommitted because we want any changes the developer might have made # locally to be included in the package without having to commit bzr export --uncommitted sdist/landscape-client-$(TARBALL_VERSION) rm -rf sdist/landscape-client-$(TARBALL_VERSION)/debian sed -i -e "s/^UPSTREAM_VERSION.*/UPSTREAM_VERSION = \"$(TARBALL_VERSION)\"/g" \ sdist/landscape-client-$(TARBALL_VERSION)/landscape/__init__.py cd sdist && tar cfz landscape-client-$(TARBALL_VERSION).tar.gz landscape-client-$(TARBALL_VERSION) cd sdist && md5sum landscape-client-$(TARBALL_VERSION).tar.gz > landscape-client-$(TARBALL_VERSION).tar.gz.md5 rm -rf sdist/landscape-client-$(TARBALL_VERSION) .PHONY: tags etags landscape-client-14.01/landscape/0000755000175000017500000000000012301414317016467 5ustar andreasandreaslandscape-client-14.01/landscape/schema.py0000644000175000017500000001626412301414317020312 0ustar andreasandreas"""A schema system. Yes. Another one!""" class InvalidError(Exception): """Raised when invalid input is received.""" pass class Constant(object): """Something that must be equal to a constant value.""" def __init__(self, value): self.value = value def coerce(self, value): if value != self.value: raise InvalidError("%r != %r" % (value, self.value)) return value class Any(object): """Something which must apply to any of a number of different schemas. @param schemas: Other schema objects. """ def __init__(self, *schemas): self.schemas = schemas def coerce(self, value): """ The result of the first schema which doesn't raise L{InvalidError} from its C{coerce} method will be returned. """ for schema in self.schemas: try: return schema.coerce(value) except InvalidError: pass raise InvalidError("%r did not match any schema in %s" % (value, self.schemas)) class Bool(object): """Something that must be a C{bool}.""" def coerce(self, value): if not isinstance(value, bool): raise InvalidError("%r is not a bool" % (value,)) return value class Int(object): """Something that must be an C{int} or C{long}.""" def coerce(self, value): if not isinstance(value, (int, long)): raise InvalidError("%r isn't an int or long" % (value,)) return value class Float(object): """Something that must be an C{int}, C{long}, or C{float}.""" def coerce(self, value): if not isinstance(value, (int, long, float)): raise InvalidError("%r isn't a float" % (value,)) return value class Bytes(object): """A binary string.""" def coerce(self, value): if not isinstance(value, str): raise InvalidError("%r isn't a str" % (value,)) return value class Unicode(object): """Something that must be a C{unicode}. If the value is a C{str}, it will automatically be decoded. @param encoding: The encoding to automatically decode C{str}s with. """ def __init__(self, encoding="utf-8"): self.encoding = encoding def coerce(self, value): if isinstance(value, str): try: value = value.decode(self.encoding) except UnicodeDecodeError, e: raise InvalidError("%r can't be decoded: %s" % (value, str(e))) if not isinstance(value, unicode): raise InvalidError("%r isn't a unicode" % (value,)) return value class List(object): """Something which must be a C{list}. @param schema: The schema that all values of the list must match. 
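A minimal usage sketch (hypothetical values, not taken from the client code): C{List(Int()).coerce([1, 2])} returns C{[1, 2]}, while C{List(Int()).coerce([1, "2"])} raises L{InvalidError} because the second element fails the inner schema.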
""" def __init__(self, schema): self.schema = schema def coerce(self, value): if not isinstance(value, list): raise InvalidError("%r is not a list" % (value,)) new_list = list(value) for i, subvalue in enumerate(value): try: new_list[i] = self.schema.coerce(subvalue) except InvalidError, e: raise InvalidError( "%r could not coerce with %s: %s" % (subvalue, self.schema, e)) return new_list class Tuple(object): """Something which must be a fixed-length tuple. @param schema: A sequence of schemas, which will be applied to each value in the tuple respectively. """ def __init__(self, *schema): self.schema = schema def coerce(self, value): if not isinstance(value, tuple): raise InvalidError("%r is not a tuple" % (value,)) if len(value) != len(self.schema): raise InvalidError("Need %s items, got %s in %r" % (len(self.schema), len(value), value)) new_value = [] for schema, value in zip(self.schema, value): new_value.append(schema.coerce(value)) return tuple(new_value) class KeyDict(object): """Something which must be a C{dict} with defined keys. The keys must be constant and the values must match a per-key schema. @param schema: A dict mapping keys to schemas that the values of those keys must match. """ def __init__(self, schema, optional=None): if optional is None: optional = [] self.optional = set(optional) self.schema = schema def coerce(self, value): new_dict = {} if not isinstance(value, dict): raise InvalidError("%r is not a dict." % (value,)) for k, v in value.iteritems(): if k not in self.schema: raise InvalidError("%r is not a valid key as per %r" % (k, self.schema)) try: new_dict[k] = self.schema[k].coerce(v) except InvalidError, e: raise InvalidError( "Value of %r key of dict %r could not coerce with %s: %s" % (k, value, self.schema[k], e)) new_keys = set(new_dict.keys()) required_keys = set(self.schema.keys()) - self.optional missing = required_keys - new_keys if missing: raise InvalidError("Missing keys %s" % (missing,)) return new_dict class Dict(object): """Something which must be a C{dict} with arbitrary keys. @param key_schema: The schema that keys must match. @param value_schema: The schema that values must match. """ def __init__(self, key_schema, value_schema): self.key_schema = key_schema self.value_schema = value_schema def coerce(self, value): if not isinstance(value, dict): raise InvalidError("%r is not a dict." % (value,)) new_dict = {} for k, v in value.items(): new_dict[self.key_schema.coerce(k)] = self.value_schema.coerce(v) return new_dict class Message(KeyDict): """ Like L{KeyDict}, but with three predefined keys: C{type}, C{api}, and C{timestamp}. Of these, C{api} and C{timestamp} are optional. @param type: The type of the message. The C{type} key will need to match this as a constant. @param schema: A dict of additional schema in a format L{KeyDict} will accept. @param optional: An optional list of keys that should be optional. """ def __init__(self, type, schema, optional=None): self.type = type schema["timestamp"] = Float() schema["api"] = Any(Bytes(), Constant(None)) schema["type"] = Constant(type) if optional is not None: optional.extend(["timestamp", "api"]) else: optional = ["timestamp", "api"] super(Message, self).__init__(schema, optional=optional) def coerce(self, value): for k in value.keys(): if k not in self.schema: # We don't know about this field, just discard it. This # is useful when a client that introduced some new field # in a message talks to an older server, that don't understand # the new field yet. 
value.pop(k) return super(Message, self).coerce(value) landscape-client-14.01/landscape/plugin.py0000644000175000017500000000333312301414317020341 0ustar andreasandreasfrom logging import info from landscape.log import format_object class PluginConfigError(Exception): """There was an error registering or configuring a plugin.""" class PluginRegistry(object): """A central integration point for plugins.""" def __init__(self): self._plugins = [] self._plugin_names = {} def add(self, plugin): """Register a plugin. The plugin's C{register} method will be called with this registry as its argument. If the plugin has a C{plugin_name} attribute, it will be possible to look up the plugin later with L{get_plugin}. """ info("Registering plugin %s.", format_object(plugin)) self._plugins.append(plugin) if hasattr(plugin, "plugin_name"): self._plugin_names[plugin.plugin_name] = plugin plugin.register(self) def get_plugins(self): """Get the list of plugins.""" return self._plugins def get_plugin(self, name): """Get a particular plugin by name.""" return self._plugin_names[name] class Plugin(object): """A convenience for writing plugins. This provides a register method which will set up a bunch of reactor handlers in the idiomatic way. If C{run} is defined on subclasses, it will be called every C{run_interval} seconds after being registered. @cvar run_interval: The interval, in seconds, to execute the C{run} method. If set to C{None}, then C{run} will not be scheduled. """ run_interval = 5 def register(self, registry): self.registry = registry if hasattr(self, "run") and self.run_interval is not None: registry.reactor.call_every(self.run_interval, self.run) landscape-client-14.01/landscape/accumulate.py0000644000175000017500000001072612301414317021172 0ustar andreasandreas""" The accumulation logic generates data points for times that are a multiple of a step size. In other words, if the step size is 300 seconds, any data reported by the accumulation code will always be for a timestamp that is a multiple of 300. The purpose of this behaviour is to (a) limit the amount of data that is sent to the server and (b) provide data in a predictable format to make server-side handling of the data straight-forward. A nice side-effect of providing data at a known step-interval is that the server can detect blackholes in the data simply by testing for the absence of data points at step intervals. Limiting the amount of data sent to the server and making the data format predictable are both desirable attributes, but we need to ensure the data reported is accurate. We can't rely on plugins to report data exactly at step boundaries and even if we could we wouldn't necessarily end up with data points that are representative of the resource being monitored. We need a way to calculate a representative data point from the set of data points that a plugin provided during a step period. Suppose we want to calculate data points for timestamps 300 and 600. Assume a plugin runs at an interval less than 300 seconds to get values to provide to the accumulator. Each value received by the accumulator is used to update a data point that will be sent to the server when we cross the step boundary. The algorithm, based on derivatives, is: (current time - previous time) * value + last accumulated value If the 'last accumulated value' isn't available, it defaults to 0. For example, consider these timestamp/load average measurements: 300/2.0, 375/3.0, 550/3.5 and 650/0.5. Also assume we have no data prior to 300/2.0. 
This data would be processed as follows: Input Calculation Accumulated Value ----- ----------- ----------------- 300/2.0 (300 - 300) * 2.0 + 0 0.0 375/3.0 (375 - 300) * 3.0 + 0.0 225.0 550/3.5 (550 - 375) * 3.5 + 225.0 837.5 650/0.5 (600 - 550) * 0.5 + 837.5 862.5 Notice that the last value crosses a step boundary; the calculation for this value is: (step boundary time - previous time) * value + last accumulated value This yields the final accumulated value for the step period we've just traversed. The data point sent to the server is generated using the following calculation: accumulated value / step interval size The data point sent to the server in our example would be: 862.5 / 300 = 2.875 This value is representative of the activity that actually occurred and is returned to the plugin to queue for delivery to the server. The accumulated value for the next interval is calculated using the portion of time that crossed into the new step period: Input Calculation Accumulated Value ----- ----------- ----------------- 650/0.5 (650 - 600) * 0.5 + 0 25 And so the logic goes, continuing in a similar fashion, yielding representative data at each step boundary. """ class Accumulator(object): def __init__(self, persist, step_size): self._persist = persist self._step_size = step_size def __call__(self, new_timestamp, new_free_space, key): previous_timestamp, accumulated_value = self._persist.get(key, (0, 0)) accumulated_value, step_data = \ accumulate(previous_timestamp, accumulated_value, new_timestamp, new_free_space, self._step_size) self._persist.set(key, (new_timestamp, accumulated_value)) return step_data def accumulate(previous_timestamp, accumulated_value, new_timestamp, new_value, step_size): previous_step = previous_timestamp // step_size new_step = new_timestamp // step_size step_boundary = new_step * step_size step_diff = new_step - previous_step step_data = None if step_diff == 0: diff = new_timestamp - previous_timestamp accumulated_value += diff * new_value elif step_diff == 1: diff = step_boundary - previous_timestamp accumulated_value += diff * new_value step_value = float(accumulated_value) / step_size step_data = (step_boundary, step_value) diff = new_timestamp - step_boundary accumulated_value = diff * new_value else: diff = new_timestamp - step_boundary accumulated_value = diff * new_value return accumulated_value, step_data landscape-client-14.01/landscape/diff.py0000644000175000017500000000117612301414317017756 0ustar andreasandreasdef diff(old, new): """Returns the set of differences between two C{dict}s. @return: A 3-tuple of dicts with the changes that would need to be made to convert C{old} into C{new}: C{(creates, updates, deletes)} """ new_keys = set(new.iterkeys()) old_keys = set(old.iterkeys()) creates = {} for key in new_keys - old_keys: creates[key] = new[key] updates = {} for key in old_keys & new_keys: if old[key] != new[key]: updates[key] = new[key] deletes = {} for key in old_keys - new_keys: deletes[key] = old[key] return creates, updates, deletes landscape-client-14.01/landscape/user/0000755000175000017500000000000012301414317017445 5ustar andreasandreaslandscape-client-14.01/landscape/user/changes.py0000644000175000017500000001232612301414317021433 0ustar andreasandreasfrom landscape.diff import diff class UserChanges(object): """Detect changes made since the last snapshot was taken. If no snapshot is available all users and groups are reported. 
When a snapshot is available, only the changes between the current state and the snapshotted state are transmitted to the server. """ def __init__(self, persist, provider): super(UserChanges, self).__init__() self._persist = persist self._provider = provider # FIXME This shouldn't really be necessary. Not having it # here with the current factoring is also problematic. Figure # out a clean way to factor this. Gustavo suggested splitting # it into _build_old_data and _build_new_data and just calling # that from the necessary places. self._refresh() def _refresh(self): """Load the previous snapshot and update current data.""" self._old_users = self._persist.get("users", {}) self._old_groups = self._persist.get("groups", {}) self._new_users = self._create_index( "username", self._provider.get_users()) self._new_groups = self._create_index( "name", self._provider.get_groups()) def snapshot(self): """Save the current state and use it as a comparison snapshot.""" self._persist.set("users", self._new_users) self._persist.set("groups", self._new_groups) def clear(self): """ Reset the snapshot state and forget all knowledge of users and groups. """ self._persist.remove("users") self._persist.remove("groups") def _create_index(self, key, sequence): """ Given a key and a sequence of dicts, return a dict of the form C{{dict[key]: dict, ...}}. """ index = {} for data in sequence: index[data[key]] = data return index def create_diff(self): """Returns the changes since the last snapshot. See landscape.message_schemas.USERS schema for a description of the dictionary returned by this method. """ self._refresh() changes = {} changes.update(self._detect_user_changes()) changes.update(self._detect_group_changes()) return changes def _detect_user_changes(self): """ Compare the current user snapshot to the old one and return a C{dict} with C{create-users}, C{update-users} and C{delete-users} fields. Fields without data aren't included in the result. """ changes = {} creates, updates, deletes = diff(self._old_users, self._new_users) if creates: changes["create-users"] = list(creates.itervalues()) if updates: changes["update-users"] = list(updates.itervalues()) if deletes: changes["delete-users"] = list(deletes.iterkeys()) return changes def _detect_group_changes(self): """ Compare the current group snapshot to the old one and create a C{dict} with C{create-groups}, C{delete-groups}, C{create-group-members} and {delete-group-members} fields. Fields without data aren't included in the result. """ changes = {} creates, updates, deletes = diff(self._old_groups, self._new_groups) if creates: groups = [] create_members = {} for value in creates.itervalues(): # Use a copy to avoid removing the 'members' element # from stored data. 
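# For instance (names illustrative): a brand-new group # {"name": "web", "gid": 50, "members": ["a"]} becomes a # "create-groups" entry without "members", plus a # "create-group-members" entry {"web": ["a"]}.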
value = value.copy() members = value.pop("members") if members: create_members[value["name"]] = members groups.append(value) changes["create-groups"] = groups if create_members: changes["create-group-members"] = create_members if updates: remove_members = {} create_members = {} update_groups = [] for groupname, new_data in updates.iteritems(): old_data = self._old_groups[groupname] old_members = set(old_data["members"]) new_members = set(new_data["members"]) created = new_members - old_members if created: create_members[groupname] = sorted(created) removed = old_members - new_members if removed: remove_members[groupname] = sorted(removed) if old_data["gid"] != new_data["gid"]: update_groups.append({"name": groupname, "gid": new_data["gid"]}) if create_members: members = changes.setdefault("create-group-members", {}) members.update(create_members) if remove_members: members = changes.setdefault("delete-group-members", {}) members.update(remove_members) if update_groups: members = changes.setdefault("update-groups", []) members.extend(update_groups) if deletes: changes["delete-groups"] = deletes.keys() return changes landscape-client-14.01/landscape/user/provider.py0000644000175000017500000001421412301414317021653 0ustar andreasandreasfrom pwd import struct_passwd from grp import struct_group import csv import subprocess import logging class UserManagementError(Exception): """Catch all error for problems with User Management.""" class UserNotFoundError(Exception): """Raised when a user couldn't be found by uid/username.""" class GroupNotFoundError(Exception): """Raised when a group couldn't be found by gid/groupname.""" class UserProviderBase(object): """This is a base class for user Providers.""" def __init__(self, locked_users=None): self.locked_users = locked_users or [] self._min_uid = 1000 self._max_uid = 60000 def get_users(self): """Returns a list of all local users on the computer. Each user is represented as a dict with the keys: C{username}, C{name}, C{uid}, C{enabled}, C{location}, C{work-phone} and C{home-phone}. """ users = [] found_usernames = set() for user in self.get_user_data(): if not isinstance(user, struct_passwd): user = struct_passwd(user) if user.pw_name in found_usernames: continue gecos_data = [x.decode("utf-8", "replace") or None for x in user.pw_gecos.split(",")[:4]] while len(gecos_data) < 4: gecos_data.append(None) name, location, work_phone, home_phone = tuple(gecos_data) enabled = user.pw_name not in self.locked_users users.append({"username": user.pw_name, "name": name, "uid": user.pw_uid, "enabled": enabled, "location": location, "work-phone": work_phone, "home-phone": home_phone, "primary-gid": user.pw_gid}) found_usernames.add(user.pw_name) return users def get_groups(self): """Returns a list of groups on the computer. Each group is represented as a dict with the keys: C{name}, C{gid} and C{members}. """ user_names = set([x["username"] for x in self.get_users()]) groups = [] found_groupnames = set() for group in self.get_group_data(): if not isinstance(group, struct_group): group = struct_group(group) if group.gr_name in found_groupnames: continue member_names = user_names.intersection(group.gr_mem) groups.append({"name": group.gr_name, "gid": group.gr_gid, "members": list(member_names)}) found_groupnames.add(group.gr_name) return groups def get_uid(self, username): """Returns the UID for C{username}. @raises UserNotFoundError: Raised if C{username} doesn't match a user on the computer. 
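A quick sketch (hypothetical data): if the provider reports a user with C{"username": "jdoe"} and C{"uid": 1000}, then C{get_uid("jdoe")} returns C{1000}, while C{get_uid("nobody")} raises L{UserNotFoundError}.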
""" for data in self.get_users(): if data["username"] == username: return data["uid"] raise UserNotFoundError("UID not found for user %s." % username) def get_gid(self, groupname): """Returns the GID for C{groupname}. @raises UserManagementError: Raised if C{groupname} doesn't match a group on the computer. """ for data in self.get_groups(): if data["name"] == groupname: return data["gid"] raise GroupNotFoundError("Group not found for group %s." % groupname) class UserProvider(UserProviderBase): popen = subprocess.Popen passwd_fields = ["username", "passwd", "uid", "primary-gid", "gecos", "home", "shell"] group_fields = ["name", "passwd", "gid", "members"] def __init__(self, locked_users=[], passwd_file="/etc/passwd", group_file="/etc/group"): super(UserProvider, self).__init__(locked_users) self._passwd_file = passwd_file self._group_file = group_file def get_user_data(self): """ Parse passwd(5) formatted files and return tuples of user data in the form (username, password, uid, primary-group-id, gecos data, home directory, path to the user's shell) """ user_data = [] passwd_file = open(self._passwd_file, "r") reader = csv.DictReader(passwd_file, fieldnames=self.passwd_fields, delimiter=":", quoting=csv.QUOTE_NONE) current_line = 0 for row in reader: current_line += 1 # This skips the NIS user marker in the passwd file. if (row["username"].startswith("+") or row["username"].startswith("-")): continue try: user_data.append((row["username"], row["passwd"], int(row["uid"]), int(row["primary-gid"]), row["gecos"], row["home"], row["shell"])) except (ValueError, TypeError): logging.warn("passwd file %s is incorrectly formatted: " "line %d." % (self._passwd_file, current_line)) passwd_file.close() return user_data def get_group_data(self): """ Parse group(5) formatted files and return tuples of group data in the form (groupname, group password, group id and a list of member usernames). """ group_data = [] group_file = open(self._group_file, "r") reader = csv.DictReader(group_file, fieldnames=self.group_fields, delimiter=":", quoting=csv.QUOTE_NONE) current_line = 0 for row in reader: current_line += 1 # Skip if we find the NIS marker if (row["name"].startswith("+") or row["name"].startswith("-")): continue try: group_data.append((row["name"], row["passwd"], int(row["gid"]), row["members"].split(","))) except (AttributeError, ValueError): logging.warn("group file %s is incorrectly formatted: " "line %d." 
% (self._group_file, current_line)) group_file.close() return group_data landscape-client-14.01/landscape/user/tests/0000755000175000017500000000000012301414317020607 5ustar andreasandreaslandscape-client-14.01/landscape/user/tests/helpers.py0000644000175000017500000001547112301414317022633 0ustar andreasandreasfrom landscape.user.management import UserManagementError from landscape.user.provider import UserProviderBase class FakeUserManagement(object): def __init__(self, provider=None): self.shadow_file = getattr(provider, "shadow_file", None) self.provider = provider self._users = {} for data in self.provider.get_users(): self._users[data["username"]] = data self._groups = {} for data in self.provider.get_groups(): self._groups[data["name"]] = data def _make_fake_shadow_file(self, locked_users, unlocked_users): entry = "%s:%s:13348:0:99999:7:::\n" shadow_file = open(self.shadow_file, "w") for user in locked_users: shadow_file.write(entry % (user, "!")) for user in unlocked_users: shadow_file.write(entry % (user, "qweqweqeqweqw")) shadow_file.close() def add_user(self, username, name, password, require_password_reset, primary_group_name, location, work_phone, home_phone): try: uid = 1000 if self._users: uid = max([x["uid"] for x in self._users.itervalues()]) + 1 if self._groups: primary_gid = self.get_gid(primary_group_name) else: primary_gid = uid self._users[uid] = {"username": username, "name": name, "uid": uid, "enabled": True, "location": location, "work-phone": work_phone, "home-phone": home_phone, "primary-gid": primary_gid} gecos_string = "%s,%s,%s,%s" % (name, location or "", work_phone or "", home_phone or "") userdata = (username, "x", uid, primary_gid, gecos_string, "/bin/sh" , "/home/user") self.provider.users.append(userdata) except KeyError: raise UserManagementError("add_user failed") return "add_user succeeded" def lock_user(self, username): data = self._users.get(username, None) if data: data["enabled"] = False # This will generate a shadow file with only the locked user in it. self._make_fake_shadow_file([username], []) return "lock_user succeeded" raise UserManagementError("lock_user failed") def unlock_user(self, username): data = self._users.get(username, None) if data: data["enabled"] = True # This will generate a shadow file with only the unlocked user in it. 
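# The entry template above produces shadow(5)-style lines such as # "jdoe:!:13348:0:99999:7:::" (locked) or # "jdoe:qweqweqeqweqw:13348:0:99999:7:::" (unlocked).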
self._make_fake_shadow_file([], [username]) return "unlock_user succeeded" raise UserManagementError("unlock_user failed") def remove_user(self, username, delete_home=False): try: del self._users[username] except KeyError: raise UserManagementError("remove_user failed") remaining_users = [] for user in self.provider.users: if user[0] != username: remaining_users.append(user) self.provider.users = remaining_users return "remove_user succeeded" def set_user_details(self, username, password=None, name=None, location=None, work_number=None, home_number=None, primary_group_name=None): data = self._users.setdefault(username, {}) for key, value in [("name", name), ("location", location), ("work-phone", work_number), ("home-phone", home_number)]: if value: data[key] = value if primary_group_name: data["primary-gid"] = self.get_gid(primary_group_name) else: data["primary-gid"] = None userdata = (username, "x", data["uid"], data["primary-gid"], "%s,%s,%s,%s," % (name, location, work_number, home_number), "/bin/sh" , "/home/user") self.provider.users = [userdata] return "set_user_details succeeded" def get_gid(self, name): try: return self._groups[name]["gid"] except KeyError: raise UserManagementError("Group %s wasn't found." % name) def add_group(self, name): gid = 1000 if self._groups: gid = max([x["gid"] for x in self._groups.itervalues()]) + 1 self._groups[name] = {"name": name, "gid": gid, "members": []} self.update_provider_from_groups() return "add_group succeeded" def set_group_details(self, group, new_name): data = self._groups[group] data["name"] = new_name self._groups[new_name] = data del self._groups[group] self.update_provider_from_groups() return "set_group_details succeeded" def add_group_member(self, username, group): data = self._groups[group] if data: data["members"].append(username) self.update_provider_from_groups() return "add_group_member succeeded" raise UserManagementError("add_group_member failed") def remove_group_member(self, username, group): if group in self._groups: data = self._groups[group] data["members"].remove(username) self.update_provider_from_groups() return "remove_group_member succeeded" raise UserManagementError("remove_group_member failed") def remove_group(self, group): del self._groups[group] self.update_provider_from_groups() return "remove_group succeeded" def update_provider_from_groups(self): provider_list = [] for k, v in self._groups.iteritems(): provider_list.append((k, "x", v["gid"], v["members"])) self.provider.groups = provider_list class FakeUserProvider(UserProviderBase): def __init__(self, users=None, groups=None, popen=None, shadow_file=None, locked_users=None): self.users = users self.groups = groups if popen: self.popen = popen self.shadow_file = shadow_file super(FakeUserProvider, self).__init__(locked_users=locked_users) def get_user_data(self, system=False): if self.users is None: self.users = [] return self.users def get_group_data(self): if self.groups is None: self.groups = [] return self.groups class FakeUserInfo(object): """Implements enough functionality to work for Changes tests.""" persist_name = "users" run_interval = 60 def __init__(self, provider): self._provider = provider def register(self, manager): self._manager = manager self._persist = self._manager.persist.root_at("users") landscape-client-14.01/landscape/user/tests/test_changes.py0000644000175000017500000003104412301414317023632 0ustar andreasandreasfrom landscape.lib.persist import Persist from landscape.user.changes import UserChanges from 
landscape.user.tests.helpers import FakeUserInfo, FakeUserProvider from landscape.tests.helpers import LandscapeTest, MonitorHelper class UserChangesTest(LandscapeTest): helpers = [MonitorHelper] def setUp(self): super(UserChangesTest, self).setUp() self.persist = Persist() self.shadow_file = self.makeFile("""\ jdoe:$1$xFlQvTqe$cBtrNEDOIKMy/BuJoUdeG0:13348:0:99999:7::: psmith:!:13348:0:99999:7::: sbarnes:$1$q7sz09uw$q.A3526M/SHu8vUb.Jo1A/:13349:0:99999:7::: """) def test_no_existing_snapshot(self): """ The diff created by L{UserChanges.create_diff} contains data for all users and groups if an existing snapshot isn't available. """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh")] groups = [("webdev", "x", 1000, ["jdoe"])] provider = FakeUserProvider(users=users, groups=groups) FakeUserInfo(provider=provider) changes = UserChanges(self.persist, provider) self.assertEqual(changes.create_diff(), {"create-users": [{"username": "jdoe", "home-phone": None, "name": u"JD", "enabled": True, "location": None, "work-phone": None, "uid": 1000, "primary-gid": 1000}], "create-groups": [{"gid": 1000, "name": "webdev"}], "create-group-members": {"webdev": ["jdoe"]}}) def test_snapshot(self): """ When a snapshot is taken it should persist beyond instance invocations and be used as the baseline in L{UserChanges.create_diff} until another snapshot is taken. """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh")] groups = [("webdev", "x", 1000, ["jdoe"])] provider = FakeUserProvider(users=users, groups=groups) FakeUserInfo(provider=provider) changes1 = UserChanges(self.persist, provider) self.assertTrue(changes1.create_diff()) changes1.snapshot() changes2 = UserChanges(self.persist, provider) self.assertFalse(changes2.create_diff()) def test_snapshot_before_diff(self): """ A valid snapshot should be created if L{UserChanges.snapshot} is called before L{UserChanges.create_diff}. When L{UserChanges.create_diff} is called, it shouldn't report any changes. """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh")] groups = [("webdev", "x", 1000, ["jdoe"])] provider = FakeUserProvider(users=users, groups=groups) FakeUserInfo(provider=provider) changes = UserChanges(self.persist, provider) changes.snapshot() self.assertFalse(changes.create_diff()) def test_clear(self): """ L{UserChanges.clear} removes a snapshot, if present, returning the object to a pristine state. """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh")] groups = [("webdev", "x", 1000, ["jdoe"])] provider = FakeUserProvider(users=users, groups=groups) FakeUserInfo(provider=provider) changes = UserChanges(self.persist, provider) self.assertTrue(changes.create_diff()) changes.snapshot() self.assertFalse(changes.create_diff()) changes.clear() self.assertTrue(changes.create_diff()) def test_create_diff_without_changes(self): """ L{UserChanges.create_diff} should return an empty C{dict} if users and groups are unchanged since the last snapshot. """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh")] provider = FakeUserProvider(users=users) FakeUserInfo(provider=provider) changes = UserChanges(self.persist, provider) changes.create_diff() changes.snapshot() self.assertEqual(changes.create_diff(), {}) def test_add_user(self): """ L{UserChanges.create_diff} should report new users created externally with C{adduser} or similar tools.
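(Here the change is simulated by appending a passwd-style tuple; on a real system an administrator might run something like C{adduser bo}, command illustrative.)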
""" users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh")] provider = FakeUserProvider(users=users) FakeUserInfo(provider=provider) changes = UserChanges(self.persist, provider) changes.create_diff() changes.snapshot() users.append(("bo", "x", 1001, 1001, "Bo,,,,", "/home/bo", "/bin/sh")) self.assertEqual(changes.create_diff(), {"create-users": [{"username": "bo", "home-phone": None, "name": u"Bo", "enabled": True, "location": None, "work-phone": None, "uid": 1001, "primary-gid": 1001}]}) def test_update_user(self): """ L{UserChanges.create_diff} should report users modified externally with C{usermod} or similar tools. """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh")] provider = FakeUserProvider(users=users) FakeUserInfo(provider=provider) changes = UserChanges(self.persist, provider) changes.create_diff() changes.snapshot() users[0] = ("jdoe", "x", 1000, 1001, "John Doe,Here,789WORK,321HOME", "/home/john", "/bin/zsh") self.assertEqual(changes.create_diff(), {"update-users": [{"username": "jdoe", "home-phone": u"321HOME", "name": u"John Doe", "enabled": True, "location": "Here", "work-phone": "789WORK", "uid": 1000, "primary-gid": 1001}]}) def test_delete_user(self): """ L{UserChanges.create_diff} should report users removed externally with C{deluser} or similar tools. """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh"), ("bo", "x", 1001, 1001, "Bo,,,,", "/home/bo", "/bin/sh")] provider = FakeUserProvider(users=users) FakeUserInfo(provider=provider) changes = UserChanges(self.persist, provider) changes.create_diff() changes.snapshot() users.pop() self.assertEqual(changes.create_diff(), {"delete-users": ["bo"]}) def test_add_group(self): """ L{UserChanges.create_diff} should report new groups created externally with C{addgroup} or similar tools. """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh")] groups = [("webdev", "x", 50, ["jdoe"])] provider = FakeUserProvider(users=users, groups=groups) FakeUserInfo(provider=provider) changes = UserChanges(self.persist, provider) changes.create_diff() changes.snapshot() groups.append(("bizdev", "x", 60, [])) self.assertEqual(changes.create_diff(), {"create-groups": [{"gid": 60, "name": "bizdev"}]}) def test_add_group_with_members(self): """ L{UserChanges.create_diff} should report new groups and new members created externally with C{addgroup} or similar tools. """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh")] groups = [("webdev", "x", 50, ["jdoe"])] provider = FakeUserProvider(users=users, groups=groups) FakeUserInfo(provider=provider) changes = UserChanges(self.persist, provider) changes.create_diff() changes.snapshot() groups.append(("bizdev", "x", 60, ["jdoe"])) self.assertEqual(changes.create_diff(), {"create-groups": [{"gid": 60, "name": "bizdev"}], "create-group-members": {"bizdev": ["jdoe"]}}) def test_update_group(self): """ L{UserChanges.create_diff} should report groups modified externally with C{groupmod} or similar tools. """ groups = [("webdev", "x", 1000, [])] provider = FakeUserProvider(groups=groups) FakeUserInfo(provider=provider) changes = UserChanges(self.persist, provider) changes.create_diff() changes.snapshot() groups[0] = ("webdev", "x", 1001, []) self.assertEqual(changes.create_diff(), {"update-groups": [{"gid": 1001, "name": "webdev"}]}) def test_add_group_members(self): """ L{UserChanges.create_diff} should report new members added to groups externally with C{gpasswd} or similar tools. 
""" users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh"), ("bo", "x", 1001, 1001, "Bo,,,,", "/home/bo", "/bin/sh")] groups = [("webdev", "x", 50, ["jdoe"])] provider = FakeUserProvider(users=users, groups=groups) FakeUserInfo(provider=provider) changes = UserChanges(self.persist, provider) changes.create_diff() changes.snapshot() groups[0] = ("webdev", "x", 50, ["jdoe", "bo"]) self.assertEqual(changes.create_diff(), {"create-group-members": {"webdev": ["bo"]}}) def test_delete_group_members(self): """ L{UserChanges.create_diff} should report members removed from groups externally with C{gpasswd} or similar tools. """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh")] groups = [("webdev", "x", 50, ["jdoe"])] provider = FakeUserProvider(users=users, groups=groups) FakeUserInfo(provider=provider) changes = UserChanges(self.persist, provider) changes.create_diff() changes.snapshot() groups[0] = ("webdev", "x", 50, []) self.assertEqual(changes.create_diff(), {"delete-group-members": {"webdev": ["jdoe"]}}) def test_delete_group(self): """ L{UserChanges.create_diff} should report groups removed externally with C{delgroup} or similar tools. """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh")] groups = [("webdev", "x", 50, ["jdoe"]), ("sales", "x", 60, [])] provider = FakeUserProvider(users=users, groups=groups) FakeUserInfo(provider=provider) changes = UserChanges(self.persist, provider) changes.create_diff() changes.snapshot() groups.pop() self.assertEqual(changes.create_diff(), {"delete-groups": ["sales"]}) def test_complex_changes(self): """ L{UserChanges.create_diff} should be able to report multiple kinds of changes at the same time. """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh"), ("bo", "x", 1001, 1001, "Bo,,,,", "/home/bo", "/bin/sh")] groups = [("webdev", "x", 50, ["jdoe"]), ("bizdev", "x", 60, ["bo"])] provider = FakeUserProvider(users=users, groups=groups) FakeUserInfo(provider=provider) changes = UserChanges(self.persist, provider) changes.create_diff() changes.snapshot() # We remove the group "webdev", and create a new group # "developers", adding the user "bo" at the same time. 
groups[0] = ("developers", "x", 50, ["bo"]) # Add a new group "sales" and a new group member, "bo" groups.append(("sales", "x", 70, ["bo"])) # Remove user "jdoe" users.pop(0) self.assertEqual(changes.create_diff(), {"create-groups": [{"gid": 50, "name": "developers"}, {"gid": 70, "name": "sales"}], "delete-users": ["jdoe"], "delete-groups": ["webdev"], "create-group-members": {"developers": ["bo"], "sales": ["bo"]}}) landscape-client-14.01/landscape/user/tests/test_management.py0000644000175000017500000006613512301414317024347 0ustar andreasandreasfrom landscape.lib import md5crypt from landscape.user.management import UserManagement, UserManagementError from landscape.user.tests.helpers import FakeUserProvider from landscape.user.provider import UserNotFoundError, GroupNotFoundError from landscape.tests.helpers import LandscapeTest, MockPopen def guess_password(generated_password, plaintext_password): salt = generated_password[len("$1$"):generated_password.rfind("$")] crypted = md5crypt.md5crypt(plaintext_password, salt) return crypted class UserWriteTest(LandscapeTest): def setUp(self): LandscapeTest.setUp(self) self.shadow_file = self.makeFile("""\ jdoe:$1$xFlQvTqe$cBtrNEDOIKMy/BuJoUdeG0:13348:0:99999:7::: psmith:!:13348:0:99999:7::: sbarnes:$1$q7sz09uw$q.A3526M/SHu8vUb.Jo1A/:13349:0:99999:7::: """) def test_add_user(self): """L{UserManagement.add_user} should use C{adduser} to add users.""" groups = [("users", "x", 1001, [])] provider = FakeUserProvider(groups=groups, popen=MockPopen("")) management = UserManagement(provider=provider) management.add_user("jdoe", "John Doe", "password", False, "users", "Room 101", "+123456", None) self.assertEqual(len(provider.popen.popen_inputs), 2) self.assertEqual(provider.popen.popen_inputs[0], ["adduser", "jdoe", "--disabled-password", "--gecos", "John Doe,Room 101,+123456,", "--gid", "1001"]) usermod = provider.popen.popen_inputs[1] self.assertEqual(len(usermod), 4, usermod) password = guess_password(usermod[2], "password") self.assertEqual(usermod, ["usermod", "-p", password, "jdoe"]) def test_add_user_error(self): """ L{UserManagement.add_user} should raise an L{UserManagementError} if C{adduser} fails. """ provider = FakeUserProvider(popen=MockPopen("", return_codes=[1, 0])) management = UserManagement(provider=provider) self.assertRaises(UserManagementError, management.add_user, "jdoe", u"John Doe", "password", False, None, None, None, None) def test_change_password_error(self): """ L{UserManagement.add_user} should raise an L{UserManagementError} if C{usermod} fails. """ provider = FakeUserProvider(popen=MockPopen("", return_codes=[0, 1])) management = UserManagement(provider=provider) self.assertRaises(UserManagementError, management.add_user, "jdoe", u"John Doe", "password", False, None, None, None, None) def test_expire_password_error(self): """ L{UserManagement.add_user} should raise an L{UserManagementError} if C{passwd} fails. """ provider = FakeUserProvider( popen=MockPopen("", return_codes=[0, 0, 1])) management = UserManagement(provider=provider) self.assertRaises(UserManagementError, management.add_user, "jdoe", u"John Doe", "password", True, None, None, None, None) def test_set_password(self): """ L{UserManagement.set_password} should use C{usermod} to change a user's password. 
""" data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, popen=MockPopen("no output")) management = UserManagement(provider=provider) management.set_user_details("jdoe", password="password") self.assertEqual(len(provider.popen.popen_inputs), 1) password = provider.popen.popen_inputs[0][2] password = guess_password(password, "password") self.assertEqual(provider.popen.popen_inputs, [["usermod", "-p", password, "jdoe"]]) def test_set_password_with_system_user(self): """ L{UserManagement.set_password} should allow us to edit system users. """ data = [("root", "x", 0, 0, ",,,,", "/home/root", "/bin/zsh")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, popen=MockPopen("no output")) management = UserManagement(provider=provider) management.set_user_details("root", password="password") self.assertEqual(len(provider.popen.popen_inputs), 1) password = provider.popen.popen_inputs[0][2] password = guess_password(password, "password") self.assertEqual(provider.popen.popen_inputs, [["usermod", "-p", password, "root"]]) def test_set_password_unicode(self): """ Make sure passing unicode as username and password doesn't change things much (note that using something that's non-ASCII-encodable still probably won't work). """ data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, popen=MockPopen("no output")) management = UserManagement(provider=provider) management.set_user_details("jdoe", password=u"password") self.assertEqual(len(provider.popen.popen_inputs), 1) password = provider.popen.popen_inputs[0][2] password = guess_password(password, "password") self.assertEqual(provider.popen.popen_inputs, [["usermod", "-p", password, "jdoe"]]) def test_set_name(self): """ L{UserManagement.set_user_details} should use C{chfn} to change a user's name. """ data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, popen=MockPopen("no output")) management = UserManagement(provider=provider) management.set_user_details("jdoe", name="JD") self.assertEqual(len(provider.popen.popen_inputs), 1) self.assertEqual(provider.popen.popen_inputs, [["chfn", "-f", "JD", "jdoe"]]) def test_set_location(self): """ L{UserManagement.set_user_details} should use C{chfn} to change a user's location. """ data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, popen=MockPopen("no output")) management = UserManagement(provider=provider) management.set_user_details("jdoe", location="Everywhere") self.assertEqual(len(provider.popen.popen_inputs), 1) self.assertEqual(provider.popen.popen_inputs, [["chfn", "-r", "Everywhere", "jdoe"]]) def test_clear_user_location(self): """ L{UserManagement.set_user_details} should use C{chfn} to change a user's location. 
""" data = [("jdoe", "x", 1000, 1000, "JD,Room 101,,,", "/home/jdoe", "/bin/zsh")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, popen=MockPopen("no output")) management = UserManagement(provider=provider) management.set_user_details("jdoe", location="") self.assertEqual(len(provider.popen.popen_inputs), 1) self.assertEqual(provider.popen.popen_inputs, [["chfn", "-r", "", "jdoe"]]) def test_clear_telephone_numbers(self): """ L{UserManagement.set_user_details} should use C{chfn} to change a user's telephone numbers. """ data = [("jdoe", "x", 1000, 1000, "JD,,+123456,+123456", "/home/jdoe", "/bin/zsh")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, popen=MockPopen("no output")) management = UserManagement(provider=provider) management.set_user_details("jdoe", home_number="", work_number="") self.assertEqual(len(provider.popen.popen_inputs), 1) self.assertEqual(provider.popen.popen_inputs, [["chfn", "-w", "", "-h", "", "jdoe"]]) def test_set_user_details_fails(self): """ L{UserManagement.set_user_details} should raise an L{EditUserError} if C{chfn} fails. """ data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, popen=MockPopen("", return_codes=[1])) management = UserManagement(provider=provider) self.assertRaises(UserNotFoundError, management.set_user_details, 1000, name="John Doe") def test_contact_details_in_general(self): """ L{UserManagement.set_user_details} should use C{chfn} to change a user's contact details. """ data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, popen=MockPopen("no output")) management = UserManagement(provider=provider) location = u"Everywhere" work_number = u"1-800-123-4567" home_number = u"764-4321" management.set_user_details("jdoe", location=location, work_number=work_number, home_number=home_number) self.assertEqual(len(provider.popen.popen_inputs), 1) self.assertEqual(provider.popen.popen_inputs, [["chfn", "-r", location, "-w", work_number, "-h", home_number, "jdoe"]]) def test_set_user_details_with_unknown_username(self): """ L{UserManagement.set_user_details} should raise a L{UserManagementError} if the user being edited doesn't exist. """ provider = FakeUserProvider(popen=MockPopen("")) management = UserManagement(provider=provider) self.assertRaises(UserNotFoundError, management.set_user_details, "kevin", name=u"John Doe") def test_set_primary_group(self): """ L{UserManagement.set_set_user_details} should use C{usermod} to change the user's primary group. """ data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] groups = [("users", "x", 1001, [])] provider = FakeUserProvider(users=data, groups=groups, shadow_file=self.shadow_file, popen=MockPopen("no output")) management = UserManagement(provider=provider) management.set_user_details("jdoe", primary_group_name="users") self.assertEqual(provider.popen.popen_inputs, [["usermod", "-g", "1001", "jdoe"]]) def test_set_primary_group_unknown_group(self): """ L{UserManagement.set_user_details should use C{usermod} to change the user's primary group, in the event that we have an invalid group, we should raise a UserManagement error. 
""" data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] groups = [("staff", "x", 1001, [])] provider = FakeUserProvider(users=data, groups=groups, shadow_file=self.shadow_file, popen=MockPopen("group id 1002 unknown", return_codes=[1])) management = UserManagement(provider=provider) self.assertRaises(GroupNotFoundError, management.set_user_details, "jdoe", primary_group_name="unknown") def test_lock_user(self): """L{UserManagement.lock_user} should use C{usermod} to lock users.""" data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, popen=MockPopen("no output")) management = UserManagement(provider=provider) management.lock_user("jdoe") self.assertEqual(provider.popen.popen_inputs, [["usermod", "-L", "jdoe"]]) def test_lock_user_fails(self): """ L{UserManagement.lock_user} should raise a L{UserManagementError} if a C{usermod} fails. """ data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, popen=MockPopen("", [1])) management = UserManagement(provider=provider) self.assertRaises(UserNotFoundError, management.lock_user, 1000) def test_lock_user_with_unknown_uid(self): """ L{UserManagement.lock_user} should raise a L{UserManagementError} if the user being removed doesn't exist. """ provider = FakeUserProvider(popen=MockPopen("")) management = UserManagement(provider=provider) self.assertRaises(UserNotFoundError, management.lock_user, 1000) def test_unlock_user(self): """ L{UserManagement.unlock_user} should use C{usermod} to unlock users. """ data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, popen=MockPopen("no output")) management = UserManagement(provider=provider) management.unlock_user("jdoe") self.assertEqual(provider.popen.popen_inputs, [["usermod", "-U", "jdoe"]]) def test_unlock_user_fails(self): """ L{UserManagement.unlock_user} should raise an L{UserManagementError} if a C{usermod} fails. """ data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, popen=MockPopen("", [1])) management = UserManagement(provider=provider) self.assertRaises(UserNotFoundError, management.unlock_user, 1000) def test_unlock_user_with_unknown_uid(self): """ L{UserManagement.unlock_user} should raise a L{UserManagementError} if the user being removed doesn't exist. """ provider = FakeUserProvider(popen=MockPopen("")) management = UserManagement(provider=provider) self.assertRaises(UserNotFoundError, management.unlock_user, 1000) def test_remove_user(self): """ L{UserManagement.remove_user} should use C{deluser} to remove users. """ data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] popen = MockPopen("Removing user `jdoe'...\r\ndone.") provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, popen=popen) management = UserManagement(provider=provider) management.remove_user("jdoe") self.assertEqual(popen.popen_inputs, [["deluser", "jdoe"]]) def test_remove_user_with_unknown_username(self): """ L{UserManagement.remove_user} should raise a L{UserManagementError} if the user being removed doesn't exist. 
""" provider = FakeUserProvider(popen=MockPopen("")) management = UserManagement(provider=provider) self.assertRaises(UserNotFoundError, management.remove_user, "smith") def test_remove_user_fails(self): """ L{UserManagement.remove_user} should raise a L{UserManagementError} if the user can't be removed. """ self.log_helper.ignore_errors(UserNotFoundError) data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] popen = MockPopen("/usr/sbin/deluser: Only root may remove a user or " "group from the system.", [1]) provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, popen=popen) management = UserManagement(provider=provider) self.assertRaises(UserNotFoundError, management.remove_user, "smith") def test_remove_user_and_home(self): """ L{UserManagement.remove_user} should use C{deluser} to remove the contents of a user's home directory. """ data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] popen = MockPopen("Removing user `jdoe`...\r\ndone.", [0]) provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, popen=popen) management = UserManagement(provider=provider) management.remove_user("jdoe", delete_home=True) self.assertEqual(popen.popen_inputs, [["deluser", "jdoe", "--remove-home"]]) class GroupWriteTest(LandscapeTest): def setUp(self): LandscapeTest.setUp(self) self.shadow_file = self.makeFile("""\ jdoe:$1$xFlQvTqe$cBtrNEDOIKMy/BuJoUdeG0:13348:0:99999:7::: psmith:!:13348:0:99999:7::: sbarnes:$1$q7sz09uw$q.A3526M/SHu8vUb.Jo1A/:13349:0:99999:7::: """) def test_add_group(self): """ L{UserManagement.add_group} should use the system tool C{addgroup} to create groups. """ provider = FakeUserProvider(popen=MockPopen("Result")) management = UserManagement(provider=provider) result = management.add_group("webdev") self.assertEqual(provider.popen.popen_inputs, [["addgroup", "webdev"]]) self.assertEqual(result, "Result") def test_add_group_handles_errors(self): """ If the system tool C{addgroup} returns a non-0 exit code, L{UserManagement.add_group} should raise an L{UserManagementError}. """ provider = FakeUserProvider(popen=MockPopen("Error Result", [1])) management = UserManagement(provider=provider) self.assertRaises(UserManagementError, management.add_group, "kaboom") def test_set_group_details(self): """ L{UserManagement.set_group_details} should use C{groupmode} to change a group's name. """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] groups = [("bizdev", "x", 1001, [])] provider = FakeUserProvider( users=users, shadow_file=self.shadow_file, groups=groups, popen=MockPopen("no output")) management = UserManagement(provider=provider) management.set_group_details("bizdev", "sales") self.assertEqual(provider.popen.popen_inputs, [["groupmod", "-n", "sales", "bizdev"]]) def test_set_group_details_with_unknown_groupname(self): """ L{UserManagement.set_group_details} should raise a L{UserManagementError} if the group being updated doesn't exist. """ provider = FakeUserProvider(popen=MockPopen("")) management = UserManagement(provider=provider) self.assertRaises(GroupNotFoundError, management.set_group_details, "sales", u"newsales") def test_set_group_details_fails(self): """ L{UserManagement.set_group_details} should raise a L{UserManagementError} if the group can't be renamed. 
""" users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] groups = [("bizdev", "x", 1001, [])] popen = MockPopen("groupmod: sales is not a unique name", [1]) provider = FakeUserProvider(users=users, shadow_file=self.shadow_file, groups=groups, popen=popen) management = UserManagement(provider=provider) self.assertRaises(UserManagementError, management.set_group_details, "bizdev", u"sales") def test_add_member(self): """ L{UserManagement.add_group_member} should use the system tool C{gpasswd} via the process factory to add a member to a group. """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] groups = [("bizdev", "x", 1001, [])] provider = FakeUserProvider(users=users, shadow_file=self.shadow_file, groups=groups, popen=MockPopen( "Removing user jdoe from group bizdev")) management = UserManagement(provider=provider) output = management.add_group_member("jdoe", "bizdev") self.assertEqual(provider.popen.popen_inputs, [["gpasswd", "-a", "jdoe", "bizdev"]]) self.assertEqual(output, "Removing user jdoe from group bizdev") def test_add_member_with_unknown_groupname(self): """ L{UserManagement.add_group_member} should raise a L{UserManagementError} if the group to add the member to doesn't exist. """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] provider = FakeUserProvider(users=users, shadow_file=self.shadow_file, popen=MockPopen("")) management = UserManagement(provider=provider) self.assertRaises(GroupNotFoundError, management.add_group_member, "jdoe", "bizdev") def test_add_member_with_unknown_username(self): """ L{UserManagement.add_group_member} should raise a L{UserManagementError} if the user being associated doesn't exist. """ groups = [("bizdev", "x", 1001, [])] provider = FakeUserProvider(groups=groups, popen=MockPopen("")) management = UserManagement(provider=provider) self.assertRaises(UserNotFoundError, management.add_group_member, "bizdev", "smith") def test_add_member_failure(self): """ If adding a member to a group fails, L{UserManagement.add_group_member} should raise an L{UserManagementError}. """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] groups = [("bizdev", "x", 1001, [])] provider = FakeUserProvider(users=users, shadow_file=self.shadow_file, groups=groups, popen=MockPopen("no output", [1])) management = UserManagement(provider=provider) self.assertRaises(UserNotFoundError, management.add_group_member, 1000, 1001) def test_remove_member(self): """ L{UserManagement.remove_group_member} should use the system tool C{gpasswd} via the process factory to remove a member from a group. """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] groups = [("bizdev", "x", 1001, [])] provider = FakeUserProvider(users=users, shadow_file=self.shadow_file, groups=groups, popen=MockPopen( "Removing user jdoe from group bizdev")) management = UserManagement(provider=provider) output = management.remove_group_member("jdoe", "bizdev") self.assertEqual(provider.popen.popen_inputs, [["gpasswd", "-d", "jdoe", "bizdev"]]) self.assertEqual(output, "Removing user jdoe from group bizdev") def test_remove_member_with_unknown_groupname(self): """ L{UserManagement.remove_group_member} should raise a L{UserManagementError} if the group to remove the member to doesn't exist. 
""" users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] provider = FakeUserProvider(users=users, shadow_file=self.shadow_file, popen=MockPopen("", return_codes=[2])) management = UserManagement(provider=provider) self.assertRaises(GroupNotFoundError, management.remove_group_member, "jdoe", "bizdev") def test_remove_member_with_unknown_username(self): """ L{UserManagement.remove_group_member} should raise a L{UserManagementError} if the user being associated doesn't exist. """ groups = [("bizdev", "x", 1001, [])] provider = FakeUserProvider(groups=groups, popen=MockPopen("", return_codes=[4])) management = UserManagement(provider=provider) self.assertRaises(UserNotFoundError, management.remove_group_member, "jdoe", "bizdev") def test_remove_member_failure(self): """ If removing a member from a group fails, L{UserManagement.remove_group_member} should raise a L{UserManagementError}. """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] groups = [("bizdev", "x", 1001, [])] provider = FakeUserProvider(users=users, shadow_file=self.shadow_file, groups=groups, popen=MockPopen("no output", [1])) management = UserManagement(provider=provider) self.assertRaises(UserManagementError, management.remove_group_member, "jdoe", "bizdev") def test_remove_group(self): """ L{UserManagement.remove_group} should use C{groupdel} to remove groups. """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] groups = [("bizdev", "x", 50, [])] popen = MockPopen("Removing group `bizdev'...\r\ndone.") provider = FakeUserProvider(users=users, shadow_file=self.shadow_file, groups=groups, popen=popen) management = UserManagement(provider=provider) management.remove_group("bizdev") self.assertEqual(provider.popen.popen_inputs, [["groupdel", "bizdev"]]) def test_remove_group_with_unknown_groupname(self): """ L{UserManagement.remove_group} should raise a L{GroupMissingError} if the group being removed doesn't exist. """ provider = FakeUserProvider(popen=MockPopen("")) management = UserManagement(provider=provider) self.assertRaises( GroupNotFoundError, management.remove_group, "ubuntu") def test_remove_group_fails(self): """ L{UserManagement.remove_user} should raise a L{RemoveUserError} if the user can't be removed. 
""" users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] groups = [("bizdev", "x", 50, [])] popen = MockPopen("/usr/sbin/deluser: Only root may remove a user or " "group from the system.", [1]) provider = FakeUserProvider(users=users, shadow_file=self.shadow_file, groups=groups, popen=popen) management = UserManagement(provider=provider) self.assertRaises( GroupNotFoundError, management.remove_group, "ubuntu") landscape-client-14.01/landscape/user/tests/__init__.py0000644000175000017500000000000012301414317022706 0ustar andreasandreaslandscape-client-14.01/landscape/user/tests/test_provider.py0000644000175000017500000005666312301414317024072 0ustar andreasandreasimport pwd import grp from landscape.user.provider import (UserProvider, UserNotFoundError, GroupNotFoundError) from landscape.user.tests.helpers import FakeUserProvider from landscape.tests.helpers import LandscapeTest class ProviderTest(LandscapeTest): def setUp(self): LandscapeTest.setUp(self) self.shadow_file = self.makeFile("""\ jdoe:$1$xFlQvTqe$cBtrNEDOIKMy/BuJoUdeG0:13348:0:99999:7::: psmith:!:13348:0:99999:7::: sbarnes:$1$q7sz09uw$q.A3526M/SHu8vUb.Jo1A/:13349:0:99999:7::: """) self.passwd_file = self.makeFile("""\ root:x:0:0:root:/root:/bin/bash haldaemon:x:107:116:Hardware abstraction layer,,,:/home/haldaemon:/bin/false kevin:x:1001:65534:Kevin,101,+44123123,+44123124:/home/kevin:/bin/bash """) self.group_file = self.makeFile("""\ root:x:0: cdrom:x:24:haldaemon,kevin kevin:x:1000: """) def test_get_uid(self): """ Given a username L{UserProvider.get_uid} returns the UID or raises a L{UserProviderError} if a match isn't found. """ data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file) self.assertEqual(provider.get_uid("jdoe"), 1000) self.assertRaises(UserNotFoundError, provider.get_uid, "john") def test_get_users(self): """Get users should return data for all users found on the system.""" data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file) users = provider.get_users() self.assertEqual(users, [{"username": "jdoe", "name": u"JD", "uid": 1000, "enabled": True, "location": None, "home-phone": None, "work-phone": None, "primary-gid": 1000}]) def test_gecos_data(self): """ Location, home phone number, and work phone number should be correctly parsed out of the GECOS field, and included in the users message. """ data = [("jdoe", "x", 1000, 1000, "JD,Everywhere,7654321,123HOME,", "/home/jdoe", "/bin/zsh")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file) users = provider.get_users() self.assertEqual(users, [{"username": "jdoe", "name": u"JD", "uid": 1000, "enabled": True, "location": u"Everywhere", "home-phone": u"123HOME", "work-phone": u"7654321", "primary-gid": 1000}]) def test_four_gecos_fields(self): """If a GECOS field only has four fields it should still work.""" data = [("jdoe", "x", 1000, 1000, "JD,Everywhere,7654321,123HOME", "/home/jdoe", "/bin/zsh")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file) users = provider.get_users() self.assertEqual(users, [{"username": "jdoe", "name": u"JD", "uid": 1000, "enabled": True, "location": u"Everywhere", "home-phone": u"123HOME", "work-phone": u"7654321", "primary-gid": 1000}]) def test_old_school_gecos_data(self): """ If C{useradd} is used to add users to a system the GECOS field will be written as a comment. 
The client must be resilient to this situation. """ data = [("jdoe", "x", 1000, 1000, "John Doe", "/home/jdoe", "/bin/zsh")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file) users = provider.get_users() self.assertEqual(users, [{"username": "jdoe", "uid": 1000, "enabled": True, "name": u"John Doe", "location": None, "home-phone": None, "work-phone": None, "primary-gid": 1000}]) def test_weird_gecos_data(self): """ If GECOS data is malformed in a way that contains less than four fields, read as many as are available. """ data = [("jdoe", "x", 1000, 1000, "John Doe,Everywhere", "/home/jdoe", "/bin/zsh")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file) users = provider.get_users() self.assertEqual(users, [{"username": "jdoe", "uid": 1000, "enabled": True, "name": "John Doe", "location": "Everywhere", "home-phone": None, "work-phone": None, "primary-gid": 1000}]) def test_no_gecos_data(self): """ When no data is provided in the GECOS field we should report all optional fields as C{None}. """ data = [("jdoe", "x", 1000, 1000, "", "/home/jdoe", "/bin/zsh")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file) users = provider.get_users() self.assertEqual(users, [{"username": "jdoe", "uid": 1000, "enabled": True, "name": None, "location": None, "home-phone": None, "work-phone": None, "primary-gid": 1000}]) def test_utf8_gecos_data(self): """Gecos fields should be decoded from utf-8 to unicode.""" name = u"Jos\N{LATIN SMALL LETTER E WITH ACUTE}" location = "F\N{LATIN SMALL LETTER I WITH DIAERESIS}nland" number = "N\N{LATIN SMALL LETTER AE}ver" gecos = "%s,%s,%s,%s," % (name.encode("utf-8"), location.encode("utf-8"), number.encode("utf-8"), number.encode("utf-8")) data = [("jdoe", "x", 1000, 1000, gecos, "/home/jdoe", "/bin/zsh")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file) users = provider.get_users() self.assertEqual(users[0]["name"], name) self.assertEqual(users[0]["location"], location) self.assertEqual(users[0]["home-phone"], number) self.assertEqual(users[0]["work-phone"], number) def test_non_utf8_data(self): """ If a GECOS field contains non-UTF8 data, it should be replaced with question marks. 
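        "Question marks" here means the Unicode replacement character
        U+FFFD, the result of decoding with the "replace" error handler; an
        illustrative sketch of that policy, independent of the provider:

            >>> "\xff,\xff".decode("utf-8", "replace")
            u'\ufffd,\ufffd'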
""" unicode_unknown = u'\N{REPLACEMENT CHARACTER}' data = [("jdoe", "x", 1000, 1000, "\255,\255,\255,\255", "/home/jdoe", "/bin/zsh")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file) users = provider.get_users() self.assertEqual(users[0]["name"], unicode_unknown) self.assertEqual(users[0]["location"], unicode_unknown) self.assertEqual(users[0]["home-phone"], unicode_unknown) self.assertEqual(users[0]["work-phone"], unicode_unknown) def test_get_disabled_user(self): """The C{enabled} field should be C{False} for disabled users.""" data = [("psmith", "x", 1000, 1000, "Peter Smith,,,,", "/home/psmith", "/bin/bash")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file, locked_users=["psmith"]) users = provider.get_users() self.assertEqual(users, [ {"username": "psmith", "name": u"Peter Smith", "uid": 1000, "enabled": False, "location": None, "home-phone": None, "work-phone": None, "primary-gid": 1000}]) def test_real_user_data(self): """L{UserProvider} should work with real data.""" provider = UserProvider() provider.shadow_file = None users = provider.get_users() user_0 = pwd.getpwuid(0) for user in users: if user["username"] == user_0.pw_name: self.assertEqual(user["uid"], 0) user_0_name = user_0.pw_gecos.split(",")[0].decode( "utf-8", "replace") self.assertEqual(user["name"], user_0_name) break else: self.fail("The user %s (uid=0) was not found in the get_data " "result." % (user_0.pw_name)) def test_get_users_duplicate_usernames(self): """ Get users should return data for all users found on the system, but it should exclude duplicate usernames. """ data = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh"), ("jdoe", "x", 1001, 1001, "JD,,,,", "/home/jdoe", "/bin/zsh")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file) users = provider.get_users() self.assertEqual(users, [{"username": "jdoe", "name": u"JD", "uid": 1000, "enabled": True, "location": None, "home-phone": None, "work-phone": None, "primary-gid": 1000}]) def test_get_users_duplicate_uids(self): """ Get users should return data for all users found on the system, including users with duplicated uids. """ data = [("joe1", "x", 1000, 1000, "JD,,,,", "/home/joe1", "/bin/zsh"), ("joe2", "x", 1000, 1000, "JD,,,,", "/home/joe2", "/bin/zsh")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file) users = provider.get_users() self.assertEqual(users, [{"username": "joe1", "name": u"JD", "uid": 1000, "enabled": True, "location": None, "home-phone": None, "work-phone": None, "primary-gid": 1000}, {"username": "joe2", "name": u"JD", "uid": 1000, "enabled": True, "location": None, "home-phone": None, "work-phone": None, "primary-gid": 1000}]) def test_user_not_in_shadow_file(self): """ Given a username that doesn't exist in the shadow file, we should get a UserProvider error rather than a KeyError. raises a L{UserProviderError} if a match isn't found. """ data = [("johndoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] provider = FakeUserProvider(users=data, shadow_file=self.shadow_file) users = provider.get_users() self.assertEqual(len(users), 1) self.assertEqual(sorted([x[0] for x in data]), ["johndoe"]) def test_get_gid(self): """ Given a username L{UserProvider.get_gid} returns the GID or raises a L{UserProviderError} if a match isn't found. 
""" provider = FakeUserProvider(groups=[("jdoe", "x", 1000, [])]) self.assertEqual(provider.get_gid("jdoe"), 1000) self.assertRaises(GroupNotFoundError, provider.get_gid, "john") def test_group_without_members(self): """ L{UserProvider.get_groups} should include groups without members. """ provider = FakeUserProvider(groups=[("jdoe", "x", 1000, [])]) self.assertEqual(provider.get_groups(), [{"name": "jdoe", "gid": 1000, "members": []}]) def test_group_with_members(self): """L{UserProvider.get_groups} should include groups with members.""" users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] groups = [("sales", "x", 50, ["jdoe"])] provider = FakeUserProvider(users=users, shadow_file=self.shadow_file, groups=groups) self.assertEqual(provider.get_groups(), [{"name": "sales", "gid": 50, "members": ["jdoe"]}]) def test_group_with_unknown_members(self): """L{UserProvider.get_groups} should include groups with members. If a member's userid isn't known to the system, it shouldn't be returned. """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] groups = [("sales", "x", 50, ["jdoe", "kevin"])] provider = FakeUserProvider(users=users, shadow_file=self.shadow_file, groups=groups) self.assertEqual(provider.get_groups(), [{"name": "sales", "gid": 50, "members": ["jdoe"]}]) def test_group_with_duplicate_members(self): """ L{UserProvider.get_groups} should only report groups once. If duplicates exist they should be removed. The problem reported in bug #118799 is related to duplicates being reported to the server. """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] groups = [("sales", "x", 50, ["jdoe", "jdoe"])] provider = FakeUserProvider(users=users, shadow_file=self.shadow_file, groups=groups) self.assertEqual(provider.get_groups(), [{"name": "sales", "gid": 50, "members": ["jdoe"]}]) def test_group_with_duplicate_groupnames(self): """ L{UserProvider.get_groups} should only report members once. If duplicates exist they should be removed. The problem reported in bug #118799 is related to duplicates being reported to the server. """ users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/zsh")] groups = [("sales", "x", 50, ["jdoe"]), ("sales", "x", 51, ["jdoe"]), ] provider = FakeUserProvider(users=users, shadow_file=self.shadow_file, groups=groups) self.assertEqual(provider.get_groups(), [{"name": "sales", "gid": 50, "members": ["jdoe"]}]) def test_real_group_data(self): """ Assert that L{UserProvider.get_group}'s functionality reflects what is accessible from the Python standard C{grp} module. """ provider = UserProvider() group_0 = grp.getgrgid(0) groups = provider.get_groups() for group in groups: if group["name"] == group_0.gr_name: self.assertEqual(group["gid"], 0) self.assertEqual(group["members"], group_0.gr_mem) break else: self.fail("The group %s (gid=0) was not found in the get_data " "result." 
% (group_0.gr_name,)) def test_get_user_data(self): """This tests the functionality for parsing /etc/passwd style files.""" provider = UserProvider(passwd_file=self.passwd_file, group_file=self.group_file) users = provider.get_user_data() self.assertEqual(users[0], ("root", "x", 0, 0, "root", "/root", "/bin/bash")) self.assertEqual(users[1], ("haldaemon", "x", 107, 116, "Hardware abstraction layer,,,", "/home/haldaemon", "/bin/false")) self.assertEqual(users[2], ("kevin", "x", 1001, 65534, "Kevin,101,+44123123,+44123124", "/home/kevin", "/bin/bash")) def test_get_users_with_many(self): """ The method get_users is responsible for translating tuples of information from the underlying user database into dictionaries. """ provider = UserProvider(passwd_file=self.passwd_file, group_file=self.group_file) users = provider.get_users() self.assertEqual(users[0], {"username": "root", "name": u"root", "uid": 0, "enabled": True, "location": None, "home-phone": None, "work-phone": None, "primary-gid": 0}) self.assertEqual(users[1], {"username": "haldaemon", "name": u"Hardware abstraction layer", "uid": 107, "enabled": True, "location": None, "home-phone": None, "work-phone": None, "primary-gid": 116}) self.assertEqual(users[2], {"username": "kevin", "name": u"Kevin", "uid": 1001, "enabled": True, "location": u"101", "home-phone": u"+44123124", "work-phone": u"+44123123", "primary-gid": 65534}) def test_get_group_data(self): """This tests the functionality for parsing /etc/group style files.""" provider = UserProvider(passwd_file=self.passwd_file, group_file=self.group_file) groups = provider.get_group_data() self.assertEqual(groups[0], (u"root", u"x", 0, [u""])) self.assertEqual(groups[1], (u"cdrom", u"x", 24, [u"haldaemon", u"kevin"])) self.assertEqual(groups[2], (u"kevin", u"x", 1000, [u""])) def test_get_groups(self): """ The method get_groups is responsible for translating tuples of data from the underlying userdatabase into dictionaries. """ provider = UserProvider(passwd_file=self.passwd_file, group_file=self.group_file) groups = provider.get_groups() self.assertEqual(groups[0], {"name": u"root", "gid": 0, "members": []}) self.assertEqual(groups[1], {"name": u"cdrom", "gid": 24, "members": [u"kevin", u"haldaemon"]}) self.assertEqual(groups[2], {"name": u"kevin", "gid": 1000, "members": []}) def test_get_users_incorrect_passwd_file(self): """ This tests the functionality for parsing /etc/passwd style files. Incorrectly formatted lines according to passwd(5) should be ignored during processing. """ passwd_file = self.makeFile("""\ root:x:0:0:root:/root:/bin/bash broken haldaemon:x:107:Hardware abstraction layer,,,:/home/haldaemon:/bin/false kevin:x:1001:65534:Kevin,101,+44123123,+44123124:/home/kevin:/bin/bash +:::::: broken2 """) provider = UserProvider(passwd_file=passwd_file, group_file=self.group_file) users = provider.get_users() self.assertEqual(users[0], {"username": "root", "name": u"root", "uid": 0, "enabled": True, "location": None, "home-phone": None, "work-phone": None, "primary-gid": 0}) self.assertEqual(users[1], {"username": "kevin", "name": u"Kevin", "uid": 1001, "enabled": True, "location": u"101", "home-phone": u"+44123124", "work-phone": u"+44123123", "primary-gid": 65534}) log = ("WARNING: passwd file %s is incorrectly formatted: line 2." % passwd_file) self.assertIn(log, self.logfile.getvalue()) log2 = ("WARNING: passwd file %s is incorrectly formatted: line 3." 
% passwd_file) self.assertIn(log2, self.logfile.getvalue()) log3 = ("WARNING: passwd file %s is incorrectly formatted: line 6." % passwd_file) self.assertIn(log3, self.logfile.getvalue()) def test_get_users_nis_line(self): """ This tests the functionality for parsing /etc/passwd style files. We should ignore the specific pattern for NIS user-extensions in passwd files. """ passwd_file = self.makeFile("""\ root:x:0:0:root:/root:/bin/bash kevin:x:1001:65534:Kevin,101,+44123123,+44123124:/home/kevin:/bin/bash +jkakar:::::: -radix:::::: +:::::: """) provider = UserProvider(passwd_file=passwd_file, group_file=self.group_file) users = provider.get_users() self.assertTrue(len(users), 2) self.assertEqual(users[0], {"username": "root", "name": u"root", "uid": 0, "enabled": True, "location": None, "home-phone": None, "work-phone": None, "primary-gid": 0}) self.assertEqual(users[1], {"username": "kevin", "name": u"Kevin", "uid": 1001, "enabled": True, "location": u"101", "home-phone": u"+44123124", "work-phone": u"+44123123", "primary-gid": 65534}) log = ("WARNING: passwd file %s is incorrectly formatted" % passwd_file) self.assertTrue(log not in self.logfile.getvalue()) def test_get_groups_incorrect_groups_file(self): """ This tests the functionality for parsing /etc/group style files. Incorrectly formatted lines according to group(5) should be ignored during processing. """ group_file = self.makeFile("""\ root:x:0: cdrom:x:24: kevin:x:kevin: """) provider = UserProvider(passwd_file=self.passwd_file, group_file=group_file) groups = provider.get_groups() self.assertEqual(groups[0], {"name": u"root", "gid": 0, "members": []}) self.assertEqual(groups[1], {"name": u"cdrom", "gid": 24, "members": []}) log = ("WARNING: group file %s is incorrectly " "formatted: line 3." % group_file) self.assertIn(log, self.logfile.getvalue()) def test_get_groups_nis_line(self): """ This tests the functionality for parsing /etc/group style files. We should ignore the specific pattern for NIS user-extensions in group files. """ group_file = self.makeFile("""\ root:x:0: cdrom:x:24: +jkakar::: -radix::: +::: """) provider = UserProvider(passwd_file=self.passwd_file, group_file=group_file) groups = provider.get_groups() self.assertEqual(groups[0], {"name": u"root", "gid": 0, "members": []}) log = ("WARNING: group file %s is incorrectly formatted" % group_file) self.assertTrue(log not in self.logfile.getvalue()) landscape-client-14.01/landscape/user/__init__.py0000644000175000017500000000000012301414317021544 0ustar andreasandreaslandscape-client-14.01/landscape/user/management.py0000644000175000017500000002221412301414317022134 0ustar andreasandreas# XXX: There is the potential for some sort of "unixadmin" package # which wraps up the commands which we use in this module in a Python # API, with thorough usage of exceptions and such, instead of pipes to # subprocesses. liboobs (i.e. System Tools) is a possibility, and has # documentation now in the 2.17 series, but is not wrapped to Python. import os import logging import subprocess from landscape.lib import md5crypt from landscape.user.provider import UserManagementError, UserProvider class UserManagement(object): """Manage system users and groups.""" def __init__(self, provider=None): self._provider = provider or UserProvider() def add_user(self, username, name, password, require_password_reset, primary_group_name, location, work_phone, home_phone): """Add C{username} to the computer. @raises UserManagementError: Raised when C{adduser} fails. 
@raises UserManagementError: Raised when C{passwd} fails. """ logging.info("Adding user %s.", username) gecos = "%s,%s,%s,%s" % (name, location or "", work_phone or "", home_phone or "") command = ["adduser", username, "--disabled-password", "--gecos", gecos] if primary_group_name: command.extend(["--gid", str(self._provider.get_gid( primary_group_name))]) result, output = self.call_popen(command) if result != 0: raise UserManagementError("Error adding user %s.\n%s" % (username, output)) self._set_password(username, password) if require_password_reset: result, new_output = self.call_popen(["passwd", username, "-e"]) if result != 0: raise UserManagementError("Error resetting password for user " "%s.\n%s" % (username, new_output)) else: output += new_output return output def _set_password(self, username, password): # XXX temporary workaround? We're getting unicode here. username = username.encode("ascii") password = password.encode("ascii") salt = os.urandom(6).encode("base64")[:-1] crypted = md5crypt.md5crypt(password, salt) result, output = self.call_popen(["usermod", "-p", crypted, username]) if result != 0: raise UserManagementError("Error setting password for user " "%s.\n%s" % (username, output)) return output def _set_primary_group(self, username, groupname): primary_gid = self._provider.get_gid(groupname) command = ["usermod", "-g", str(primary_gid), username] result, output = self.call_popen(command) if result != 0: raise UserManagementError("Error setting primary group to %d for" "%s.\n%s" % (primary_gid, username, output)) return output def set_user_details(self, username, password=None, name=None, location=None, work_number=None, home_number=None, primary_group_name=None): """Update details for the account matching C{uid}.""" uid = self._provider.get_uid(username) logging.info("Updating data for user %s (UID %d).", username, uid) if password: self._set_password(username, password) if primary_group_name: self._set_primary_group(username, primary_group_name) command = ["chfn"] for option, value in [("-r", location), ("-f", name), ("-w", work_number), ("-h", home_number)]: if value is not None: command += [option, value] if len(command) > 1: result, output = self.call_popen(command + [username]) if result != 0: raise UserManagementError("Error setting details for user " "%s.\n%s" % (username, output)) return output def lock_user(self, username): """ Lock the account matching C{username} to prevent them from logging in. 
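        Locking is delegated to C{usermod}; a usage sketch (assuming the
        default system provider and root privileges; the account name is
        made up):

            management = UserManagement()
            management.lock_user("jdoe")  # runs: usermod -L jdoe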
""" uid = self._provider.get_uid(username) logging.info("Locking out user %s (UID %d).", username, uid) result, output = self.call_popen(["usermod", "-L", username]) if result != 0: raise UserManagementError("Error locking user %s.\n%s" % (username, output)) def unlock_user(self, username): """Unlock the account matching C{username}.""" uid = self._provider.get_uid(username) logging.info("Unlocking user %s (UID %d).", username, uid) result, output = self.call_popen(["usermod", "-U", username]) if result != 0: raise UserManagementError("Error unlocking user %s.\n%s" % (username, output)) return output def remove_user(self, username, delete_home=False): """Remove the account matching C{username} from the computer.""" uid = self._provider.get_uid(username) command = ["deluser", username] if delete_home: logging.info("Removing user %s (UID %d) and deleting their home " "directory.", username, uid) command.append("--remove-home") else: logging.info("Removing user %s (UID %d) without deleting their " "home directory.", username, uid) result, output = self.call_popen(command) if result != 0: raise UserManagementError("Error removing user %s (UID %d).\n%s" % (username, uid, output)) return output def add_group(self, groupname): """Add C{group} with the C{addgroup} system command.""" logging.info("Adding group %s.", groupname) result, output = self.call_popen(["addgroup", groupname]) if result != 0: raise UserManagementError("Error adding group %s.\n%s" % (groupname, output)) return output def set_group_details(self, groupname, new_name): """Update details for the group matching C{gid}.""" gid = self._provider.get_gid(groupname) logging.info("Renaming group %s (GID %d) to %s.", groupname, gid, new_name) command = ["groupmod", "-n", new_name, groupname] result, output = self.call_popen(command) if result != 0: raise UserManagementError("Error renaming group %s (GID %d) to " "%s.\n%s" % (groupname, gid, new_name, output)) return output def add_group_member(self, username, groupname): """ Add the user matching C{username} to the group matching C{groupname} with the C{gpasswd} system command. """ uid = self._provider.get_uid(username) gid = self._provider.get_gid(groupname) logging.info("Adding user %s (UID %d) to group %s (GID %d).", username, uid, groupname, gid) result, output = self.call_popen(["gpasswd", "-a", username, groupname]) if result != 0: raise UserManagementError("Error adding user %s (UID %d) to " "group %s (GID %d).\n%s" % (username, uid, groupname, gid, output)) return output def remove_group_member(self, username, groupname): """ Remove the user matching C{username} from the group matching C{groupname} with the C{gpasswd} system command. 
""" uid = self._provider.get_uid(username) gid = self._provider.get_gid(groupname) logging.info("Removing user %s (UID %d) from group %s (GID %d).", username, uid, groupname, gid) result, output = self.call_popen(["gpasswd", "-d", username, groupname]) if result != 0: raise UserManagementError("Error removing user %s (UID %d) " "from group %s (GID (%d).\n%s" % (username, uid, groupname, gid, output)) return output def remove_group(self, groupname): """Remove the account matching C{groupname} from the computer.""" gid = self._provider.get_gid(groupname) logging.info("Removing group %s (GID %d).", groupname, gid) result, output = self.call_popen(["groupdel", groupname]) if result != 0: raise UserManagementError("Error removing group %s (GID %d).\n%s" % (groupname, gid, output)) return output def call_popen(self, args): popen = self._provider.popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) output = popen.stdout.read() result = popen.wait() return result, output landscape-client-14.01/landscape/watchdog.py0000644000175000017500000006152012301414317020645 0ustar andreasandreas"""See L{WatchDog}. The WatchDog must run as root, because it spawns the Landscape Manager. The main C{landscape-client} program uses this watchdog. """ import os import errno import sys import pwd import signal import time from logging import warning, info, error from resource import setrlimit, RLIMIT_NOFILE from twisted.internet import reactor from twisted.internet.defer import Deferred, succeed from twisted.internet.protocol import ProcessProtocol from twisted.internet.error import ProcessExitedAlready from twisted.application.service import Service, Application from twisted.application.app import startApplication from landscape.deployment import init_logging, Configuration from landscape.lib.twisted_util import gather_results from landscape.lib.log import log_failure from landscape.lib.bootstrap import (BootstrapList, BootstrapFile, BootstrapDirectory) from landscape.log import rotate_logs from landscape.broker.amp import ( RemoteBrokerConnector, RemoteMonitorConnector, RemoteManagerConnector) from landscape.reactor import LandscapeReactor from landscape.lib.dns import discover_server from landscape.configuration import ( fetch_base64_ssl_public_certificate, decode_base64_ssl_public_certificate) GRACEFUL_WAIT_PERIOD = 10 MAXIMUM_CONSECUTIVE_RESTARTS = 5 RESTART_BURST_DELAY = 30 # seconds SIGKILL_DELAY = 10 class DaemonError(Exception): """One of the daemons could not be started.""" class TimeoutError(Exception): """Something took too long.""" class ExecutableNotFoundError(Exception): """An executable was not found.""" class Daemon(object): """A Landscape daemon which can be started and tracked. This class should be subclassed to specify individual daemon. @cvar program: The name of the executable program that will start this daemon. @cvar username: The name of the user to switch to, by default. @cvar service: The DBus service name that the program will be expected to listen on. @cvar max_retries: The maximum number of retries before giving up when trying to connect to the watched daemon. @cvar factor: The factor by which the delay between subsequent connection attempts will increase. @param connector: The L{ComponentConnector} of the daemon. @param reactor: The reactor used to spawn the process and schedule timed calls. @param verbose: Optionally, report more information when running this program. Defaults to False. 
""" username = "landscape" max_retries = 3 factor = 1.1 options = None def __init__(self, connector, reactor=reactor, verbose=False, config=None): self._connector = connector self._reactor = reactor self._env = os.environ.copy() my_uid = os.getuid() if my_uid == 0: pwd_info = pwd.getpwnam(self.username) target_uid = pwd_info.pw_uid target_gid = pwd_info.pw_gid if target_uid != my_uid: self._uid = target_uid else: self._uid = None if target_gid != os.getgid(): self._gid = target_gid else: self._gid = None self._env["HOME"] = pwd_info.pw_dir self._env["USER"] = self.username self._env["LOGNAME"] = self.username else: # We can only switch UIDs if we're root, so simply don't switch # UIDs if we're not. self._uid = None self._gid = None self._verbose = verbose self._config = config self._process = None self._last_started = 0 self._quick_starts = 0 self._allow_restart = True def find_executable(self): """Find the fully-qualified path to the executable. If the executable can't be found, L{ExecutableNotFoundError} will be raised. """ dirname = os.path.dirname(os.path.abspath(sys.argv[0])) executable = os.path.join(dirname, self.program) if not os.path.exists(executable): raise ExecutableNotFoundError("%s doesn't exist" % (executable,)) return executable def start(self): """Start this daemon.""" self._process = None now = time.time() if self._last_started + RESTART_BURST_DELAY > now: self._quick_starts += 1 if self._quick_starts == MAXIMUM_CONSECUTIVE_RESTARTS: error("Can't keep %s running. Exiting." % self.program) self._reactor.stop() return else: self._quick_starts = 0 self._last_started = now self._process = WatchedProcessProtocol(self) exe = self.find_executable() args = [exe, "--ignore-sigint"] if not self._verbose: args.append("--quiet") if self._config: args.extend(["-c", self._config]) if self.options is not None: args.extend(self.options) self._reactor.spawnProcess(self._process, exe, args=args, env=self._env, uid=self._uid, gid=self._gid) def stop(self): """Stop this daemon.""" if not self._process: return succeed(None) return self._process.kill() def _connect_and_call(self, name, *args, **kwargs): """Connect to the remote daemon over AMP and perform the given command. @param name: The name of the command to perform. @param args: Arguments list to be passed to the connect method @param kwargs: Keywords arguments to pass to the connect method. @return: A L{Deferred} resulting in C{True} if the command was successful or C{False} otherwise. @see: L{RemoteLandscapeComponentCreator.connect}. """ def disconnect(ignored): self._connector.disconnect() return True connected = self._connector.connect(self.max_retries, self.factor, quiet=True) connected.addCallback(lambda remote: getattr(remote, name)()) connected.addCallback(disconnect) connected.addErrback(lambda x: False) return connected def request_exit(self): return self._connect_and_call("exit") def is_running(self): # FIXME Error cases may not be handled in the best possible way # here. We're basically return False if any error happens from the # dbus ping. return self._connect_and_call("ping") def wait(self): """ Return a Deferred which will fire when the process has died. """ if not self._process: return succeed(None) return self._process.wait() def wait_or_die(self): """ Wait for the process to die for C{GRACEFUL_WAIT_PERIOD}. If it hasn't died by that point, send it a SIGTERM. 
        If it doesn't die within C{SIGKILL_DELAY} seconds after the SIGTERM,
        send it a SIGKILL.
        """
        if not self._process:
            return succeed(None)
        return self._process.wait_or_die()

    def prepare_for_shutdown(self):
        """Called by the watchdog when starting to shut us down.

        It will prevent our L{WatchedProcessProtocol} from restarting the
        process when it exits.
        """
        self._allow_restart = False

    def allow_restart(self):
        """Return a boolean indicating if the daemon should be restarted."""
        return self._allow_restart

    def rotate_logs(self):
        self._process.rotate_logs()


class Broker(Daemon):
    program = "landscape-broker"


class Monitor(Daemon):
    program = "landscape-monitor"


class Manager(Daemon):
    program = "landscape-manager"
    username = "root"


class WatchedProcessProtocol(ProcessProtocol):
    """
    A process-watching protocol which sends any of its output to the log
    file and restarts it when it dies.
    """
    _killed = False

    def __init__(self, daemon):
        self.daemon = daemon
        self._wait_result = None
        self._delayed_really_kill = None
        self._delayed_terminate = None

    def kill(self):
        self._terminate()
        return self.wait()

    def _terminate(self, warn=False):
        if self.transport is not None:
            if warn:
                warning("%s didn't exit. Sending SIGTERM"
                        % (self.daemon.program,))
            try:
                self.transport.signalProcess(signal.SIGTERM)
            except ProcessExitedAlready:
                pass
            else:
                # Give some time for the process, and then show who's the
                # boss.
                delayed = reactor.callLater(SIGKILL_DELAY, self._really_kill)
                self._delayed_really_kill = delayed

    def _really_kill(self):
        try:
            self.transport.signalProcess(signal.SIGKILL)
        except ProcessExitedAlready:
            pass
        else:
            warning("%s didn't die. Sending SIGKILL." % self.daemon.program)
        self._delayed_really_kill = None

    def rotate_logs(self):
        if self.transport is not None:
            try:
                self.transport.signalProcess(signal.SIGUSR1)
            except ProcessExitedAlready:
                pass

    def wait(self):
        if self.transport.pid is None:
            return succeed(None)
        self._wait_result = Deferred()
        return self._wait_result

    def wait_or_die(self):
        self._delayed_terminate = reactor.callLater(GRACEFUL_WAIT_PERIOD,
                                                    self._terminate,
                                                    warn=True)
        return self.wait()

    def outReceived(self, data):
        # it's *probably* going to always be line buffered, by accident
        sys.stdout.write(data)

    def errReceived(self, data):
        sys.stderr.write(data)

    def processEnded(self, reason):
        """The process has ended; restart it."""
        if self._delayed_really_kill is not None:
            self._delayed_really_kill.cancel()
        if (self._delayed_terminate is not None
                and self._delayed_terminate.active()):
            self._delayed_terminate.cancel()
        if self._wait_result is not None:
            self._wait_result.callback(None)
        elif self.daemon.allow_restart():
            self.daemon.start()


class WatchDog(object):
    """
    The Landscape WatchDog starts all other landscape daemons and ensures
    that they are working.
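    Supervision policy, as implemented below: every daemon is pinged every
    five seconds, and after five consecutive failed pings it is stopped and
    started again. A daemon that has to be restarted
    MAXIMUM_CONSECUTIVE_RESTARTS times in a row, each within
    RESTART_BURST_DELAY seconds of the previous start, makes the watchdog
    give up and stop the reactor.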
""" def __init__(self, reactor=reactor, verbose=False, config=None, broker=None, monitor=None, manager=None, enabled_daemons=None): landscape_reactor = LandscapeReactor() if enabled_daemons is None: enabled_daemons = [Broker, Monitor, Manager] if broker is None and Broker in enabled_daemons: broker = Broker( RemoteBrokerConnector(landscape_reactor, config), verbose=verbose, config=config.config) if monitor is None and Monitor in enabled_daemons: monitor = Monitor( RemoteMonitorConnector(landscape_reactor, config), verbose=verbose, config=config.config) if manager is None and Manager in enabled_daemons: manager = Manager( RemoteManagerConnector(landscape_reactor, config), verbose=verbose, config=config.config) self.broker = broker self.monitor = monitor self.manager = manager self.daemons = filter(None, [self.broker, self.monitor, self.manager]) self.reactor = reactor self._checking = None self._stopping = False signal.signal( signal.SIGUSR1, lambda signal, frame: reactor.callFromThread( self._notify_rotate_logs)) if config is not None and config.clones > 0: options = ["--clones", str(config.clones), "--start-clones-over", str(config.start_clones_over)] for daemon in self.daemons: daemon.options = options self._ping_failures = {} def check_running(self): """Return a list of any daemons that are already running.""" results = [] for daemon in self.daemons: # This method is called on startup, we basically try to connect # a few times in fast sequence (with exponential backoff), if we # don't get a response we assume the daemon is not running. result = daemon.is_running() result.addCallback(lambda is_running, d=daemon: (is_running, d)) results.append(result) def got_all_results(r): return [x[1] for x in r if x[0]] return gather_results(results).addCallback(got_all_results) def start(self): """ Start all daemons. The broker will be started first, and no other daemons will be started before it is running and responding to DBUS messages. @return: A deferred which fires when all services have successfully started. If a daemon could not be started, the deferred will fail with L{DaemonError}. """ for daemon in self.daemons: daemon.start() self.start_monitoring() def start_monitoring(self): """Start monitoring processes which have already been started.""" # Must wait before daemons actually start, otherwise check will # restart them *again*. self._checking = self.reactor.callLater(5, self._check) def _restart_if_not_running(self, is_running, daemon): if (not is_running) and (not self._stopping): warning("%s failed to respond to a ping." % (daemon.program,)) if daemon not in self._ping_failures: self._ping_failures[daemon] = 0 self._ping_failures[daemon] += 1 if self._ping_failures[daemon] == 5: warning("%s died! Restarting." % (daemon.program,)) stopping = daemon.stop() def stopped(ignored): daemon.start() self._ping_failures[daemon] = 0 stopping.addBoth(stopped) return stopping else: self._ping_failures[daemon] = 0 def _check(self): all_running = [] for daemon in self.daemons: is_running = daemon.is_running() is_running.addCallback(self._restart_if_not_running, daemon) all_running.append(is_running) def reschedule(ignored): self._checking = self.reactor.callLater(5, self._check) gather_results(all_running).addBoth(reschedule) def request_exit(self): if self._checking is not None and self._checking.active(): self._checking.cancel() # Set a flag so that the pinger will avoid restarting the daemons if a # ping has already been sent but not yet responded to. 
self._stopping = True # This tells the daemons to not automatically restart when they end for daemon in self.daemons: daemon.prepare_for_shutdown() def terminate_processes(broker_stopped): if broker_stopped: results = [daemon.wait_or_die() for daemon in self.daemons] else: # If request_exit fails, we should just kill the daemons # immediately. error("Couldn't request that broker gracefully shut down; " "killing forcefully.") results = [x.stop() for x in self.daemons] return gather_results(results) result = self.broker.request_exit() return result.addCallback(terminate_processes) def _notify_rotate_logs(self): for daemon in self.daemons: daemon.rotate_logs() rotate_logs() class WatchDogConfiguration(Configuration): def make_parser(self): parser = super(WatchDogConfiguration, self).make_parser() parser.add_option("--daemon", action="store_true", help="Fork and run in the background.") parser.add_option("--pid-file", type="str", help="The file to write the PID to.") parser.add_option("--monitor-only", action="store_true", help="Don't enable management features. This is " "useful if you want to run the client as a non-root " "user.") return parser def get_enabled_daemons(self): daemons = [Broker, Monitor] if not self.monitor_only: daemons.append(Manager) return daemons def daemonize(): # See http://www.steve.org.uk/Reference/Unix/faq_2.html#SEC16 if os.fork(): # launch child and... os._exit(0) # kill off parent os.setsid() if os.fork(): # launch child and... os._exit(0) # kill off parent again. # some argue that this umask should be 0, but that's annoying. os.umask(077) null = os.open('/dev/null', os.O_RDWR) for i in range(3): try: os.dup2(null, i) except OSError, e: if e.errno != errno.EBADF: raise os.close(null) class WatchDogService(Service): def __init__(self, config): self._config = config self.watchdog = WatchDog(verbose=not config.daemon, config=config, enabled_daemons=config.get_enabled_daemons()) self.exit_code = 0 def autodiscover(self): """ Autodiscover called if config setting config.server_autodiscover is True. This method allows the watchdog to attempt server autodiscovery, fetch the discovered landscape server's custom CA certificate, and write both the certificate and the updated config file with the discovered values. """ def update_config(hostname): if hostname is None: warning("Autodiscovery returned empty hostname string. " "Reverting to previous settings.") else: info("Autodiscovery found landscape server at %s. " "Updating configuration values." % hostname) self._config.server_autodiscover = False self._config.url = "https://%s/message-system" % hostname self._config.ping_url = "http://%s/ping" % hostname if not self._config.ssl_public_key: # If we don't have a key on this system, pull it from # the auto-discovered server and write it to the filesystem ssl_public_key = fetch_base64_ssl_public_certificate( hostname, on_info=info, on_error=warning) if ssl_public_key: self._config.ssl_public_key = ssl_public_key decode_base64_ssl_public_certificate(self._config) self._config.write() return hostname def discovery_error(result): warning("Autodiscovery failed. 
Reverting to previous settings.") lookup_deferred = discover_server( self._config.autodiscover_srv_query_string, self._config.autodiscover_a_query_string) lookup_deferred.addCallback(update_config) lookup_deferred.addErrback(discovery_error) return lookup_deferred def startService(self): Service.startService(self) bootstrap_list.bootstrap(data_path=self._config.data_path, log_dir=self._config.log_dir) if self._config.clones > 0: # Let clones open an appropriate number of fds setrlimit(RLIMIT_NOFILE, (self._config.clones * 100, self._config.clones * 200)) # Increase the timeout of AMP's MethodCalls. # XXX: we should find a better way to expose this knot, and # not set it globally on the class from landscape.lib.amp import MethodCallSender MethodCallSender.timeout = 300 # Create clones log and data directories for i in range(self._config.clones): suffix = "-clone-%d" % i bootstrap_list.bootstrap( data_path=self._config.data_path + suffix, log_dir=self._config.log_dir + suffix) result = succeed(None) if self._config.server_autodiscover: result.addCallback(lambda _: self.autodiscover()) result.addCallback(lambda _: self.watchdog.check_running()) def start_if_not_running(running_daemons): if running_daemons: error("ERROR: The following daemons are already running: %s" % (", ".join(x.program for x in running_daemons))) self.exit_code = 1 reactor.crash() # so stopService isn't called. return self._daemonize() info("Watchdog watching for daemons.") return self.watchdog.start() def die(failure): log_failure(failure, "Unknown error occurred!") self.exit_code = 2 reactor.crash() result.addCallback(start_if_not_running) result.addErrback(die) return result def _daemonize(self): if self._config.daemon: daemonize() if self._config.pid_file: stream = open(self._config.pid_file, "w") stream.write(str(os.getpid())) stream.close() def stopService(self): info("Stopping client...") Service.stopService(self) # If CTRL-C is pressed twice in a row, the second SIGINT actually # kills us before subprocesses die, and that makes them hang around. signal.signal(signal.SIGINT, signal.SIG_IGN) done = self.watchdog.request_exit() done.addBoth(lambda r: self._remove_pid()) return done def _remove_pid(self): pid_file = self._config.pid_file if pid_file is not None and os.access(pid_file, os.W_OK): stream = open(pid_file) pid = stream.read() stream.close() if pid == str(os.getpid()): os.unlink(pid_file) bootstrap_list = BootstrapList([ BootstrapDirectory("$data_path", "landscape", "root", 0755), BootstrapDirectory("$data_path/package", "landscape", "root", 0755), BootstrapDirectory( "$data_path/package/hash-id", "landscape", "root", 0755), BootstrapDirectory( "$data_path/package/binaries", "landscape", "root", 0755), BootstrapDirectory( "$data_path/package/upgrade-tool", "landscape", "root", 0755), BootstrapDirectory("$data_path/messages", "landscape", "root", 0755), BootstrapDirectory("$data_path/sockets", "landscape", "root", 0750), BootstrapDirectory( "$data_path/custom-graph-scripts", "landscape", "root", 0755), BootstrapDirectory("$log_dir", "landscape", "root", 0755), BootstrapFile("$data_path/package/database", "landscape", "root", 0644)]) def clean_environment(): """Unset dangerous environment variables. In particular unset all variables beginning with DEBIAN_ or DEBCONF_, to avoid any problems when landscape-client is invoked from its postinst script. Some environment variables may be set which would affect *other* maintainer scripts which landscape-client invokes. 
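    A sketch of the effect (the values are made up):

        os.environ["DEBIAN_FRONTEND"] = "noninteractive"
        os.environ["MAIL"] = "/var/mail/root"
        clean_environment()
        # Neither DEBIAN_FRONTEND nor MAIL is in os.environ any more.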
""" for key in os.environ.keys(): if (key.startswith(("DEBIAN_", "DEBCONF_")) or key in ["LANDSCAPE_ATTACHMENTS", "MAIL"]): del os.environ[key] def run(args=sys.argv, reactor=None): """Start the watchdog. This is the topmost function that kicks off the Landscape client. It cleans up the environment, loads the configuration, and starts the reactor. @param args: Command line arguments, including the program name as the first element. @param reactor: The reactor to use. If none is specified, the global reactor is used. @raise SystemExit: if command line arguments are bad, or when landscape- client is not running as 'root' or 'landscape'. """ clean_environment() config = WatchDogConfiguration() config.load(args) try: landscape_uid = pwd.getpwnam("landscape").pw_uid except KeyError: sys.exit("The 'landscape' user doesn't exist!") if os.getuid() not in (0, landscape_uid): sys.exit("landscape-client can only be run as 'root' or 'landscape'.") init_logging(config, "watchdog") application = Application("landscape-client") watchdog_service = WatchDogService(config) watchdog_service.setServiceParent(application) if reactor is None: from twisted.internet import reactor # We add a small delay to work around a Twisted bug: this method should # only be called when the reactor is running, but we still get a # PotentialZombieWarning. reactor.callLater(0, startApplication, application, False) reactor.run() return watchdog_service.exit_code landscape-client-14.01/landscape/textmessage.py0000644000175000017500000000603412301414317021375 0ustar andreasandreas""" Support code for the C{landscape-message} utility, which sends a text message to the Landscape web UI via the landscape-client's dbus messaging service (see L{landscape.plugins.dbus_message}). """ import sys from landscape.lib.log import log_failure from landscape.reactor import LandscapeReactor from landscape.broker.amp import RemoteBrokerConnector from landscape.deployment import Configuration class AcceptedTypeError(Exception): """ Raised when a message is sent without 'text-message' being an accepted type. """ class EmptyMessageError(Exception): """Raised when an empty message is provied.""" def send_message(text, broker): """Add a message to the queue via a remote broker. The message is of type C{text-message}. @param broker: A connected L{RemoteBroker} object to use to send the message. @return: A L{Deferred} which will fire with the result of the send. """ def got_session_id(session_id): response = broker.send_message(message, session_id, True) return response message = {"type": "text-message", "message": text} result = broker.get_session_id() result.addCallback(got_session_id) return result def got_result(result): print u"Message sent." def get_message(args): encoding = sys.stdin.encoding or "UTF-8" if len(args) < 2: print ("Please enter your message, and send EOF (Control + D after " "newline) when done.") message = sys.stdin.read().decode(encoding) else: message = u" ".join([x.decode(encoding) for x in args[1:]]) if not message: raise EmptyMessageError("Text messages may not be empty.") return message def got_accepted_types(accepted_types, broker, args): if not "text-message" in accepted_types: raise AcceptedTypeError("Text messages may not be created. Is " "Landscape Client registered with the server?") message = get_message(args) d = send_message(message, broker) d.addCallback(got_result) return d def run(args=sys.argv): """Send a message to Landscape. 
This function runs a Twisted reactor, prints various status messages, and exits the process. """ reactor = LandscapeReactor() config = Configuration() config.load(args) def got_connection(broker): result = broker.get_accepted_message_types() return result.addCallback(got_accepted_types, broker, args) def got_error(failure): log_failure(failure) connector = RemoteBrokerConnector(reactor, config) result = connector.connect() result.addCallback(got_connection) result.addErrback(got_error) result.addBoth(lambda x: connector.disconnect()) # For some obscure reason our LandscapeReactor.stop method calls # reactor.crash() instead of reactor.stop(), which doesn't work # here. Maybe LandscapeReactor.stop should simply use reactor.stop(). result.addBoth(lambda ignored: reactor.call_later( 0, reactor._reactor.stop)) reactor.run() return result landscape-client-14.01/landscape/service.py0000644000175000017500000001144512301414317020506 0ustar andreasandreasimport logging import signal from twisted.application.service import Application, Service from twisted.application.app import startApplication from landscape.log import rotate_logs from landscape.reactor import LandscapeReactor from landscape.deployment import get_versioned_persist, init_logging class LandscapeService(Service, object): """Utility superclass for defining Landscape services. This sets up the reactor and L{Persist} object. @cvar service_name: The lower-case name of the service. This is used to generate the bpickle and the Unix socket filenames. @ivar config: A L{Configuration} object. @ivar reactor: A L{LandscapeReactor} object. @ivar persist: A L{Persist} object, if C{persist_filename} is defined. @ivar factory: A L{LandscapeComponentProtocolFactory}, it must be provided by instances of sub-classes. """ reactor_factory = LandscapeReactor persist_filename = None def __init__(self, config): self.config = config try: from landscape.lib import bpickle_dbus except ImportError: pass else: bpickle_dbus.install() self.reactor = self.reactor_factory() if self.persist_filename: self.persist = get_versioned_persist(self) if not (self.config is not None and self.config.ignore_sigusr1): from twisted.internet import reactor signal.signal( signal.SIGUSR1, lambda signal, frame: reactor.callFromThread(rotate_logs)) def startService(self): Service.startService(self) logging.info("%s started with config %s" % ( self.service_name.capitalize(), self.config.get_config_filename())) def stopService(self): # We don't need to call port.stopListening(), because the reactor # shutdown sequence will do that for us. Service.stopService(self) logging.info("%s stopped with config %s" % ( self.service_name.capitalize(), self.config.get_config_filename())) def run_landscape_service(configuration_class, service_class, args): """Run a Landscape service. This function instantiates the specified L{LandscapeService} subclass and attaches the resulting service object to a Twisted C{Application}. After that it starts the Twisted L{Application} and calls the L{LandscapeReactor.run} method of the L{LandscapeService}'s reactor. @param configuration_class: The service-specific subclass of L{Configuration} used to parse C{args} and build the C{service_class} object. @param service_class: The L{LandscapeService} subclass to create and start. @param args: Command line arguments used to initialize the configuration. 
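    A call sketch, assuming a broker-style configuration/service pair such
    as BrokerConfiguration and BrokerService defined elsewhere in the tree:

        run_landscape_service(BrokerConfiguration, BrokerService,
                              sys.argv[1:])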
""" # Let's consider adding this: # from twisted.python.log import ( # startLoggingWithObserver, PythonLoggingObserver) # startLoggingWithObserver(PythonLoggingObserver().emit, setStdout=False) configuration = configuration_class() configuration.load(args) init_logging(configuration, service_class.service_name) application = Application("landscape-%s" % (service_class.service_name,)) service = service_class(configuration) service.setServiceParent(application) if configuration.clones > 0: # Increase the timeout of AMP's MethodCalls # XXX: we should find a better way to expose this knot, and # not set it globally on the class from landscape.lib.amp import MethodCallSender MethodCallSender.timeout = 300 # Create clones here because LandscapeReactor.__init__ would otherwise # cancel all scheduled delayed calls clones = [] for i in range(configuration.clones): clone_config = configuration.clone() clone_config.computer_title += " Clone %d" % i clone_config.master_data_path = configuration.data_path clone_config.data_path += "-clone-%d" % i clone_config.log_dir += "-clone-%d" % i clone_config.is_clone = True clones.append(service_class(clone_config)) configuration.is_clone = False def start_clones(): # Spawn instances over the given time window start_clones_over = float(configuration.start_clones_over) delay = start_clones_over / configuration.clones for i, clone in enumerate(clones): def start(clone): clone.setServiceParent(application) clone.reactor.fire("run") service.reactor.call_later(delay * (i + 1), start, clone=clone) service.reactor.call_when_running(start_clones) startApplication(application, False) if configuration.ignore_sigint: signal.signal(signal.SIGINT, signal.SIG_IGN) service.reactor.run() landscape-client-14.01/landscape/lib/0000755000175000017500000000000012301414317017235 5ustar andreasandreaslandscape-client-14.01/landscape/lib/fetch.py0000644000175000017500000001351212301414317020702 0ustar andreasandreasimport os import sys from optparse import OptionParser from StringIO import StringIO from twisted.internet.threads import deferToThread from twisted.internet.defer import DeferredList class FetchError(Exception): pass class HTTPCodeError(FetchError): def __init__(self, http_code, body): self.http_code = http_code self.body = body def __str__(self): return "Server returned HTTP code %d" % self.http_code def __repr__(self): return "" % self.http_code class PyCurlError(FetchError): def __init__(self, error_code, message): self.error_code = error_code self._message = message def __str__(self): return "Error %d: %s" % (self.error_code, self.message) def __repr__(self): return "" % (self.error_code, self.message) @property def message(self): return self._message def fetch(url, post=False, data="", headers={}, cainfo=None, curl=None, connect_timeout=30, total_timeout=600, insecure=False, follow=True): """Retrieve a URL and return the content. @param url: The url to be fetched. @param post: If true, the POST method will be used (defaults to GET). @param data: Data to be sent to the server as the POST content. @param headers: Dictionary of header => value entries to be used on the request. @param cainfo: Path to the file with CA certificates. @param insecure: If true, perform curl using insecure option which will not attempt to verify authenticity of the peer's certificate. (Used during autodiscovery) @param follow: If True, follow HTTP redirects (default True). 
""" import pycurl output = StringIO(data) input = StringIO() if curl is None: curl = pycurl.Curl() if post: curl.setopt(pycurl.POST, True) if data: curl.setopt(pycurl.POSTFIELDSIZE, len(data)) curl.setopt(pycurl.READFUNCTION, output.read) if cainfo and url.startswith("https:"): curl.setopt(pycurl.CAINFO, cainfo) if headers: curl.setopt(pycurl.HTTPHEADER, ["%s: %s" % pair for pair in sorted(headers.iteritems())]) if insecure: curl.setopt(pycurl.SSL_VERIFYPEER, False) curl.setopt(pycurl.URL, str(url)) if follow: curl.setopt(pycurl.FOLLOWLOCATION, 1) curl.setopt(pycurl.MAXREDIRS, 5) curl.setopt(pycurl.CONNECTTIMEOUT, connect_timeout) curl.setopt(pycurl.LOW_SPEED_LIMIT, 1) curl.setopt(pycurl.LOW_SPEED_TIME, total_timeout) curl.setopt(pycurl.NOSIGNAL, 1) curl.setopt(pycurl.WRITEFUNCTION, input.write) curl.setopt(pycurl.DNS_CACHE_TIMEOUT, 0) curl.setopt(pycurl.ENCODING, "gzip,deflate") try: curl.perform() except pycurl.error, e: raise PyCurlError(e.args[0], e.args[1]) body = input.getvalue() http_code = curl.getinfo(pycurl.HTTP_CODE) if http_code != 200: raise HTTPCodeError(http_code, body) return body def test(args): parser = OptionParser() parser.add_option("--post", action="store_true") parser.add_option("--data", default="") parser.add_option("--cainfo") options, (url,) = parser.parse_args(args) print fetch(url, post=options.post, data=options.data, cainfo=options.cainfo) def fetch_async(*args, **kwargs): """Retrieve a URL asynchronously. @return: A C{Deferred} resulting in the URL content. """ return deferToThread(fetch, *args, **kwargs) def fetch_many_async(urls, callback=None, errback=None, **kwargs): """ Retrieve a list of URLs asynchronously. @param callback: Optionally, a function that will be fired one time for each successful URL, and will be passed its content and the URL itself. @param errback: Optionally, a function that will be fired one time for each failing URL, and will be passed the failure and the URL itself. @return: A C{DeferredList} whose callback chain will be fired as soon as all downloads have terminated. If an error occurs, the errback chain of the C{DeferredList} will be fired immediatly. """ results = [] for url in urls: result = fetch_async(url, **kwargs) if callback: result.addCallback(callback, url) if errback: result.addErrback(errback, url) results.append(result) return DeferredList(results, fireOnOneErrback=True, consumeErrors=True) def url_to_filename(url, directory=None): """Return the last component of the given C{url}. @param url: The URL to get the filename from. @param directory: Optionally a path to prepend to the returned filename. @note: Any trailing slash in the C{url} will be removed """ filename = url.rstrip("/").split("/")[-1] if directory is not None: filename = os.path.join(directory, filename) return filename def fetch_to_files(urls, directory, logger=None, **kwargs): """ Retrieve a list of URLs and save their content as files in a directory. @param urls: The list URLs to fetch. @param directory: The directory to save the files to, the name of the file will equal the last fragment of the URL. @param logger: Optional function to be used to log errors for failed URLs. 
""" def write(data, url): filename = url_to_filename(url, directory=directory) fd = open(filename, "w") fd.write(data) fd.close() def log_error(failure, url): if logger: logger("Couldn't fetch file from %s (%s)" % ( url, str(failure.value))) return failure return fetch_many_async(urls, callback=write, errback=log_error, **kwargs) if __name__ == "__main__": test(sys.argv[1:]) landscape-client-14.01/landscape/lib/tag.py0000644000175000017500000000077112301414317020367 0ustar andreasandreasimport re _tag_check = re.compile("^\w+[\w-]*$", re.UNICODE) def is_valid_tag(tagname): """Return True if the tag meets our tag requirements.""" return _tag_check.match(tagname) def is_valid_tag_list(tag_list): """Validate a tag_list string. @param tag_list: string like london, server which will be split on the commas and each tag verified for validity. """ if not tag_list: return True return all(is_valid_tag(tag.strip()) for tag in tag_list.split(",")) landscape-client-14.01/landscape/lib/network.py0000644000175000017500000002034712301414317021306 0ustar andreasandreas""" Network introspection utilities using ioctl and the /proc filesystem. """ import array import fcntl import socket import struct import errno import logging __all__ = ["get_active_device_info", "get_network_traffic", "is_64"] # from header /usr/include/bits/ioctls.h SIOCGIFCONF = 0x8912 SIOCGIFFLAGS = 0x8913 SIOCGIFNETMASK = 0x891b SIOCGIFBRDADDR = 0x8919 SIOCGIFADDR = 0x8915 SIOCGIFHWADDR = 0x8927 SIOCETHTOOL = 0x8946 # As defined in include/uapi/linux/sockios.h ETHTOOL_GSET = 0x00000001 # Get status command. # struct definition from header /usr/include/net/if.h # the struct size varies according to the platform arch size # a minimal c program was used to determine the size of the # struct, standard headers removed for brevity. """ #include int main() { printf("Size of struct %lu\n", sizeof(struct ifreq)); } """ IF_STRUCT_SIZE_32 = 32 IF_STRUCT_SIZE_64 = 40 def is_64(): """Returns C{True} if the platform is 64-bit, otherwise C{False}.""" return struct.calcsize("l") == 8 # initialize the struct size as per the machine's architecture IF_STRUCT_SIZE = is_64() and IF_STRUCT_SIZE_64 or IF_STRUCT_SIZE_32 def get_active_interfaces(sock): """Generator yields active network interface names. @param sock: a socket instance. """ max_interfaces = 128 # Setup an array to hold our response, and initialized to null strings. interfaces = array.array("B", "\0" * max_interfaces * IF_STRUCT_SIZE) buffer_size = interfaces.buffer_info()[0] packed_bytes = struct.pack( "iL", max_interfaces * IF_STRUCT_SIZE, buffer_size) byte_length = struct.unpack( "iL", fcntl.ioctl(sock.fileno(), SIOCGIFCONF, packed_bytes))[0] result = interfaces.tostring() # Generator over the interface names already_found = set() for index in range(0, byte_length, IF_STRUCT_SIZE): ifreq_struct = result[index:index + IF_STRUCT_SIZE] interface_name = ifreq_struct[:ifreq_struct.index("\0")] if interface_name not in already_found: already_found.add(interface_name) yield interface_name def get_broadcast_address(sock, interface): """Return the broadcast address associated to an interface. @param sock: a socket instance. @param interface: The name of the interface. """ return socket.inet_ntoa(fcntl.ioctl( sock.fileno(), SIOCGIFBRDADDR, struct.pack("256s", interface[:15]))[20:24]) def get_netmask(sock, interface): """Return the network mask associated to an interface. @param sock: a socket instance. @param interface: The name of the interface. 
""" return socket.inet_ntoa(fcntl.ioctl( sock.fileno(), SIOCGIFNETMASK, struct.pack("256s", interface[:15]))[20:24]) def get_ip_address(sock, interface): """Return the IP address associated to the interface. @param sock: a socket instance. @param interface: The name of the interface. """ return socket.inet_ntoa(fcntl.ioctl( sock.fileno(), SIOCGIFADDR, struct.pack("256s", interface[:15]))[20:24]) def get_mac_address(sock, interface): """ Return the hardware MAC address for an interface in human friendly form, ie. six colon separated groups of two hexadecimal digits. @param sock: a socket instance. @param interface: The name of the interface. """ mac_address = fcntl.ioctl( sock.fileno(), SIOCGIFHWADDR, struct.pack("256s", interface[:15])) return "".join(["%02x:" % ord(char) for char in mac_address[18:24]])[:-1] def get_flags(sock, interface): """Return the integer value of the interface flags for the given interface. @param sock: a socket instance. @param interface: The name of the interface. @see /usr/include/linux/if.h for the meaning of the flags. """ data = fcntl.ioctl( sock.fileno(), SIOCGIFFLAGS, struct.pack("256s", interface[:15])) return struct.unpack("H", data[16:18])[0] def get_active_device_info(skipped_interfaces=("lo",), skip_vlan=True, skip_alias=True): """ Returns a dictionary containing information on each active network interface present on a machine. """ results = [] try: sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_IP) for interface in get_active_interfaces(sock): if interface in skipped_interfaces: continue if skip_vlan and "." in interface: continue if skip_alias and ":" in interface: continue interface_info = {"interface": interface} interface_info["ip_address"] = get_ip_address(sock, interface) interface_info["mac_address"] = get_mac_address(sock, interface) interface_info["broadcast_address"] = get_broadcast_address( sock, interface) interface_info["netmask"] = get_netmask(sock, interface) interface_info["flags"] = get_flags(sock, interface) speed, duplex = get_network_interface_speed(sock, interface) interface_info["speed"] = speed interface_info["duplex"] = duplex results.append(interface_info) finally: del sock return results def get_network_traffic(source_file="/proc/net/dev"): """ Retrieves an array of information regarding the network activity per network interface. """ netdev = open(source_file, "r") lines = netdev.readlines() netdev.close() # Parse out the column headers as keys. _, receive_columns, transmit_columns = lines[1].split("|") columns = ["recv_%s" % column for column in receive_columns.split()] columns.extend(["send_%s" % column for column in transmit_columns.split()]) # Parse out the network devices. devices = {} for line in lines[2:]: if not ":" in line: continue device, data = line.split(":") device = device.strip() devices[device] = dict(zip(columns, map(long, data.split()))) return devices def get_fqdn(): """ Return the current fqdn of the machine, trying hard to return a meaningful name. In particular, it means working against a NetworkManager bug which seems to make C{getfqdn} return localhost6.localdomain6 for machine without a domain since Maverick. 
""" fqdn = socket.getfqdn() if "localhost" in fqdn: # Try the heavy artillery fqdn = socket.getaddrinfo(socket.gethostname(), None, socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_IP, socket.AI_CANONNAME)[0][3] if "localhost" in fqdn: # Another fallback fqdn = socket.gethostname() return fqdn def get_network_interface_speed(sock, interface_name): """ Return the ethernet device's advertised link speed. The return value can be one of: * 10, 100, 1000, 2500, 10000: The interface speed in Mbps * -1: The interface does not support querying for max speed, such as virtio devices for instance. * 0: The cable is not connected to the interface. We cannot measure interface speed, but could if it was plugged in. """ cmd_struct = struct.pack('I39s', ETHTOOL_GSET, '\x00' * 39) status_cmd = array.array('B', cmd_struct) packed = struct.pack('16sP', interface_name, status_cmd.buffer_info()[0]) speed = -1 try: fcntl.ioctl(sock, SIOCETHTOOL, packed) # Status ioctl() call res = status_cmd.tostring() speed, duplex = struct.unpack('12xHB28x', res) except IOError as e: if e.errno == errno.EPERM: logging.warn("Could not determine network interface speed, " "operation not permitted.") elif e.errno != errno.EOPNOTSUPP: raise e speed = -1 duplex = False # Drivers apparently report speed as 65535 when the link is not available # (cable unplugged for example). if speed == 65535: speed = 0 # The drivers report "duplex" to be 255 when the information is not # available. We'll just assume it's False in that case. if duplex == 255: duplex = False duplex = bool(duplex) return speed, duplex landscape-client-14.01/landscape/lib/fs.py0000644000175000017500000000340212301414317020216 0ustar andreasandreas"""File-system utils""" import os import time def create_file(path, content): """Create a file with the given content. @param path: The path to the file. @param content: The content to be written in the file. """ fd = open(path, "w") fd.write(content) fd.close() def append_file(path, content): """Append a file with the given content. The file is created, if it doesn't exist already. @param path: The path to the file. @param content: The content to be written in the file at the end. """ fd = open(path, "a") fd.write(content) fd.close() def read_file(path, limit=None): """Return the content of the given file. @param path: The path to the file. @param limit: An optional read limit. If positive, read up to that number of bytes from the beginning of the file. If negative, read up to that number of bytes from the end of the file. @return content: The content of the file, possibly trimmed to C{limit}. """ fd = open(path, "r") if limit and os.path.getsize(path) > abs(limit): whence = 0 if limit < 0: whence = 2 fd.seek(limit, whence) content = fd.read() fd.close() return content def touch_file(path, offset_seconds=None): """Touch a file, creating it if it doesn't exist. @param path: the path to the file to be touched. @param offset_seconds: a signed integer number of seconds to offset the atime and mtime of the file from the current time. 
""" fd = open(path, "a") fd.close() if offset_seconds is not None: offset_time = long(time.time()) + offset_seconds touch_time = (offset_time, offset_time) else: touch_time = None os.utime(path, touch_time) landscape-client-14.01/landscape/lib/sysstats.py0000644000175000017500000000572512301414317021515 0ustar andreasandreasfrom twisted.internet.utils import getProcessOutputAndValue import os class CommandError(Exception): """Raised when an external command returns a non-zero status.""" class MemoryStats(object): def __init__(self, filename="/proc/meminfo"): data = {} for line in open(filename): if ":" in line: key, value = line.split(":", 1) if key in ["MemTotal", "SwapFree", "SwapTotal", "MemFree", "Buffers", "Cached"]: data[key] = int(value.split()[0]) self.total_memory = data["MemTotal"] // 1024 self.free_memory = (data["MemFree"] + data["Buffers"] + data["Cached"]) // 1024 self.total_swap = data["SwapTotal"] // 1024 self.free_swap = data["SwapFree"] // 1024 @property def used_memory(self): return self.total_memory - self.free_memory @property def used_swap(self): return self.total_swap - self.free_swap @property def free_memory_percentage(self): return (self.free_memory / float(self.total_memory)) * 100 @property def free_swap_percentage(self): if self.total_swap == 0: return 0.0 else: return (self.free_swap / float(self.total_swap)) * 100 @property def used_memory_percentage(self): return 100 - self.free_memory_percentage @property def used_swap_percentage(self): if self.total_swap == 0: return 0.0 else: return 100 - self.free_swap_percentage def get_logged_in_users(): result = getProcessOutputAndValue("who", ["-q"], env=os.environ) def parse_output((stdout_data, stderr_data, status)): if status != 0: raise CommandError(stderr_data) first_line = stdout_data.split("\n", 1)[0] return sorted(set(first_line.split())) return result.addCallback(parse_output) def get_thermal_zones(thermal_zone_path=None): if thermal_zone_path is None: thermal_zone_path = "/proc/acpi/thermal_zone" if os.path.isdir(thermal_zone_path): for zone_name in sorted(os.listdir(thermal_zone_path)): yield ThermalZone(thermal_zone_path, zone_name) class ThermalZone(object): temperature = None temperature_value = None temperature_unit = None def __init__(self, base_path, name): self.name = name self.path = os.path.join(base_path, name) temperature_path = os.path.join(self.path, "temperature") if os.path.isfile(temperature_path): for line in open(temperature_path): if line.startswith("temperature:"): self.temperature = line[12:].strip() try: value, unit = self.temperature.split() self.temperature_value = int(value) self.temperature_unit = unit except ValueError: pass landscape-client-14.01/landscape/lib/message.py0000644000175000017500000000364312301414317021241 0ustar andreasandreas"""Helpers for reliable persistent message queues.""" ANCIENT = 1 def got_next_expected(store, next_expected): """Our peer has told us what it expects our next message's sequence to be. Call this with the message store and sequence number that the peer wants next; this will do various things based on what *this* side has in its outbound queue store. 1. The peer expects a sequence greater than what we last sent. This is the common case and generally it should be expecting last_sent_sequence+len(messages_sent)+1. 2. The peer expects a sequence number our side has already sent, and we no longer have that message. 
In this case, just send *all* messages we have, including the previous generation, starting at the sequence number the peer expects (meaning that messages have probably been lost). 3. The peer expects a sequence number we already sent, and we still have that message cached. In this case, we send starting from that message. If the next expected sequence from the server refers to a message older than we have, then L{ANCIENT} will be returned. """ ret = None old_sequence = store.get_sequence() if next_expected > old_sequence: store.delete_old_messages() pending_offset = next_expected - old_sequence elif next_expected < (old_sequence - store.get_pending_offset()): # "Ancient": The other side wants messages we don't have, # so let's just reset our counter to what it expects. pending_offset = 0 ret = ANCIENT else: # No messages transferred, or # "Old": We'll try to send these old messages that the # other side still wants. pending_offset = (store.get_pending_offset() + next_expected - old_sequence) store.set_pending_offset(pending_offset) store.set_sequence(next_expected) return ret landscape-client-14.01/landscape/lib/cloud.py0000644000175000017500000000314312301414317020716 0ustar andreasandreasfrom landscape.lib.fetch import fetch_async EC2_HOST = "169.254.169.254" EC2_API = "http://%s/latest" % (EC2_HOST,) MAX_LENGTH = 64 def fetch_ec2_meta_data(fetch=None): """Fetch EC2 information about the cloud instance. The C{fetch} parameter is provided for testing purposes only. """ cloud_data = [] # We're not using a DeferredList here because we want to keep the # number of connections to the backend minimal. See lp:567515. deferred = _fetch_ec2_item("instance-id", cloud_data, fetch) deferred.addCallback( lambda ignore: _fetch_ec2_item("instance-type", cloud_data, fetch)) deferred.addCallback( lambda ignore: _fetch_ec2_item("ami-id", cloud_data, fetch)) def return_result(ignore): """Record the instance data returned by the EC2 API.""" def _process_result(value): if value is not None: return value.decode("utf-8")[:MAX_LENGTH] (instance_id, instance_type, ami_id) = cloud_data return { "instance-id": _process_result(instance_id), "ami-id": _process_result(ami_id), "instance-type": _process_result(instance_type)} deferred.addCallback(return_result) return deferred def _fetch_ec2_item(path, accumulate, fetch=None): """ Get data at C{path} on the EC2 API endpoint, and add the result to the C{accumulate} list. The C{fetch} parameter is provided for testing only. """ url = EC2_API + "/meta-data/" + path if fetch is None: fetch = fetch_async return fetch(url, follow=False).addCallback(accumulate.append) landscape-client-14.01/landscape/lib/lsb_release.py0000644000175000017500000000157512301414317022077 0ustar andreasandreas"""Get information from /etc/lsb-release.""" LSB_RELEASE_FILENAME = "/etc/lsb-release" LSB_RELEASE_INFO_KEYS = {"DISTRIB_ID": "distributor-id", "DISTRIB_DESCRIPTION": "description", "DISTRIB_RELEASE": "release", "DISTRIB_CODENAME": "code-name"} def parse_lsb_release(lsb_release_filename): """Return a C{dict} holding information about the system LSB release. @raises: An IOError exception if C{lsb_release_filename} could not be read.
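Example (a sketch; the values are illustrative):

        info = parse_lsb_release("/etc/lsb-release")
        # e.g. {"distributor-id": "Ubuntu",
        #       "description": "Ubuntu 12.04 LTS",
        #       "release": "12.04",
        #       "code-name": "precise"}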
""" fd = open(lsb_release_filename, "r") info = {} try: for line in fd: key, value = line.split("=") if key in LSB_RELEASE_INFO_KEYS: key = LSB_RELEASE_INFO_KEYS[key.strip()] value = value.strip().strip('"') info[key] = value finally: fd.close() return info landscape-client-14.01/landscape/lib/bpickle.py0000644000175000017500000001154312301414317021224 0ustar andreasandreas""" Copyright (c) 2006, Gustavo Niemeyer All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
""" dumps_table = {} loads_table = {} def dumps(obj, _dt=dumps_table): try: return _dt[type(obj)](obj) except KeyError, e: raise ValueError, "Unsupported type: %s" % e def loads(str, _lt=loads_table): if not str: raise ValueError, "Can't load empty string" try: return _lt[str[0]](str, 0)[0] except KeyError, e: raise ValueError, "Unknown type character: %s" % e except IndexError: raise ValueError, "Corrupted data" def dumps_bool(obj): return "b%d" % int(obj) def dumps_int(obj): return "i%s;" % obj def dumps_float(obj): return "f%r;" % obj def dumps_str(obj): return "s%s:%s" % (len(obj), obj) def dumps_unicode(obj): obj = obj.encode("utf-8") return "u%s:%s" % (len(obj), obj) def dumps_list(obj, _dt=dumps_table): return "l%s;" % "".join([_dt[type(val)](val) for val in obj]) def dumps_tuple(obj, _dt=dumps_table): return "t%s;" % "".join([_dt[type(val)](val) for val in obj]) def dumps_dict(obj, _dt=dumps_table): keys = obj.keys() keys.sort() res = [] append = res.append for key in keys: val = obj[key] append(_dt[type(key)](key)) append(_dt[type(val)](val)) return "d%s;" % "".join(res) def dumps_none(obj): return "n" def loads_bool(str, pos): return bool(int(str[pos+1])), pos+2 def loads_int(str, pos): endpos = str.index(";", pos) return int(str[pos+1:endpos]), endpos+1 def loads_float(str, pos): endpos = str.index(";", pos) return float(str[pos+1:endpos]), endpos+1 def loads_str(str, pos): startpos = str.index(":", pos)+1 endpos = startpos+int(str[pos+1:startpos-1]) return str[startpos:endpos], endpos def loads_unicode(str, pos): startpos = str.index(":", pos)+1 endpos = startpos+int(str[pos+1:startpos-1]) return str[startpos:endpos].decode("utf-8"), endpos def loads_list(str, pos, _lt=loads_table): pos += 1 res = [] append = res.append while str[pos] != ";": obj, pos = _lt[str[pos]](str, pos) append(obj) return res, pos+1 def loads_tuple(str, pos, _lt=loads_table): pos += 1 res = [] append = res.append while str[pos] != ";": obj, pos = _lt[str[pos]](str, pos) append(obj) return tuple(res), pos+1 def loads_dict(str, pos, _lt=loads_table): pos += 1 res = {} while str[pos] != ";": key, pos = _lt[str[pos]](str, pos) val, pos = _lt[str[pos]](str, pos) res[key] = val return res, pos+1 def loads_none(str, pos): return None, pos+1 dumps_table.update({ bool: dumps_bool, int: dumps_int, long: dumps_int, float: dumps_float, str: dumps_str, unicode: dumps_unicode, list: dumps_list, tuple: dumps_tuple, dict: dumps_dict, type(None): dumps_none }) loads_table.update({ "b": loads_bool, "i": loads_int, "f": loads_float, "s": loads_str, "u": loads_unicode, "l": loads_list, "t": loads_tuple, "d": loads_dict, "n": loads_none }) landscape-client-14.01/landscape/lib/warning.py0000644000175000017500000000061212301414317021253 0ustar andreasandreas"""Warning utilities for Landscape.""" import warnings def hide_warnings(): """Disable printing of non-UserWarning warnings. This should be used for any programs that are being run by a user in a production environment: warnings that aren't UserWarnings are meant for developers. """ warnings.simplefilter("ignore") warnings.simplefilter("default", UserWarning) landscape-client-14.01/landscape/lib/sequenceranges.py0000644000175000017500000001306512301414317022624 0ustar andreasandreas class SequenceError(Exception): """Raised when the sequence isn't proper for translation to ranges.""" class SequenceRanges(object): """High level interface to ranges. 
A ranges list represents a sequence of ordered and non-repeating elements in a more compact format, by representing 3 or more consecutive entries by a range. This means that a sequence such as [1, 2, 4, 5, 6, 8, 10, 11, 12, 14] becomes [1, 2, (4, 6), 8, (10, 12), 14] """ def __init__(self): self._ranges = [] @classmethod def from_sequence(cls, sequence): obj = cls() obj._ranges[:] = sequence_to_ranges(sequence) return obj @classmethod def from_ranges(cls, ranges): obj = cls() obj._ranges[:] = ranges return obj def to_sequence(self): return list(ranges_to_sequence(self._ranges)) def to_ranges(self): return list(self._ranges) def __iter__(self): return ranges_to_sequence(self._ranges) def __contains__(self, item): index = find_ranges_index(self._ranges, item) if index < len(self._ranges): test = self._ranges[index] if isinstance(test, tuple): return (test[0] <= item <= test[1]) return (test == item) return False def add(self, item): add_to_ranges(self._ranges, item) def remove(self, item): remove_from_ranges(self._ranges, item) def sequence_to_ranges(sequence): """Iterate over range items that compose the given sequence.""" iterator = iter(sequence) try: range_start = range_stop = iterator.next() except StopIteration: return while range_start is not None: try: item = iterator.next() except StopIteration: item = None if item == range_stop + 1: range_stop += 1 else: if item is not None and item <= range_stop: if item < range_stop: raise SequenceError("Sequence is unordered (%r < %r)" % (item, range_stop)) else: raise SequenceError("Found duplicated item (%r)" % (item,)) if range_stop == range_start: yield range_start elif range_stop == range_start + 1: yield range_start yield range_stop else: yield (range_start, range_stop) range_start = range_stop = item def ranges_to_sequence(ranges): """Iterate over individual items represented in a ranges list.""" for item in ranges: if isinstance(item, tuple): start, end = item if start > end: raise ValueError("Range error %d > %d" % (start, end)) for item in xrange(start, end + 1): yield item else: yield item def find_ranges_index(ranges, item): """Find the index where an entry *may* be.""" lo = 0 hi = len(ranges) while lo < hi: mid = (lo + hi) // 2 test = ranges[mid] try: test = test[1] except TypeError: pass if item > test: lo = mid + 1 else: hi = mid return lo def add_to_ranges(ranges, item): """Insert item in ranges, reorganizing as needed.""" index_start = index_stop = index = find_ranges_index(ranges, item) range_start = range_stop = item ranges_len = len(ranges) # Look for duplicates. if index < ranges_len: test = ranges[index] if isinstance(test, tuple): if test[0] <= item <= test[1]: return elif test == item: return # Merge to the left side. while index_start > 0: test = ranges[index_start - 1] if isinstance(test, tuple): if test[1] != range_start - 1: break range_start = test[0] else: if test != range_start - 1: break range_start -= 1 index_start -= 1 # Merge to the right side.
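    # For instance, adding 20 to [19, (21, 24)]: the left scan above
    # absorbs the lone 19, the loop below absorbs (21, 24), and the
    # result collapses into the single range [(19, 24)].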
while index_stop < ranges_len: test = ranges[index_stop] if isinstance(test, tuple): if test[0] != range_stop + 1: break range_stop = test[1] else: if test != range_stop + 1: break range_stop += 1 index_stop += 1 if range_stop - range_start < 2: ranges.insert(index, item) else: ranges[index_start:index_stop] = ((range_start, range_stop),) def remove_from_ranges(ranges, item): """Remove item from ranges, reorganizing as needed.""" index = find_ranges_index(ranges, item) ranges_len = len(ranges) if index < ranges_len: test = ranges[index] if isinstance(test, tuple): range_start, range_stop = test if item >= range_start: # Handle right side of the range (and replace original item). if range_stop < item + 3: ranges[index:index + 1] = range(item + 1, range_stop + 1) else: ranges[index:index + 1] = ((item + 1, range_stop),) # Handle left side of the range. if range_start > item - 3: if range_start != item: ranges[index:index] = range(range_start, item) else: ranges[index:index] = ((range_start, item - 1),) elif item == test: del ranges[index] landscape-client-14.01/landscape/lib/lock.py0000644000175000017500000000130112301414317020532 0ustar andreasandreasimport fcntl import time import os class LockError(Exception): """Raised when unable to lock a file.""" def lock_path(path, timeout=0): fd = os.open(path, os.O_CREAT) flags = fcntl.fcntl(fd, fcntl.F_GETFD, 0) flags |= fcntl.FD_CLOEXEC fcntl.fcntl(fd, fcntl.F_SETFD, flags) started = time.time() while True: try: fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) except IOError: if started < time.time() - timeout: raise LockError("Couldn't obtain lock") else: break time.sleep(0.1) def unlock_path(): fcntl.flock(fd, fcntl.LOCK_UN) os.close(fd) return unlock_path landscape-client-14.01/landscape/lib/bootstrap.py0000644000175000017500000000261012301414317021623 0ustar andreasandreasfrom string import Template import pwd import grp import os class BootstrapList(object): def __init__(self, bootstraps): self._bootstraps = bootstraps def bootstrap(self, **vars): for bootstrap in self._bootstraps: bootstrap.bootstrap(**vars) class BootstrapPath(object): def __init__(self, path, username=None, group=None, mode=None): self.path = path self.username = username self.group = group self.mode = mode def _create(self, path): pass def bootstrap(self, **vars): path = Template(self.path).substitute(**vars) self._create(path) if self.mode is not None: os.chmod(path, self.mode) if os.getuid() == 0: if self.username is not None: uid = pwd.getpwnam(self.username).pw_uid else: uid = -1 if self.group is not None: gid = grp.getgrnam(self.group).gr_gid else: gid = -1 if uid != -1 or gid != -1: os.chown(path, uid, gid) class BootstrapFile(BootstrapPath): def _create(self, path): open(path, "a").close() class BootstrapDirectory(BootstrapPath): def _create(self, path): try: os.makedirs(path) except OSError: if not os.path.isdir(path): raise landscape-client-14.01/landscape/lib/juju.py0000644000175000017500000000135012301414317020563 0ustar andreasandreasimport json import logging import os.path from landscape.lib.fs import read_file def get_juju_info(config): """ Returns the Juju info or C{None} if the path referenced from L{config} is not a file or that file isn't valid JSON. 
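Example (a sketch; the file contents are illustrative). Given a
    C{juju_filename} containing::

        {"environment-uuid": "uuid1", "unit-name": "unit/0",
         "api-addresses": "10.0.3.1:17070 10.0.3.2:17070"}

    this returns the parsed dictionary with "api-addresses" split
    into the list ["10.0.3.1:17070", "10.0.3.2:17070"].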
""" juju_filename = config.juju_filename if not os.path.isfile(juju_filename): return None json_contents = read_file(juju_filename) try: juju_info = json.loads(json_contents) except Exception: logging.exception( "Error attempting to read JSON from %s" % juju_filename) return None else: if "api-addresses" in juju_info: juju_info["api-addresses"] = juju_info["api-addresses"].split() return juju_info landscape-client-14.01/landscape/lib/vm_info.py0000644000175000017500000000371612301414317021253 0ustar andreasandreas""" Network introspection utilities using ioctl and the /proc filesystem. """ import os from landscape.lib.fs import read_file def get_vm_info(root_path="/"): """ Return a string with the virtualization type if it's known, an empty string otherwise. It loops through some possible configurations and return a string with the name of the technology being used or None if there's no match """ def join_root_path(path): return os.path.join(root_path, path) xen_paths = ["proc/sys/xen", "proc/xen"] xen_paths = map(join_root_path, xen_paths) vz_path = join_root_path("proc/vz") if os.path.exists(vz_path): return "openvz" elif filter(os.path.exists, xen_paths): return "xen" # /sys/bus/xen exists on most machines, but only virtual machines have # devices sys_xen_path = join_root_path("sys/bus/xen/devices") if os.path.isdir(sys_xen_path) and os.listdir(sys_xen_path): return "xen" sys_vendor_path = join_root_path("sys/class/dmi/id/sys_vendor") if not os.path.exists(sys_vendor_path): # Some virtualised CPU architectures indicate this via cpuinfo cpuinfo_path = join_root_path("proc/cpuinfo") if os.path.exists(cpuinfo_path): cpuinfo = read_file(cpuinfo_path) if "qemu" in cpuinfo: return "kvm" return "" vendor = read_file(sys_vendor_path) content_vendors_map = ( ("VMware, Inc.", "vmware"), ("Microsoft Corporation", "hyperv"), ("Bochs", "kvm"), ("OpenStack", "kvm"), ("innotek GmbH", "virtualbox")) for name, vm_type in content_vendors_map: if name in vendor: return vm_type return "" def get_container_info(path="/run/container_type"): """ Return a string with the type of container the client is running in, if any, an empty string otherwise. """ return read_file(path).strip() if os.path.exists(path) else "" landscape-client-14.01/landscape/lib/store.py0000644000175000017500000000256212301414317020750 0ustar andreasandreas"""Functions used by all sqlite-backed stores.""" try: import sqlite3 except ImportError: from pysqlite2 import dbapi2 as sqlite3 def with_cursor(method): """Decorator that encloses the method in a database transaction. Even though SQLite is supposed to be useful in autocommit mode, we've found cases where the database continued to be locked for writing until the cursor was closed. With this in mind, instead of using the autocommit mode, we explicitly terminate transactions and enforce cursor closing with this decorator. """ def inner(self, *args, **kwargs): if not self._db: # Create the database connection only when we start to actually # use it. 
This is essentially just a workaroud of a sqlite bug # happening when 2 concurrent processes try to create the tables # around the same time, the one which fails having an incorrect # cache and not seeing the tables self._db = sqlite3.connect(self._filename) self._ensure_schema() try: cursor = self._db.cursor() try: result = method(self, cursor, *args, **kwargs) finally: cursor.close() self._db.commit() except: self._db.rollback() raise return result return inner landscape-client-14.01/landscape/lib/log.py0000644000175000017500000000067412301414317020377 0ustar andreasandreasimport logging def log_failure(failure, msg=None, logger=None): """Log a L{twisted.python.failure.Failure} to the Python L{logging} module. The failure should be formatted as a regular exception, but a traceback may not be available. If C{msg} is passed, it will included before the traceback. """ if logger is None: logger = logging logger.error(msg, exc_info=(failure.type, failure.value, failure.tb)) landscape-client-14.01/landscape/lib/twisted_util.py0000644000175000017500000001007412301414317022331 0ustar andreasandreasfrom twisted.internet.defer import DeferredList, Deferred from twisted.internet.protocol import ProcessProtocol from twisted.internet.process import Process, ProcessReader from twisted.internet import reactor import cStringIO def gather_results(deferreds, consume_errors=False): d = DeferredList(deferreds, fireOnOneErrback=1, consumeErrors=consume_errors) d.addCallback(lambda r: [x[1] for x in r]) d.addErrback(lambda f: f.value.subFailure) return d class AllOutputProcessProtocol(ProcessProtocol): """A process protocol for getting stdout, stderr and exit code.""" def __init__(self, deferred, stdin=None, line_received=None): self.deferred = deferred self.outBuf = cStringIO.StringIO() self.errBuf = cStringIO.StringIO() self.errReceived = self.errBuf.write self.stdin = stdin self.line_received = line_received self._partial_line = "" def connectionMade(self): if self.stdin is not None: self.transport.write(self.stdin) self.transport.closeStdin() def outReceived(self, data): self.outBuf.write(data) if self.line_received is None: return # data may contain more than one line, so we split the output and save # the last line. If it's an empty string nothing happens, otherwise it # will be returned once complete lines = data.split("\n") lines[0] = self._partial_line + lines[0] self._partial_line = lines.pop() for line in lines: self.line_received(line) def processEnded(self, reason): if self._partial_line: self.line_received(self._partial_line) self._partial_line = "" out = self.outBuf.getvalue() err = self.errBuf.getvalue() e = reason.value code = e.exitCode if e.signal: self.deferred.errback((out, err, e.signal)) else: self.deferred.callback((out, err, code)) def spawn_process(executable, args=(), env={}, path=None, uid=None, gid=None, usePTY=False, wait_pipes=True, line_received=None, stdin=None): """ Spawn a process using Twisted reactor. Return a deferred which will be called with process stdout, stderr and exit code. @param wait_pipes: if set to False, don't wait for stdin/stdout pipes to close when process ends. @param line_received: an optional callback called with every line of output from the process as parameter. @note: compared to reactor.spawnProcess, this version does NOT require the executable name as first element of args. 
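Example (a minimal sketch; /bin/ls is illustrative):

        def report((out, err, code)):
            # The deferred fires with an (out, err, code) tuple
            print "exit %d, %d bytes on stdout" % (code, len(out))

        result = spawn_process("/bin/ls", args=("-l", "/tmp"),
                               env={"LC_ALL": "C"})
        result.addCallback(report)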
""" list_args = [executable] list_args.extend(args) result = Deferred() protocol = AllOutputProcessProtocol(result, stdin=stdin, line_received=line_received) process = reactor.spawnProcess(protocol, executable, args=list_args, env=env, path=path, uid=uid, gid=gid, usePTY=usePTY) if not wait_pipes: def maybeCallProcessEnded(): """A less strict version of Process.maybeCallProcessEnded. This behaves exactly like the original method, but in case the process has ended already and sent us a SIGCHLD, it doesn't wait for the stdin/stdout pipes to close, because the child process itself might have passed them to its own child processes. @note: Twisted 8.2 now has a processExited hook that could be used in place of this workaround. """ if process.pipes and not process.pid: for pipe in process.pipes.itervalues(): if isinstance(pipe, ProcessReader): # Read whatever is left pipe.doRead() pipe.stopReading() process.pipes = {} Process.maybeCallProcessEnded(process) process.maybeCallProcessEnded = maybeCallProcessEnded return result landscape-client-14.01/landscape/lib/scriptcontent.py0000644000175000017500000000062512301414317022511 0ustar andreasandreasfrom landscape.lib.hashlib import md5 def build_script(interpreter, code): """ Concatenates a interpreter and script into an executable script. """ return "#!%s\n%s" % ((interpreter or u"").encode("utf-8"), (code or u"").encode("utf-8")) def generate_script_hash(script): """ Return a hash for a given script. """ return md5(script).hexdigest() landscape-client-14.01/landscape/lib/jiffies.py0000644000175000017500000000312512301414317021227 0ustar andreasandreasimport os def detect_jiffies(): """Returns the number of jiffies per second for this machine. A jiffy is a value used by the kernel to report certain time-based events. Jiffies occur N times per second where N varies depending on the hardware the kernel is running on. This function gets the uptime for the current process, forks a child process and gets the uptime again; finally, using the running time of the child process compared with the uptimes to determine number of jiffies per second. """ uptime1_file = open("/proc/uptime") uptime2_file = open("/proc/uptime") read_uptime1 = uptime1_file.read read_uptime2 = uptime2_file.read while True: uptime1_data = read_uptime1() # Fork a process and exit immediately; this results in the # child process being left around as a zombie until waitpid() # is called. pid = os.fork() if pid == 0: os._exit(0) uptime2_data = read_uptime2() stat_file = open("/proc/%d/stat" % pid) stat_data = stat_file.read() stat_file.close() os.waitpid(pid, 0) seconds_uptime1 = float(uptime1_data.split()[0]) seconds_uptime2 = float(uptime2_data.split()[0]) jiffie_uptime = int(stat_data.split()[21]) jiffies1 = int(jiffie_uptime/seconds_uptime1+0.5) jiffies2 = int(jiffie_uptime/seconds_uptime2+0.5) if jiffies1 == jiffies2: break uptime1_file.seek(0) uptime2_file.seek(0) uptime1_file.close() uptime2_file.close() return jiffies1 landscape-client-14.01/landscape/lib/process.py0000644000175000017500000001501312301414317021265 0ustar andreasandreasimport logging import os from datetime import timedelta, datetime from landscape.lib.timestamp import to_timestamp from landscape.lib.jiffies import detect_jiffies # FIXME: It'd be nice to avoid having library code which depends on # landscape-specific modules. from landscape.monitor.computeruptime import BootTimes, get_uptime class ProcessInformation(object): """ @param proc_dir: The directory to use for process information. 
@param jiffies: The value to use for jiffies per second. @param boot_time: An alternate value to use for the last boot time. If None, the system last boot time will be used. @param uptime: The uptime value to use (for unit tests only). """ def __init__(self, proc_dir="/proc", jiffies=None, boot_time=None, uptime=None): if boot_time is None: boot_time = BootTimes().get_last_boot_time() if boot_time is not None: boot_time = datetime.utcfromtimestamp(boot_time) self._boot_time = boot_time self._proc_dir = proc_dir self._jiffies_per_sec = jiffies or detect_jiffies() self._uptime = uptime def get_all_process_info(self): """Get process information for all processes on the system.""" for filename in os.listdir(self._proc_dir): try: process_id = int(filename) except ValueError: continue process_info = self.get_process_info(process_id) if process_info: yield process_info def get_process_info(self, process_id): """ Parse the /proc//cmdline and /proc//status files for information about the running process with process_id. The /proc filesystem doesn't behave like ext2, open files can disappear during the read process. """ cmd_line_name = "" process_dir = os.path.join(self._proc_dir, str(process_id)) process_info = {"pid": process_id} try: file = open(os.path.join(process_dir, "cmdline"), "r") try: # cmdline is a \0 separated list of strings # We take the first, and then strip off the path, leaving # us with the basename. cmd_line = file.readline() cmd_line_name = os.path.basename(cmd_line.split("\0")[0]) finally: file.close() file = open(os.path.join(process_dir, "status"), "r") try: for line in file: parts = line.split(":", 1) if parts[0] == "Name": process_info["name"] = (cmd_line_name.strip() or parts[1].strip()) elif parts[0] == "State": state = parts[1].strip() # In Lucid, capital T is used for both tracing stop # and stopped. Starting with Natty, lowercase t is # used for tracing stop. if state == "T (tracing stop)": state = state.lower() process_info["state"] = state[0] elif parts[0] == "Uid": value_parts = parts[1].split() process_info["uid"] = int(value_parts[0]) elif parts[0] == "Gid": value_parts = parts[1].split() process_info["gid"] = int(value_parts[0]) elif parts[0] == "VmSize": value_parts = parts[1].split() process_info["vm-size"] = int(value_parts[0]) break finally: file.close() file = open(os.path.join(process_dir, "stat"), "r") try: # These variable names are lifted directly from proc(5) # utime: The number of jiffies that this process has been # scheduled in user mode. # stime: The number of jiffies that this process has been # scheduled in kernel mode. # cutime: The number of jiffies that this process's waited-for # children have been scheduled in user mode. # cstime: The number of jiffies that this process's waited-for # children have been scheduled in kernel mode. parts = file.read().split() start_time = int(parts[21]) utime = int(parts[13]) stime = int(parts[14]) uptime = self._uptime or get_uptime() pcpu = calculate_pcpu(utime, stime, uptime, start_time, self._jiffies_per_sec) process_info["percent-cpu"] = pcpu delta = timedelta(0, start_time // self._jiffies_per_sec) if self._boot_time is None: logging.warning( "Skipping process (PID %s) without boot time.") return None process_info["start-time"] = to_timestamp( self._boot_time + delta) finally: file.close() except IOError: # Handle the race that happens when we find a process # which terminates before we open the stat file. 
return None assert("pid" in process_info and "state" in process_info and "name" in process_info and "uid" in process_info and "gid" in process_info and "start-time" in process_info) return process_info def calculate_pcpu(utime, stime, uptime, start_time, hertz): """ Implement ps' algorithm to calculate the percentage cpu utilisation for a process.:: unsigned long long total_time; /* jiffies used by this process */ unsigned pcpu = 0; /* scaled %cpu, 99 means 99% */ unsigned long long seconds; /* seconds of process life */ total_time = pp->utime + pp->stime; if(include_dead_children) total_time += (pp->cutime + pp->cstime); seconds = seconds_since_boot - pp->start_time / hertz; if(seconds) pcpu = (total_time * 100ULL / hertz) / seconds; if (pcpu > 99U) pcpu = 99U; return snprintf(outbuf, COLWID, "%2u", pcpu); """ pcpu = 0 total_time = utime + stime seconds = uptime - (start_time / hertz) if seconds: pcpu = total_time * 100 / hertz / seconds return round(max(min(pcpu, 99.0), 0), 1) landscape-client-14.01/landscape/lib/gpg.py0000644000175000017500000000241312301414317020364 0ustar andreasandreasimport shutil import tempfile from twisted.internet.utils import getProcessOutputAndValue class InvalidGPGSignature(Exception): """Raised when the gpg signature for a given file is invalid.""" def gpg_verify(filename, signature, gpg="/usr/bin/gpg"): """Verify the GPG signature of a file. @param filename: Path to the file to verify the signature against. @param signature: Path to signature to use. @param gpg: Optionally, path to the GPG binary to use. @return: a C{Deferred} resulting in C{True} if the signature is valid, C{False} otherwise. """ def remove_gpg_home(ignored): shutil.rmtree(gpg_home) return ignored def check_gpg_exit_code((out, err, code)): if code != 0: raise InvalidGPGSignature("%s failed (out='%s', err='%s', " "code='%d')" % (gpg, out, err, code)) gpg_home = tempfile.mkdtemp() args = ("--no-options", "--homedir", gpg_home, "--no-default-keyring", "--ignore-time-conflict", "--keyring", "/etc/apt/trusted.gpg", "--verify", signature, filename) result = getProcessOutputAndValue(gpg, args=args) result.addBoth(remove_gpg_home) result.addCallback(check_gpg_exit_code) return result landscape-client-14.01/landscape/lib/md5crypt.py0000644000175000017500000000775012301414317021367 0ustar andreasandreas######################################################### # md5crypt.py # # 0423.2000 by michal wallace http://www.sabren.com/ # based on perl's Crypt::PasswdMD5 by Luis Munoz (lem@cantv.net) # based on /usr/src/libcrypt/crypt.c from FreeBSD 2.2.5-RELEASE # # MANY THANKS TO # # Carey Evans - http://home.clear.net.nz/pages/c.evans/ # Dennis Marti - http://users.starpower.net/marti1/ # # For the patches that got this thing working! # ######################################################### """md5crypt.py - Provides interoperable MD5-based crypt() function SYNOPSIS import md5crypt.py cryptedpassword = md5crypt.md5crypt(password, salt); DESCRIPTION unix_md5_crypt() provides a crypt()-compatible interface to the rather new MD5-based crypt() function found in modern operating systems. It's based on the implementation found on FreeBSD 2.2.[56]-RELEASE and contains the following license in it: "THE BEER-WARE LICENSE" (Revision 42): wrote this file. As long as you retain this notice you can do whatever you want with this stuff. If we meet some day, and you think this stuff is worth it, you can buy me a beer in return. 
Poul-Henning Kamp apache_md5_crypt() provides a function compatible with Apache's .htpasswd files. This was contributed by Bryan Hart . """ MAGIC = '$1$' # Magic string ITOA64 = "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" from landscape.lib.hashlib import md5 def to64 (v, n): ret = '' while (n - 1 >= 0): n = n - 1 ret = ret + ITOA64[v & 0x3f] v = v >> 6 return ret def apache_md5_crypt (pw, salt): # change the Magic string to match the one used by Apache return unix_md5_crypt(pw, salt, '$apr1$') def unix_md5_crypt(pw, salt, magic=None): if magic==None: magic = MAGIC # Take care of the magic string if present if salt[:len(magic)] == magic: salt = salt[len(magic):] # salt can have up to 8 characters: import string salt = string.split(salt, '$', 1)[0] salt = salt[:8] ctx = pw + magic + salt final = md5(pw + salt + pw).digest() for pl in range(len(pw),0,-16): if pl > 16: ctx = ctx + final[:16] else: ctx = ctx + final[:pl] # Now the 'weird' xform (??) i = len(pw) while i: if i & 1: ctx = ctx + chr(0) #if ($i & 1) { $ctx->add(pack("C", 0)); } else: ctx = ctx + pw[0] i = i >> 1 final = md5(ctx).digest() # The following is supposed to make # things run slower. # my question: WTF??? for i in range(1000): ctx1 = '' if i & 1: ctx1 = ctx1 + pw else: ctx1 = ctx1 + final[:16] if i % 3: ctx1 = ctx1 + salt if i % 7: ctx1 = ctx1 + pw if i & 1: ctx1 = ctx1 + final[:16] else: ctx1 = ctx1 + pw final = md5(ctx1).digest() # Final xform passwd = '' passwd = passwd + to64((int(ord(final[0])) << 16) |(int(ord(final[6])) << 8) |(int(ord(final[12]))),4) passwd = passwd + to64((int(ord(final[1])) << 16) |(int(ord(final[7])) << 8) |(int(ord(final[13]))), 4) passwd = passwd + to64((int(ord(final[2])) << 16) |(int(ord(final[8])) << 8) |(int(ord(final[14]))), 4) passwd = passwd + to64((int(ord(final[3])) << 16) |(int(ord(final[9])) << 8) |(int(ord(final[15]))), 4) passwd = passwd + to64((int(ord(final[4])) << 16) |(int(ord(final[10])) << 8) |(int(ord(final[5]))), 4) passwd = passwd + to64((int(ord(final[11]))), 2) return magic + salt + '$' + passwd ## assign a wrapper function: md5crypt = unix_md5_crypt if __name__ == "__main__": print unix_md5_crypt("cat", "hat") landscape-client-14.01/landscape/lib/tests/0000755000175000017500000000000012301414317020377 5ustar andreasandreaslandscape-client-14.01/landscape/lib/tests/test_sysstats.py0000644000175000017500000001636212301414317023715 0ustar andreasandreasimport os import re from landscape.lib.sysstats import ( MemoryStats, CommandError, get_logged_in_users, get_thermal_zones) from landscape.tests.helpers import LandscapeTest, EnvironSaverHelper SAMPLE_MEMORY_INFO = """ MemTotal: 1546436 kB MemFree: 23452 kB Buffers: 41656 kB Cached: 807628 kB SwapCached: 17572 kB Active: 1030792 kB Inactive: 426892 kB HighTotal: 0 kB HighFree: 0 kB LowTotal: 1546436 kB LowFree: 23452 kB SwapTotal: 1622524 kB SwapFree: 1604936 kB Dirty: 1956 kB Writeback: 0 kB Mapped: 661772 kB Slab: 54980 kB CommitLimit: 2395740 kB Committed_AS: 1566888 kB PageTables: 2728 kB VmallocTotal: 516088 kB VmallocUsed: 5660 kB VmallocChunk: 510252 kB """ class MemoryStatsTest(LandscapeTest): def test_get_memory_info(self): filename = self.makeFile(SAMPLE_MEMORY_INFO) memstats = MemoryStats(filename) self.assertEqual(memstats.total_memory, 1510) self.assertEqual(memstats.free_memory, 852) self.assertEqual(memstats.used_memory, 658) self.assertEqual(memstats.total_swap, 1584) self.assertEqual(memstats.free_swap, 1567) self.assertEqual(memstats.used_swap, 17) self.assertEqual("%.2f" % 
memstats.free_memory_percentage, "56.42") self.assertEqual("%.2f" % memstats.free_swap_percentage, "98.93") self.assertEqual("%.2f" % memstats.used_memory_percentage, "43.58") self.assertEqual("%.2f" % memstats.used_swap_percentage, "1.07") def test_get_memory_info_without_swap(self): sample = re.subn(r"Swap(Free|Total): *\d+ kB", r"Swap\1: 0", SAMPLE_MEMORY_INFO)[0] filename = self.makeFile(sample) memstats = MemoryStats(filename) self.assertEqual(memstats.total_swap, 0) self.assertEqual(memstats.free_swap, 0) self.assertEqual(memstats.used_swap, 0) self.assertEqual(memstats.used_swap_percentage, 0) self.assertEqual(memstats.free_swap_percentage, 0) self.assertEqual(type(memstats.used_swap_percentage), float) self.assertEqual(type(memstats.free_swap_percentage), float) class FakeWhoQTest(LandscapeTest): helpers = [EnvironSaverHelper] def fake_who(self, users): dirname = self.makeDir() os.environ["PATH"] = "%s:%s" % (dirname, os.environ["PATH"]) self.who_path = os.path.join(dirname, "who") who = open(self.who_path, "w") who.write("#!/bin/sh\n") who.write("test x$1 = x-q || echo missing-parameter\n") who.write("echo %s\n" % users) who.write("echo '# users=%d'\n" % len(users.split())) who.close() os.chmod(self.who_path, 0770) class LoggedInUsersTest(FakeWhoQTest): def test_one_user(self): self.fake_who("joe") result = get_logged_in_users() result.addCallback(self.assertEqual, ["joe"]) return result def test_one_user_multiple_times(self): self.fake_who("joe joe joe joe") result = get_logged_in_users() result.addCallback(self.assertEqual, ["joe"]) return result def test_many_users(self): self.fake_who("joe moe boe doe") result = get_logged_in_users() result.addCallback(self.assertEqual, ["boe", "doe", "joe", "moe"]) return result def test_command_error(self): self.fake_who("") who = open(self.who_path, "w") who.write("#!/bin/sh\necho ERROR 1>&2\nexit 1\n") who.close() result = get_logged_in_users() def assert_failure(failure): failure.trap(CommandError) self.assertEqual(str(failure.value), "ERROR\n") result.addErrback(assert_failure) return result class ThermalZoneTest(LandscapeTest): def setUp(self): super(ThermalZoneTest, self).setUp() self.thermal_zone_path = self.makeDir() def get_thermal_zones(self): return list(get_thermal_zones(self.thermal_zone_path)) def write_thermal_zone(self, name, temperature): zone_path = os.path.join(self.thermal_zone_path, name) if not os.path.isdir(zone_path): os.mkdir(zone_path) file = open(os.path.join(zone_path, "temperature"), "w") file.write("temperature: " + temperature) file.close() class GetThermalZonesTest(ThermalZoneTest): def test_non_existent_thermal_zone_directory(self): thermal_zones = list(get_thermal_zones("/non-existent/thermal_zone")) self.assertEqual(thermal_zones, []) def test_empty_thermal_zone_directory(self): self.assertEqual(self.get_thermal_zones(), []) def test_one_thermal_zone(self): self.write_thermal_zone("THM0", "50 C") thermal_zones = self.get_thermal_zones() self.assertEqual(len(thermal_zones), 1) self.assertEqual(thermal_zones[0].name, "THM0") self.assertEqual(thermal_zones[0].temperature, "50 C") self.assertEqual(thermal_zones[0].temperature_value, 50) self.assertEqual(thermal_zones[0].temperature_unit, "C") self.assertEqual(thermal_zones[0].path, os.path.join(self.thermal_zone_path, "THM0")) def test_two_thermal_zones(self): self.write_thermal_zone("THM0", "50 C") self.write_thermal_zone("THM1", "51 C") thermal_zones = self.get_thermal_zones() self.assertEqual(len(thermal_zones), 2) 
self.assertEqual(thermal_zones[0].temperature, "50 C") self.assertEqual(thermal_zones[0].temperature_value, 50) self.assertEqual(thermal_zones[0].temperature_unit, "C") self.assertEqual(thermal_zones[1].temperature, "51 C") self.assertEqual(thermal_zones[1].temperature_value, 51) self.assertEqual(thermal_zones[1].temperature_unit, "C") def test_badly_formatted_temperature(self): self.write_thermal_zone("THM0", "SOMETHING BAD") thermal_zones = self.get_thermal_zones() self.assertEqual(len(thermal_zones), 1) self.assertEqual(thermal_zones[0].temperature, "SOMETHING BAD") self.assertEqual(thermal_zones[0].temperature_value, None) self.assertEqual(thermal_zones[0].temperature_unit, None) def test_badly_formatted_with_missing_space(self): self.write_thermal_zone("THM0", "SOMETHINGBAD") thermal_zones = self.get_thermal_zones() self.assertEqual(len(thermal_zones), 1) self.assertEqual(thermal_zones[0].temperature, "SOMETHINGBAD") self.assertEqual(thermal_zones[0].temperature_value, None) self.assertEqual(thermal_zones[0].temperature_unit, None) def test_temperature_file_with_missing_label(self): self.write_thermal_zone("THM0", "SOMETHINGBAD") temperature_path = os.path.join(self.thermal_zone_path, "THM0/temperature") file = open(temperature_path, "w") file.write("bad-label: foo bar\n") file.close() thermal_zones = self.get_thermal_zones() self.assertEqual(len(thermal_zones), 1) self.assertEqual(thermal_zones[0].temperature, None) self.assertEqual(thermal_zones[0].temperature_value, None) self.assertEqual(thermal_zones[0].temperature_unit, None) landscape-client-14.01/landscape/lib/tests/test_tag.py0000644000175000017500000000275512301414317022574 0ustar andreasandreasimport unittest from landscape.lib.tag import is_valid_tag, is_valid_tag_list class ValidTagTest(unittest.TestCase): def test_valid_tags(self): """Test valid tags.""" self.assertTrue(is_valid_tag(u"london")) self.assertTrue(is_valid_tag(u"server")) self.assertTrue(is_valid_tag(u"ubuntu-server")) self.assertTrue(is_valid_tag(u"location-1234")) self.assertTrue( is_valid_tag(u"prova\N{LATIN SMALL LETTER J WITH CIRCUMFLEX}o")) def test_invalid_tags(self): """Test invalid tags.""" self.assertFalse(is_valid_tag(u"!!!")) self.assertFalse(is_valid_tag(u"location 1234")) self.assertFalse(is_valid_tag(u"ubuntu server")) def test_valid_tag_list(self): """Test valid taglist format strings.""" self.assertTrue(is_valid_tag_list(u"london, server")) self.assertTrue(is_valid_tag_list(u"ubuntu-server,london")) self.assertTrue(is_valid_tag_list(u"location-1234, server")) self.assertTrue( is_valid_tag_list( u"prova\N{LATIN SMALL LETTER J WITH CIRCUMFLEX}o, server")) def test_invalid_tag_list(self): """Test invalid taglist format strings.""" self.assertFalse(is_valid_tag_list(u"ubuntu-server,")) self.assertFalse(is_valid_tag_list(u"!!!,")) self.assertFalse(is_valid_tag_list(u"location 1234, server")) self.assertFalse(is_valid_tag_list( u"ubuntu, server, ")) landscape-client-14.01/landscape/lib/tests/test_bpickle.py0000644000175000017500000000315712301414317023427 0ustar andreasandreasimport unittest from landscape.lib import bpickle class BPickleTest(unittest.TestCase): def test_int(self): self.assertEqual(bpickle.loads(bpickle.dumps(1)), 1) def test_float(self): self.assertAlmostEquals(bpickle.loads(bpickle.dumps(2.3)), 2.3) def test_float_scientific_notation(self): number = 0.00005 self.assertTrue("e" in repr(number)) self.assertAlmostEquals(bpickle.loads(bpickle.dumps(number)), number) def test_string(self): 
self.assertEqual(bpickle.loads(bpickle.dumps('foo')), 'foo') def test_list(self): self.assertEqual(bpickle.loads(bpickle.dumps([1, 2, 'hello', 3.0])), [1, 2, 'hello', 3.0]) def test_tuple(self): data = bpickle.dumps((1, [], 2, 'hello', 3.0)) self.assertEqual(bpickle.loads(data), (1, [], 2, 'hello', 3.0)) def test_none(self): self.assertEqual(bpickle.loads(bpickle.dumps(None)), None) def test_unicode(self): self.assertEqual(bpickle.loads(bpickle.dumps(u'\xc0')), u'\xc0') def test_bool(self): self.assertEqual(bpickle.loads(bpickle.dumps(True)), True) def test_dict(self): dumped_tostr = bpickle.dumps({True: "hello"}) self.assertEqual(bpickle.loads(dumped_tostr), {True: "hello"}) dumped_tobool = bpickle.dumps({True: False}) self.assertEqual(bpickle.loads(dumped_tobool), {True: False}) def test_long(self): long = 99999999999999999999999999999 self.assertEqual(bpickle.loads(bpickle.dumps(long)), long) landscape-client-14.01/landscape/lib/tests/test_lock.py0000644000175000017500000000157512301414317022750 0ustar andreasandreasimport time import os from landscape.tests.helpers import LandscapeTest from landscape.lib.lock import lock_path, LockError class LockTest(LandscapeTest): def setUp(self): super(LockTest, self).setUp() self.filename = self.makeFile() def test_lock_creates_path(self): self.assertFalse(os.path.isfile(self.filename)) lock_path(self.filename) self.assertTrue(os.path.isfile(self.filename)) def test_lock_with_already_locked(self): unlock_path = lock_path(self.filename) self.assertRaises(LockError, lock_path, self.filename) unlock_path() lock_path(self.filename) def test_lock_with_timeout(self): lock_path(self.filename) started = time.time() self.assertRaises(LockError, lock_path, self.filename, timeout=0.5) self.assertTrue(started < time.time() - 0.5) landscape-client-14.01/landscape/lib/tests/test_sequenceranges.py0000644000175000017500000002362412301414317025027 0ustar andreasandreasimport unittest from landscape.lib.sequenceranges import ( SequenceRanges, remove_from_ranges, add_to_ranges, find_ranges_index, ranges_to_sequence, sequence_to_ranges, SequenceError) class SequenceRangesTest(unittest.TestCase): def setUp(self): self.ranges = [1, 2, (15, 17), 19, (21, 24), 26, 27] self.sequence = [1, 2, 15, 16, 17, 19, 21, 22, 23, 24, 26, 27] def test_empty_to_sequence(self): self.assertEqual(SequenceRanges().to_sequence(), []) def test_empty_to_ranges(self): self.assertEqual(SequenceRanges().to_ranges(), []) def test_from_to_sequence(self): obj = SequenceRanges.from_sequence(self.sequence) self.assertEqual(obj.to_sequence(), self.sequence) def test_from_to_ranges(self): obj = SequenceRanges.from_ranges(self.ranges) self.assertEqual(obj.to_ranges(), self.ranges) def test_to_ranges_immutable(self): obj = SequenceRanges.from_ranges(self.ranges) obj.to_ranges().append(123) self.assertEqual(obj.to_ranges(), self.ranges) def test_from_sequence_to_ranges(self): obj = SequenceRanges.from_sequence(self.sequence) self.assertEqual(list(obj.to_ranges()), self.ranges) def test_from_ranges_to_sequence(self): obj = SequenceRanges.from_ranges(self.ranges) self.assertEqual(list(obj.to_sequence()), self.sequence) def test_iter(self): obj = SequenceRanges.from_ranges(self.ranges) self.assertEqual(list(obj), self.sequence) def test_contains(self): obj = SequenceRanges.from_ranges(self.ranges) self.assertTrue(1 in obj) self.assertTrue(2 in obj) self.assertTrue(15 in obj) self.assertTrue(16 in obj) self.assertTrue(17 in obj) self.assertTrue(19 in obj) self.assertTrue(27 in obj) self.assertTrue(0 not in 
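# NOTE (editor): an illustrative sketch, assuming flock(2) semantics, of
# the lock_path() contract LockTest exercises above: the lock file is
# created on demand, a second lock attempt raises LockError (after an
# optional timeout), and the returned callable releases the lock.  The
# real landscape.lib.lock implementation may differ in detail.
import fcntl
import os
import time

class LockError(Exception):
    pass

def lock_path(filename, timeout=0):
    fd = os.open(filename, os.O_CREAT | os.O_RDWR)
    deadline = time.time() + timeout
    while True:
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except (IOError, OSError):       # EWOULDBLOCK: somebody holds it
            if time.time() >= deadline:
                os.close(fd)
                raise LockError("%s is already locked" % filename)
            time.sleep(0.05)
        else:
            break
    def unlock_path():
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
    return unlock_path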
obj) self.assertTrue(3 not in obj) self.assertTrue(14 not in obj) self.assertTrue(18 not in obj) self.assertTrue(20 not in obj) self.assertTrue(28 not in obj) def test_add(self): obj = SequenceRanges() obj.add(1) self.assertEqual(obj.to_ranges(), [1]) obj.add(2) self.assertEqual(obj.to_ranges(), [1, 2]) obj.add(3) self.assertEqual(obj.to_ranges(), [(1, 3)]) obj.add(3) self.assertEqual(obj.to_ranges(), [(1, 3)]) def test_remove(self): obj = SequenceRanges.from_ranges([(1, 3)]) obj.remove(2) self.assertEqual(obj.to_ranges(), [1, 3]) obj.remove(1) self.assertEqual(obj.to_ranges(), [3]) obj.remove(3) self.assertEqual(obj.to_ranges(), []) obj.remove(4) self.assertEqual(obj.to_ranges(), []) class SequenceToRangesTest(unittest.TestCase): def test_empty(self): self.assertEqual(list(sequence_to_ranges([])), []) def test_one_element(self): self.assertEqual(list(sequence_to_ranges([1])), [1]) def test_two_elements(self): self.assertEqual(list(sequence_to_ranges([1, 2])), [1, 2]) def test_three_elements(self): self.assertEqual(list(sequence_to_ranges([1, 2, 3])), [(1, 3)]) def test_many_elements(self): sequence = [1, 2, 15, 16, 17, 19, 21, 22, 23, 24, 26, 27] self.assertEqual(list(sequence_to_ranges(sequence)), [1, 2, (15, 17), 19, (21, 24), 26, 27]) def test_out_of_order(self): self.assertRaises(SequenceError, sequence_to_ranges([2, 1]).next) def test_duplicated_item(self): self.assertRaises(SequenceError, sequence_to_ranges([1, 1]).next) class RangesToSequenceTest(unittest.TestCase): def test_empty(self): self.assertEqual(list(ranges_to_sequence([])), []) def test_one_element(self): self.assertEqual(list(ranges_to_sequence([1])), [1]) def test_two_elements(self): self.assertEqual(list(ranges_to_sequence([1, 2])), [1, 2]) def test_three_elements(self): self.assertEqual(list(ranges_to_sequence([(1, 3)])), [1, 2, 3]) def test_many_elements(self): ranges = [1, 2, (15, 17), 19, (21, 24), 26, 27] self.assertEqual(list(ranges_to_sequence(ranges)), [1, 2, 15, 16, 17, 19, 21, 22, 23, 24, 26, 27]) def test_invalid_range(self): """ If range start value is greater than the end one, an error is raised. 
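# NOTE (editor): a compact sketch of the sequence->ranges compression
# these tests describe (my own illustration, not the shipped
# landscape.lib.sequenceranges code): runs of three or more consecutive
# integers collapse into a (start, end) tuple, runs of one or two stay
# as plain integers, and unordered or duplicated input raises an error.
class SequenceError(Exception):
    pass

def _flush(start, end):
    if end - start >= 2:
        yield (start, end)               # long run -> one tuple
    else:
        for item in range(start, end + 1):
            yield item                   # short run -> plain integers

def sequence_to_ranges(sequence):
    start = end = None
    for item in sequence:
        if start is None:
            start = end = item
        elif item == end + 1:
            end = item                   # extend the current run
        elif item <= end:
            raise SequenceError("%r is out of order" % item)
        else:
            for range_ in _flush(start, end):
                yield range_
            start = end = item
    if start is not None:
        for range_ in _flush(start, end):
            yield range_

assert list(sequence_to_ranges([1, 2, 15, 16, 17, 19])) == \
    [1, 2, (15, 17), 19]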
""" ranges = [1, 2, (5, 3), 10] self.assertRaises(ValueError, list, ranges_to_sequence(ranges)) class FindRangesIndexTest(unittest.TestCase): def test_empty(self): self.assertEqual(find_ranges_index([], 2), 0) def test_sequence(self): self.assertEqual(find_ranges_index([1, 2, 3, 4, 5], 0), 0) self.assertEqual(find_ranges_index([1, 2, 3, 4, 5], 1), 0) self.assertEqual(find_ranges_index([1, 2, 3, 4, 5], 2), 1) self.assertEqual(find_ranges_index([1, 2, 3, 4, 5], 3), 2) self.assertEqual(find_ranges_index([1, 2, 3, 4, 5], 4), 3) self.assertEqual(find_ranges_index([1, 2, 3, 4, 5], 5), 4) self.assertEqual(find_ranges_index([1, 2, 3, 4, 5], 6), 5) def test_sequence_with_missing(self): self.assertEqual(find_ranges_index([1, 2, 4, 5], 2), 1) self.assertEqual(find_ranges_index([1, 2, 4, 5], 3), 2) self.assertEqual(find_ranges_index([1, 2, 4, 5], 4), 2) def test_range(self): self.assertEqual(find_ranges_index([1, (2, 4), 5], 0), 0) self.assertEqual(find_ranges_index([1, (2, 4), 5], 1), 0) self.assertEqual(find_ranges_index([1, (2, 4), 5], 2), 1) self.assertEqual(find_ranges_index([1, (2, 4), 5], 3), 1) self.assertEqual(find_ranges_index([1, (2, 4), 5], 4), 1) self.assertEqual(find_ranges_index([1, (2, 4), 5], 5), 2) self.assertEqual(find_ranges_index([1, (2, 4), 5], 6), 3) def test_range_with_missing(self): self.assertEqual(find_ranges_index([1, (3, 4), 5], 0), 0) self.assertEqual(find_ranges_index([1, (3, 4), 5], 1), 0) self.assertEqual(find_ranges_index([1, (3, 4), 5], 2), 1) self.assertEqual(find_ranges_index([1, (3, 4), 5], 3), 1) self.assertEqual(find_ranges_index([1, (3, 4), 5], 4), 1) self.assertEqual(find_ranges_index([1, (3, 4), 5], 5), 2) self.assertEqual(find_ranges_index([1, (3, 4), 5], 6), 3) class AddToRangesTest(unittest.TestCase): def test_empty(self): ranges = [] add_to_ranges(ranges, 1) self.assertEqual(ranges, [1]) def test_append(self): ranges = [1] add_to_ranges(ranges, 2) self.assertEqual(ranges, [1, 2]) def test_prepend(self): ranges = [2] add_to_ranges(ranges, 1) self.assertEqual(ranges, [1, 2]) def test_insert(self): ranges = [1, 4] add_to_ranges(ranges, 2) self.assertEqual(ranges, [1, 2, 4]) def test_merge_sequence(self): ranges = [1, 2, 4, 5] add_to_ranges(ranges, 3) self.assertEqual(ranges, [(1, 5)]) def test_merge_ranges(self): ranges = [(1, 3), (5, 7)] add_to_ranges(ranges, 4) self.assertEqual(ranges, [(1, 7)]) def test_merge_sequence_and_ranges(self): ranges = [(1, 3), 5, 6, 7] add_to_ranges(ranges, 4) self.assertEqual(ranges, [(1, 7)]) def test_merge_sequence_and_ranges_with_gaps(self): ranges = [1, (3, 5), 7, 9] add_to_ranges(ranges, 6) self.assertEqual(ranges, [1, (3, 7), 9]) def test_dont_merge_ranges_with_gap(self): ranges = [(1, 3), (7, 9)] add_to_ranges(ranges, 5) self.assertEqual(ranges, [(1, 3), 5, (7, 9)]) def test_duplicate(self): ranges = [1] add_to_ranges(ranges, 1) self.assertEqual(ranges, [1]) def test_duplicate_in_range(self): ranges = [(1, 3)] add_to_ranges(ranges, 1) self.assertEqual(ranges, [(1, 3)]) add_to_ranges(ranges, 2) self.assertEqual(ranges, [(1, 3)]) add_to_ranges(ranges, 3) self.assertEqual(ranges, [(1, 3)]) class RemoveFromRangesTest(unittest.TestCase): def test_empty(self): ranges = [] remove_from_ranges(ranges, 1) self.assertEqual(ranges, []) def test_single(self): ranges = [1] remove_from_ranges(ranges, 1) self.assertEqual(ranges, []) def test_remove_before(self): ranges = [1, 2] remove_from_ranges(ranges, 1) self.assertEqual(ranges, [2]) def test_remove_after(self): ranges = [1, 2] remove_from_ranges(ranges, 2) self.assertEqual(ranges, 
[1]) def test_remove_inside(self): ranges = [1, 2, 3] remove_from_ranges(ranges, 2) self.assertEqual(ranges, [1, 3]) def test_remove_unexistent(self): ranges = [1, 3] remove_from_ranges(ranges, 2) self.assertEqual(ranges, [1, 3]) def test_split_range(self): ranges = [(1, 5)] remove_from_ranges(ranges, 3) self.assertEqual(ranges, [1, 2, 4, 5]) def test_split_range_into_ranges(self): ranges = [(1, 7)] remove_from_ranges(ranges, 4) self.assertEqual(ranges, [(1, 3), (5, 7)]) def test_decrement_left(self): ranges = [(1, 5)] remove_from_ranges(ranges, 1) self.assertEqual(ranges, [(2, 5)]) def test_decrement_right(self): ranges = [(1, 5)] remove_from_ranges(ranges, 5) self.assertEqual(ranges, [(1, 4)]) def test_dont_removing_unmatched_range(self): ranges = [(1, 3), (5, 7)] remove_from_ranges(ranges, 4) self.assertEqual(ranges, [(1, 3), (5, 7)]) def test_suite(): return unittest.TestSuite(( unittest.makeSuite(SequenceToRangesTest), unittest.makeSuite(RangesToSequenceTest), unittest.makeSuite(SequenceRangesTest), unittest.makeSuite(FindRangesIndexTest), unittest.makeSuite(AddToRangesTest), unittest.makeSuite(RemoveFromRangesTest), )) landscape-client-14.01/landscape/lib/tests/test_twisted_util.py0000644000175000017500000001013312301414317024526 0ustar andreasandreasimport os from landscape.tests.helpers import LandscapeTest from landscape.lib.twisted_util import spawn_process from landscape.lib.fs import create_file class SpawnProcessTest(LandscapeTest): def setUp(self): super(SpawnProcessTest, self).setUp() self.command = self.makeFile("#!/bin/sh\necho -n $@") os.chmod(self.command, 0755) def test_spawn_process_return_value(self): """ The process is executed and returns the expected exit code. """ create_file(self.command, "#!/bin/sh\nexit 2") def callback((out, err, code)): self.assertEqual(out, "") self.assertEqual(err, "") self.assertEqual(code, 2) result = spawn_process(self.command) result.addCallback(callback) return result def test_spawn_process_output(self): """ The process returns the expected standard output. """ def callback((out, err, code)): self.assertEqual(out, "a b") self.assertEqual(err, "") self.assertEqual(code, 0) result = spawn_process(self.command, args=("a", "b")) result.addCallback(callback) return result def test_spawn_process_error(self): """ The process returns the expected standard error. """ create_file(self.command, "#!/bin/sh\necho -n $@ >&2") def callback((out, err, code)): self.assertEqual(out, "") self.assertEqual(err, "a b") self.assertEqual(code, 0) result = spawn_process(self.command, args=("a", "b")) result.addCallback(callback) return result def test_spawn_process_callback(self): """ If a callback for process output is provieded, it is called for every line of output. """ create_file(self.command, "#!/bin/sh\n/bin/echo -ne $@") param = r"some text\nanother line\nok, last one\n" expected = ["some text", "another line", "ok, last one"] lines = [] def line_received(line): lines.append(line) def callback((out, err, code)): self.assertEqual(expected, lines) result = spawn_process(self.command, args=(param,), line_received=line_received) result.addCallback(callback) return result def test_spawn_process_callback_multiple_newlines(self): """ If output ends with more than one newline, empty lines are preserved. 
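# NOTE (editor): illustrative usage sketch, not part of the original
# suite.  The spawn_process() helper exercised above closely resembles
# Twisted's stock getProcessOutputAndValue(), which also fires a
# Deferred with an (out, err, code) tuple; this standalone snippet shows
# that documented API driving /bin/echo the way the tests drive their
# shell script (str comparisons assume Python 2, as throughout this tree).
from twisted.internet import reactor
from twisted.internet.utils import getProcessOutputAndValue

def demo():
    result = getProcessOutputAndValue("/bin/echo", args=("-n", "a", "b"))
    def check(value):
        out, err, code = value
        assert (out, err, code) == ("a b", "", 0)
    result.addCallback(check)
    result.addBoth(lambda ignored: reactor.stop())

if __name__ == "__main__":
    reactor.callWhenRunning(demo)
    reactor.run()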
""" create_file(self.command, "#!/bin/sh\n/bin/echo -ne $@") param = r"some text\nanother line\n\n\n" expected = ["some text", "another line", "", ""] lines = [] def line_received(line): lines.append(line) def callback((out, err, code)): self.assertEqual(expected, lines) result = spawn_process(self.command, args=(param,), line_received=line_received) result.addCallback(callback) return result def test_spawn_process_callback_no_newline(self): """ If output ends without a newline, the line is still passed to the callback. """ create_file(self.command, "#!/bin/sh\n/bin/echo -ne $@") param = r"some text\nanother line\nok, last one" expected = ["some text", "another line", "ok, last one"] lines = [] def line_received(line): lines.append(line) def callback((out, err, code)): self.assertEqual(expected, lines) result = spawn_process(self.command, args=(param,), line_received=line_received) result.addCallback(callback) return result def test_spawn_process_with_stdin(self): """ Optionally C{spawn_process} accepts a C{stdin} argument. """ create_file(self.command, "#!/bin/sh\n/bin/cat") def callback((out, err, code)): self.assertEqual("hello", out) result = spawn_process(self.command, stdin="hello") result.addCallback(callback) return result landscape-client-14.01/landscape/lib/tests/test_bootstrap.py0000644000175000017500000001102012301414317024017 0ustar andreasandreasimport os from landscape.tests.helpers import LandscapeTest from landscape.lib.bootstrap import ( BootstrapPath, BootstrapFile, BootstrapDirectory, BootstrapList) class BootstrapPathTest(LandscapeTest): bootstrap_class = BootstrapPath def setUp(self): super(BootstrapPathTest, self).setUp() self.dirname = self.makeDir() self.path = os.path.join(self.dirname, "$my_var") self.real_path = os.path.join(self.dirname, "my_var_value") def test_username(self): getpwnam = self.mocker.replace("pwd.getpwnam") getpwnam("username").pw_uid self.mocker.result(1234) getuid = self.mocker.replace("os.getuid") getuid() self.mocker.result(0) chown = self.mocker.replace("os.chown") chown(self.real_path, 1234, -1) self.mocker.replay() file = self.bootstrap_class(self.real_path, username="username") file.bootstrap(my_var="my_var_value") def test_group(self): getgrnam = self.mocker.replace("grp.getgrnam") getgrnam("group").gr_gid self.mocker.result(5678) getuid = self.mocker.replace("os.getuid") getuid() self.mocker.result(0) chown = self.mocker.replace("os.chown") chown(self.real_path, -1, 5678) self.mocker.replay() file = self.bootstrap_class(self.path, group="group") file.bootstrap(my_var="my_var_value") def test_mode(self): chmod = self.mocker.replace("os.chmod") chmod(self.real_path, 0644) self.mocker.replay() file = self.bootstrap_class(self.path, mode=0644) file.bootstrap(my_var="my_var_value") def test_all_details(self): getuid = self.mocker.replace("os.getuid") getuid() self.mocker.result(0) getpwnam = self.mocker.replace("pwd.getpwnam") getpwnam("username").pw_uid self.mocker.result(1234) getgrnam = self.mocker.replace("grp.getgrnam") getgrnam("group").gr_gid self.mocker.result(5678) chown = self.mocker.replace("os.chown") chown(self.real_path, 1234, 5678) chmod = self.mocker.replace("os.chmod") chmod(self.real_path, 0644) self.mocker.replay() file = self.bootstrap_class(self.path, "username", "group", 0644) file.bootstrap(my_var="my_var_value") def test_all_details_with_non_root(self): getuid = self.mocker.replace("os.getuid") getuid() self.mocker.result(1000) chmod = self.mocker.replace("os.chmod") chmod(self.real_path, 0644) self.mocker.replay() 
file = self.bootstrap_class(self.path, "username", "group", 0644) file.bootstrap(my_var="my_var_value") class BootstrapCreationTest(BootstrapPathTest): bootstrap_class = BootstrapFile def exists(self, path): return os.path.isfile(path) def test_creation(self): file = self.bootstrap_class(self.path) self.assertFalse(self.exists(self.real_path)) file.bootstrap(my_var="my_var_value") self.assertTrue(self.exists(self.real_path)) class BootstrapFileTest(BootstrapCreationTest): def test_creation_wont_overwrite(self): filename = self.makeFile("CONTENT") file = self.bootstrap_class(filename) file.bootstrap() self.assertEqual(open(filename).read(), "CONTENT") class BootstrapDirectoryTest(BootstrapCreationTest): bootstrap_class = BootstrapDirectory def exists(self, path): return os.path.isdir(path) def test_creation_works_with_existing(self): dirname = self.makeDir() dir = self.bootstrap_class(dirname) dir.bootstrap() self.assertTrue(self.exists(dirname)) def test_creation_will_fail_correctly(self): filename = self.makeFile("I AM A *FILE*") dir = self.bootstrap_class(filename) self.assertRaises(OSError, dir.bootstrap) class BootstrapListTest(LandscapeTest): def test_creation(self): dirname = self.makeDir() list = BootstrapList([BootstrapFile("$dirname/filename"), BootstrapDirectory("$dirname/dirname"), BootstrapFile("$dirname/dirname/filename")]) list.bootstrap(dirname=dirname) self.assertTrue(os.path.isfile(os.path.join(dirname, "filename"))) self.assertTrue(os.path.isdir(os.path.join(dirname, "dirname"))) self.assertTrue(os.path.isfile(os.path.join(dirname, "dirname/filename"))) landscape-client-14.01/landscape/lib/tests/test_process.py0000644000175000017500000001355012301414317023472 0ustar andreasandreasimport unittest import os from landscape.tests.helpers import LandscapeTest from landscape.lib.process import calculate_pcpu, ProcessInformation from landscape.lib.fs import create_file class ProcessInfoTest(LandscapeTest): def setUp(self): super(ProcessInfoTest, self).setUp() self.proc_dir = self.makeDir() def _add_process_info(self, process_id, state="R (running)"): """Add information about a process. The cmdline, status and stat files will be created in the process directory, so that get_process_info can get the required information. """ process_dir = os.path.join(self.proc_dir, str(process_id)) os.mkdir(process_dir) cmd_line = "/usr/bin/foo" create_file(os.path.join(process_dir, "cmdline"), cmd_line) status = "\n".join([ "Name: foo", "State: %s" % state, "Uid: 1000", "Gid: 2000", "VmSize: 3000", "Ignored: value"]) create_file(os.path.join(process_dir, "status"), status) stat_array = [str(index) for index in range(44)] stat = " ".join(stat_array) create_file(os.path.join(process_dir, "stat"), stat) def test_missing_process_race(self): """ We use os.listdir("/proc") to get the list of active processes, if a process ends before we attempt to read the process' information, then this should not trigger an error. 
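# NOTE (editor): an illustrative reduction (class name and details mine)
# of the bootstrap machinery these tests drive: paths may contain
# $variables that bootstrap() substitutes before creating the file or
# directory, which is why the suite instantiates BootstrapFile with
# "$dirname/filename" and later calls bootstrap(dirname=...).
import os
import tempfile
from string import Template

class BootstrapDir(object):
    def __init__(self, path):
        self.path = path
    def bootstrap(self, **variables):
        real_path = Template(self.path).substitute(variables)
        if not os.path.isdir(real_path):
            os.makedirs(real_path)       # idempotent if it already exists
        return real_path

base = tempfile.mkdtemp()
path = BootstrapDir("$prefix/data").bootstrap(prefix=base)
assert os.path.isdir(path)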
""" listdir_mock = self.mocker.replace("os.listdir") listdir_mock("/proc") self.mocker.result(["12345"]) class FakeFile(object): def __init__(self, response=""): self._response = response self.closed = False def readline(self): return self._response def __iter__(self): if self._response is None: raise IOError("Fake file error") else: yield self._response def close(self): self.closed = True open_mock = self.mocker.replace("__builtin__.open") open_mock("/proc/12345/cmdline", "r") fakefile1 = FakeFile("test-binary") self.mocker.result(fakefile1) open_mock("/proc/12345/status", "r") fakefile2 = FakeFile(None) self.mocker.result(fakefile2) self.mocker.replay() process_info = ProcessInformation("/proc") processes = list(process_info.get_all_process_info()) self.assertEqual(processes, []) self.assertTrue(fakefile1.closed) self.assertTrue(fakefile2.closed) def test_get_process_info_state(self): """ C{get_process_info} reads the process state from the status file and uses the first character to represent the process state. """ self._add_process_info(12, state="A (some state)") process_info = ProcessInformation(self.proc_dir) info = process_info.get_process_info(12) self.assertEqual("A", info["state"]) def test_get_process_info_state_preserves_case(self): """ C{get_process_info} retains the case of the process state, since for example both x and X can be different states. """ self._add_process_info(12, state="a (some state)") process_info = ProcessInformation(self.proc_dir) info = process_info.get_process_info(12) self.assertEqual("a", info["state"]) def test_get_process_info_state_tracing_stop_lucid(self): """ In Lucid, capital T was used for both stopped and tracing stop. From Natty and onwards lowercase t is used for tracing stop, so we special-case that state and always return lowercase t for tracing stop. """ self._add_process_info(12, state="T (tracing stop)") self._add_process_info(13, state="t (tracing stop)") process_info = ProcessInformation(self.proc_dir) info1 = process_info.get_process_info(12) info2 = process_info.get_process_info(12) self.assertEqual("t", info1["state"]) self.assertEqual("t", info2["state"]) class CalculatePCPUTest(unittest.TestCase): """ calculate_pcpu is lifted directly from procps/ps/output.c (it's called "pcpu" in there). What it actually does is... The result is "number of jiffies allocated to the process / number of jiffies the process has been running". How the jiffies are allocated to the process is CPU agnostic, and my reading of the percentage capping is to prevent something like... jiffies allocated on CPU #1 600, jiffies allocated on CPU #2 600 = 1200 Jiffies allocated to a process that's only been running for 1000 jiffies So, that would look wrong, but is entirely plausible. """ def test_calculate_pcpu_real_data(self): self.assertEqual( calculate_pcpu(51286, 5000, 19000.07, 9281.0, 100), 3.0) def test_calculate_pcpu(self): """ This calculates the pcpu based on 10000 jiffies allocated to a process over 50000 jiffies. This should be cpu utilisation of 20% """ self.assertEqual(calculate_pcpu(8000, 2000, 1000, 50000, 100), 20.0) def test_calculate_pcpu_capped(self): """ This calculates the pcpu based on 100000 jiffies allocated to a process over 50000 jiffies. This should be cpu utilisation of 200% but capped at 99% CPU utilisation. 
""" self.assertEqual(calculate_pcpu(98000, 2000, 1000, 50000, 100), 99.0) def test_calculate_pcpu_floored(self): """ This calculates the pcpu based on 1 jiffies allocated to a process over 80 jiffies this should be negative, but floored to 0.0. """ self.assertEqual(calculate_pcpu(1, 0, 50, 800, 10), 0.0) landscape-client-14.01/landscape/lib/tests/test_juju.py0000644000175000017500000000462512301414317022774 0ustar andreasandreasfrom collections import namedtuple import json from landscape.tests.helpers import LandscapeTest from landscape.lib.juju import get_juju_info SAMPLE_JUJU_INFO = json.dumps({"environment-uuid": "DEAD-BEEF", "unit-name": "service/0", "api-addresses": "10.0.3.1:17070", "private-address": "127.0.0.1"}) class JujuTest(LandscapeTest): Config = namedtuple("Config", "juju_filename") def test_get_juju_info_sample_data(self): """L{get_juju_info} parses JSON data from the juju_filename file.""" stub_config = self.Config(self.makeFile(SAMPLE_JUJU_INFO)) juju_info = get_juju_info(stub_config) self.assertEqual( {u"environment-uuid": "DEAD-BEEF", u"unit-name": "service/0", u"api-addresses": ["10.0.3.1:17070"], u"private-address": "127.0.0.1"}, juju_info) def test_get_juju_info_empty_file(self): """ If L{get_juju_info} is called with a configuration pointing to an empty file, it returns C{None}. """ stub_config = self.Config(self.makeFile("")) juju_info = get_juju_info(stub_config) self.log_helper.ignore_errors(ValueError) self.assertEqual(juju_info, None) self.assertIn("Error attempting to read JSON", self.logfile.getvalue()) def test_get_juju_info_not_a_file(self): """ If L{get_juju_info} is called with a configuration pointing to a directory, it returns C{None}. """ stub_config = self.Config("/") juju_info = get_juju_info(stub_config) self.assertIs(juju_info, None) def test_get_juju_info_multiple_endpoints(self): """L{get_juju_info} turns space separated API addresses into a list.""" juju_multiple_endpoints = json.dumps({ "environment-uuid": "DEAD-BEEF", "unit-name": "service/0", "api-addresses": "10.0.3.1:17070 10.0.3.2:18080", "private-address": "127.0.0.1"}) stub_config = self.Config(self.makeFile(juju_multiple_endpoints)) juju_info = get_juju_info(stub_config) self.assertEqual( {u"environment-uuid": "DEAD-BEEF", u"unit-name": "service/0", u"api-addresses": ["10.0.3.1:17070", "10.0.3.2:18080"], u"private-address": "127.0.0.1"}, juju_info) landscape-client-14.01/landscape/lib/tests/test_dns.py0000644000175000017500000001431012301414317022573 0ustar andreasandreasfrom landscape.tests.helpers import LandscapeTest from landscape.lib.dns import ( _lookup_server_record, _lookup_hostname, discover_server) from twisted.internet import defer from twisted.names import dns from twisted.names.error import ResolverError class FakeResolverResult(object): """ A fake resolver result returned by L{FakeResolver}. @param type: The result type L{twisted.names.dns.SRV} @param payload: The result contents """ def __init__(self): self.type = None class Payload(object): """ A payload result returned by fake resolver. @param target: The result of the lookup """ def __init__(self): self.target = "" class Target(object): """ A payload target returned by fake resolver. @param name: The name contained by the target. 
""" def __init__(self): self.name = "" self.payload = Payload() self.payload.target = Target() class FakeResolver(object): """ A fake resolver that mimics L{twisted.names.client.Resolver} """ def __init__(self): self.results = None self.name = None self.queried = None def lookupService(self, arg1): self.queried = arg1 deferred = defer.Deferred() deferred.callback(self.results) return deferred def getHostByName(self, arg1): self.queried = arg1 deferred = defer.Deferred() deferred.callback(self.name) return deferred class BadResolver(object): """ A resolver that mimics L{twisted.names.client.Resolver} and always returns an error. """ def lookupService(self, arg1): deferred = defer.Deferred() deferred.errback(ResolverError("Couldn't connect")) return deferred def getHostByName(self, arg1): deferred = defer.Deferred() deferred.errback(ResolverError("Couldn't connect")) return deferred class DnsSrvLookupTest(LandscapeTest): def test_with_server_found(self): """ Looking up a DNS SRV record should return the result of the lookup. """ fake_result = FakeResolverResult() fake_result.type = dns.SRV fake_result.payload.target.name = "a.b.com" fake_resolver = FakeResolver() fake_resolver.results = [[fake_result]] query_string = "_landscape._tcp.mylandscapehost.com" def check(result): self.assertEqual(fake_resolver.queried, query_string) self.assertEqual("a.b.com", result) d = _lookup_server_record(fake_resolver, query_string) d.addCallback(check) return d def test_with_server_not_found(self): """ Looking up a DNS SRV record and finding nothing exists should return an empty string. """ fake_resolver = FakeResolver() fake_resolver.results = [[]] def check(result): self.assertEqual("", result) d = _lookup_server_record(fake_resolver, "_landscape._tcp.mylandscapehost.com") d.addCallback(check) return d def test_with_resolver_error(self): """A resolver error triggers error handling code.""" # The failure should be properly logged logging_mock = self.mocker.replace("logging.info") logging_mock("SRV lookup of _landscape._tcp.mylandscapehost.com " "failed.") self.mocker.replay() d = _lookup_server_record(BadResolver(), "_landscape._tcp.mylandscapehost.com") self.assertFailure(d, ResolverError) return d class DnsNameLookupTest(LandscapeTest): def test_with_name_found(self): """ Looking up a DNS name record should return the result of the lookup. """ fake_resolver = FakeResolver() fake_resolver.name = "a.b.com" query_string = "landscape.localdomain" def check(result): self.assertEqual(fake_resolver.queried, query_string) self.assertEqual("a.b.com", result) d = _lookup_hostname(None, fake_resolver, query_string) d.addCallback(check) return d def test_with_name_not_found(self): """ Looking up a DNS NAME record and not finding a result should return None. 
""" fake_resolver = FakeResolver() fake_resolver.name = None def check(result): self.assertEqual(None, result) d = _lookup_hostname(None, fake_resolver, "landscape.localdomain") d.addCallback(check) return d def test_with_resolver_error(self): """A resolver error triggers error handling code.""" # The failure should be properly logged logging_mock = self.mocker.replace("logging.info") logging_mock("Name lookup of landscape.localdomain failed.") self.mocker.replay() d = _lookup_hostname(None, BadResolver(), "landscape.localdomain") self.assertFailure(d, ResolverError) return d class DiscoverServerTest(LandscapeTest): def test_srv_lookup(self): """The DNS name of the server is found using a SRV lookup.""" fake_result = FakeResolverResult() fake_result.type = dns.SRV fake_result.payload.target.name = "a.b.com" fake_resolver = FakeResolver() fake_resolver.results = [[fake_result]] d = discover_server(resolver=fake_resolver) def check(result): self.assertEqual("a.b.com", result) d.addCallback(check) return d def test_a_name_lookup(self): """The DNS name of the server is found using an A name lookup.""" fake_resolver = FakeResolver() fake_resolver.name = "x.y.com" d = discover_server(resolver=fake_resolver) def check(result): self.assertEqual("x.y.com", result) d.addCallback(check) return d def test_failed_lookup(self): """A resolver error is returned when server autodiscovery fails.""" d = _lookup_server_record(BadResolver(), "landscape.localdomain") self.assertFailure(d, ResolverError) return d landscape-client-14.01/landscape/lib/tests/test_disk.py0000644000175000017500000002360212301414317022745 0ustar andreasandreasimport os from landscape.lib.disk import ( get_filesystem_for_path, get_mount_info, is_device_removable, _get_device_removable_file_path) from landscape.tests.helpers import LandscapeTest class DiskUtilitiesTest(LandscapeTest): def setUp(self): super(DiskUtilitiesTest, self).setUp() self.mount_file = self.makeFile("") self.stat_results = {} def statvfs(self, point): """ Return the requested mount point information. If C{read_access} was set to C{False} when this mount point was created, then we raise an exception to simulate a permission denied error. """ if self.read_access: return self.stat_results[point] else: raise OSError("Permission denied") def set_mount_points(self, points, read_access=True): """ This method prepares a fake mounts file containing the mount points specified in the C{points} list of strings. This file can then be used by referencing C{self.mount_file}. If C{read_access} is set to C{False}, then all mount points will yield a permission denied error when inspected. 
""" self.read_access = read_access content = "\n".join("/dev/sda%d %s ext4 rw 0 0" % (i, point) for i, point in enumerate(points)) f = open(self.mount_file, "w") f.write(content) f.close() for point in points: self.stat_results[point] = (4096, 0, 1000, 500, 0, 0, 0, 0, 0) def test_get_filesystem_for_path(self): self.set_mount_points(["/"]) info = get_filesystem_for_path("/", self.mount_file, self.statvfs) self.assertEqual(info["mount-point"], "/") def test_get_filesystem_subpath(self): self.set_mount_points(["/"]) self.stat_results["/"] = (4096, 0, 1000, 500, 0, 0, 0, 0, 0) info = get_filesystem_for_path("/home", self.mount_file, self.statvfs) self.assertEqual(info["mount-point"], "/") def test_get_filesystem_subpath_closest(self): self.set_mount_points(["/", "/home"]) info = get_filesystem_for_path("/home", self.mount_file, self.statvfs) self.assertEqual(info["mount-point"], "/home") def test_get_filesystem_subpath_not_stupid(self): self.set_mount_points(["/", "/ho"]) info = get_filesystem_for_path("/home", self.mount_file, self.statvfs) self.assertEqual(info["mount-point"], "/") def test_symlink_home(self): symlink_path = self.makeFile() os.symlink("/foo/bar", symlink_path) self.addCleanup(os.remove, symlink_path) self.set_mount_points(["/", "/foo"]) info = get_filesystem_for_path(symlink_path, self.mount_file, self.statvfs) self.assertEqual(info["mount-point"], "/foo") def test_ignore_unreadable_mount_point(self): """ We should ignore mountpoints which are unreadable by the user who is logging in. """ self.set_mount_points(["/secret"], read_access=False) info = get_filesystem_for_path( "/secret", self.mount_file, self.statvfs) self.assertIdentical(info, None) def test_ignore_unmounted_and_virtual_mountpoints(self): """ Make sure autofs and virtual mountpoints are ignored. This is to ensure non-regression on bug #1045374. """ self.read_access = True content = "\n".join(["auto_direct /opt/whatever autofs", "none /run/lock tmpfs", "proc /proc proc", "/dev/sda1 /home ext4"]) f = open(self.mount_file, "w") f.write(content) f.close() self.stat_results["/home"] = (4096, 0, 1000, 500, 0, 0, 0, 0, 0) result = [x for x in get_mount_info(self.mount_file, self.statvfs)] expected = {"device": "/dev/sda1", "mount-point": "/home", "filesystem": "ext4", "total-space": 3, "free-space": 1} self.assertEqual([expected], result) class RemovableDiskTest(LandscapeTest): def test_wb_get_device_removable_file_path(self): """ When passed a device in /dev, the L{_get_device_removable_file_path} function returns the corresponding removable file path in /sys/block. """ device = "/dev/sdb" expected = "/sys/block/sdb/removable" is_link_mock = self.mocker.replace(os.path.islink) is_link_mock(device) self.mocker.result(False) self.mocker.replay() result = _get_device_removable_file_path(device) self.assertEqual(expected, result) def test_wb_get_device_removable_file_path_with_partition(self): """ When passed a device in /dev with a partition number, the L{_get_device_removable_file_path} function returns the corresponding removable file path in /sys/block. 
""" device = "/dev/sdb1" expected = "/sys/block/sdb/removable" is_link_mock = self.mocker.replace(os.path.islink) is_link_mock(device) self.mocker.result(False) self.mocker.replay() result = _get_device_removable_file_path(device) self.assertEqual(expected, result) def test_wb_get_device_removable_file_path_without_dev(self): """ When passed a device name (not the whole path), the L{_get_device_removable_file_path} function returns the corresponding removable file path in /sys/block. """ device = "sdb1" expected = "/sys/block/sdb/removable" is_link_mock = self.mocker.replace(os.path.islink) is_link_mock(device) self.mocker.result(False) self.mocker.replay() result = _get_device_removable_file_path(device) self.assertEqual(expected, result) def test_wb_get_device_removable_file_path_with_symlink(self): """ When the device path passed to L{_get_device_removable_file_path} is a symlink (it's the case when disks are mounted by uuid or by label), the L{_get_device_removable_file_path} function returns the proper corresponding file path in /sys/block. """ device = "/dev/disk/by-uuid/8b2ec410-ebd2-49ec-bb3c-b8b13effab08" readlink_mock = self.mocker.replace(os.readlink) readlink_mock(device) self.mocker.result("../../sda1") is_link_mock = self.mocker.replace(os.path.islink) is_link_mock(device) self.mocker.result(True) self.mocker.replay() expected = "/sys/block/sda/removable" result = _get_device_removable_file_path(device) self.assertEqual(expected, result) def test_wb_get_device_removable_file_path_raid_device(self): """ When passed a more exotic device file, like for example a raid device (e.g. /dev/cciss/c0d1p1), the L{_get_device_removable_file_path} function does not fail, and returns the expected /sys/block//removable path. """ device = "/dev/cciss/c0d0p0" # The expected path does not exists, but it doesn't matter here. expected = "/sys/block/c/removable" is_link_mock = self.mocker.replace(os.path.islink) is_link_mock(device) self.mocker.result(False) self.mocker.replay() result = _get_device_removable_file_path(device) self.assertEqual(expected, result) def test_is_device_removable(self): """ Given the path to a file, determine if it means the device is removable or not. """ device = "/dev/sdb1" path = self.makeFile("1") removable_mock = self.mocker.replace(_get_device_removable_file_path) removable_mock(device) self.mocker.result(path) self.mocker.replay() self.assertTrue(is_device_removable(device)) def test_is_device_removable_false(self): """ Given the path to a file, determine if it means the device is removable or not. """ device = "/dev/sdb1" path = self.makeFile("0") removable_mock = self.mocker.replace(_get_device_removable_file_path) removable_mock(device) self.mocker.result(path) self.mocker.replay() self.assertFalse(is_device_removable(device)) def test_is_device_removable_garbage(self): """ Given the path to a file, determine if it means the device is removable or not. """ device = "/dev/sdb1" path = self.makeFile("Some garbage") removable_mock = self.mocker.replace(_get_device_removable_file_path) removable_mock(device) self.mocker.result(path) self.mocker.replay() self.assertFalse(is_device_removable(device)) def test_is_device_removable_path_doesnt_exist(self): """ When given a non-existing path, report the device as not removable. 
""" device = "/dev/sdb1" path = "/what/ever" removable_mock = self.mocker.replace(_get_device_removable_file_path) removable_mock(device) self.mocker.result(path) self.mocker.replay() self.assertFalse(is_device_removable(device)) def test_is_removable_raid_device(self): """ When passed the path to a raid device (e.g. /dev/cciss/c0d0p0), the is_device_removable function returns False. """ device = "/dev/cciss/c0d1p1" is_link_mock = self.mocker.replace(os.path.islink) is_link_mock(device) self.mocker.result(False) self.mocker.replay() self.assertFalse(is_device_removable(device)) def test_is_device_removable_memory_card(self): """ The kernel/udev currently consider memory cards such as SD cards as non removable """ device = "/dev/mmcblk0p1" # Device 0, parition 1 self.assertTrue(is_device_removable(device)) landscape-client-14.01/landscape/lib/tests/test_amp.py0000644000175000017500000007064212301414317022576 0ustar andreasandreasfrom twisted.internet import reactor from twisted.internet.error import ConnectError, ConnectionDone from twisted.internet.task import Clock from twisted.internet.defer import Deferred, inlineCallbacks from twisted.python.failure import Failure from landscape.lib.amp import ( MethodCallError, MethodCallServerProtocol, MethodCallClientProtocol, MethodCallServerFactory, MethodCallClientFactory, RemoteObject, MethodCallSender) from landscape.tests.helpers import LandscapeTest class FakeTransport(object): """Accumulate written data into a list.""" def __init__(self, connection): self.stream = [] self.connection = connection def write(self, data): self.stream.append(data) def loseConnection(self): raise NotImplemented() def getPeer(self): pass def getHost(self): pass class FakeConnection(object): """Simulate a connection between a client and a server protocol.""" def __init__(self, client, server): self.client = client self.server = server def make(self): self.server.makeConnection(FakeTransport(self)) self.client.makeConnection(FakeTransport(self)) def lose(self, connector, reason): self.server.connectionLost(reason) self.client.connectionLost(reason) self.client.factory.clientConnectionLost(connector, reason) def flush(self): """ Notify the server of any data written by the client and viceversa. """ while True: if self.client.transport and self.client.transport.stream: self.server.dataReceived(self.client.transport.stream.pop(0)) elif self.server.transport and self.server.transport.stream: self.client.dataReceived(self.server.transport.stream.pop(0)) else: break class FakeConnector(object): """Make L{FakeConnection}s using the given server and client factories.""" def __init__(self, client, server): self.client = client self.server = server self.connection = None @property def factory(self): return self.client def connect(self): self.connection = FakeConnection(self.client.buildProtocol(None), self.server.buildProtocol(None)) # XXX Let the client factory be aware of this fake connection, so # it can flush it when needed. 
This is to workaround AMP not # supporting synchronous transports self.client.fake_connection = self.connection self.connection.make() def disconnect(self): self.connection.lose(self, Failure(ConnectionDone())) class DummyObject(object): method = None class MethodCallTest(LandscapeTest): def setUp(self): super(MethodCallTest, self).setUp() self.methods = ["method"] self.object = DummyObject() server = MethodCallServerProtocol(self.object, self.methods) client = MethodCallClientProtocol() self.connection = FakeConnection(client, server) self.connection.make() self.clock = Clock() self.sender = MethodCallSender(client, self.clock) def test_with_forbidden_method(self): """ If a method is not included in L{MethodCallServerFactory.methods} it can't be called. """ self.methods.remove("method") deferred = self.sender.send_method_call(method="method", args=[], kwargs={}) self.connection.flush() self.failureResultOf(deferred).trap(MethodCallError) def test_with_no_arguments(self): """ A connected client can issue a L{MethodCall} without arguments and with an empty response. """ self.object.method = lambda: None deferred = self.sender.send_method_call(method="method", args=[], kwargs={}) self.connection.flush() self.assertIs(None, self.successResultOf(deferred)) def test_with_return_value(self): """ A connected client can issue a L{MethodCall} targeted to an object method with a return value. """ self.object.method = lambda: "Cool result" deferred = self.sender.send_method_call(method="method", args=[], kwargs={}) self.connection.flush() self.assertEqual("Cool result", self.successResultOf(deferred)) def test_with_one_argument(self): """ A connected AMP client can issue a L{MethodCall} with one argument and a response value. """ self.object.method = lambda word: word.capitalize() deferred = self.sender.send_method_call(method="method", args=["john"], kwargs={}) self.connection.flush() self.assertEqual("John", self.successResultOf(deferred)) def test_with_boolean_return_value(self): """ The return value of a L{MethodCall} argument can be a boolean. """ self.object.method = lambda word: len(word) < 3 deferred = self.sender.send_method_call(method="method", args=["hi"], kwargs={}) self.connection.flush() self.assertTrue(self.successResultOf(deferred)) def test_with_many_arguments(self): """ A connected client can issue a L{MethodCall} with many arguments. """ self.object.method = lambda word1, word2: word1 + word2 deferred = self.sender.send_method_call(method="method", args=["We ", "rock"], kwargs={}) self.connection.flush() self.assertEqual("We rock", self.successResultOf(deferred)) def test_with_default_arguments(self): """ A connected client can issue a L{MethodCall} for methods having default arguments. """ self.object.method = lambda word, index=0: word[index:].lower() deferred = self.sender.send_method_call(method="method", args=["OHH"], kwargs={}) self.connection.flush() self.assertEqual("ohh", self.successResultOf(deferred)) def test_with_overriden_default_arguments(self): """ A connected client can issue a L{MethodCall} with keyword arguments having default values in the target object. 
If a value is specified by the caller it will be used in place of the default value """ self.object.method = lambda word, index=0: word[index:].lower() deferred = self.sender.send_method_call(method="method", args=["ABC"], kwargs={"index": 2}) self.connection.flush() self.assertEqual("c", self.successResultOf(deferred)) def test_with_dictionary_arguments(self): """ Method arguments passed to a L{MethodCall} can be dictionaries. """ self.object.method = lambda d: "".join(d.keys()) * sum(d.values()) deferred = self.sender.send_method_call(method="method", args=[{"foo": 1, "bar": 2}], kwargs={}) self.connection.flush() self.assertEqual("foobarfoobarfoobar", self.successResultOf(deferred)) def test_with_non_serializable_return_value(self): """ If the target object method returns an object that can't be serialized, the L{MethodCall} raises an error. """ class Complex(object): pass self.object.method = lambda: Complex() deferred = self.sender.send_method_call(method="method", args=[], kwargs={}) self.connection.flush() self.failureResultOf(deferred).trap(MethodCallError) def test_with_long_argument(self): """ The L{MethodCall} protocol supports sending method calls with arguments bigger than the maximum AMP parameter value size. """ self.object.method = lambda word: len(word) == 65535 deferred = self.sender.send_method_call(method="method", args=["!" * 65535], kwargs={}) self.connection.flush() self.assertTrue(self.successResultOf(deferred)) def test_with_long_argument_multiple_calls(self): """ The L{MethodCall} protocol supports concurrently sending multiple method calls with arguments bigger than the maximum AMP value size. """ self.object.method = lambda word: len(word) deferred1 = self.sender.send_method_call(method="method", args=["!" * 80000], kwargs={}) deferred2 = self.sender.send_method_call(method="method", args=["*" * 90000], kwargs={}) self.connection.flush() self.assertEqual(80000, self.successResultOf(deferred1)) self.assertEqual(90000, self.successResultOf(deferred2)) def test_with_exception(self): """ If the target object method raises an exception, the remote call fails with a L{MethodCallError}. """ self.object.method = lambda a, b: a / b deferred = self.sender.send_method_call(method="method", args=[1, 0], kwargs={}) self.connection.flush() self.failureResultOf(deferred).trap(MethodCallError) def test_with_successful_deferred(self): """ If the target object method returns a L{Deferred}, it is handled transparently. """ self.object.deferred = Deferred() self.object.method = lambda: self.object.deferred result = [] deferred = self.sender.send_method_call(method="method", args=[], kwargs={}) deferred.addCallback(result.append) self.connection.flush() # At this point the receiver is waiting for method to complete, so # the deferred has not fired yet self.assertEqual([], result) # Fire the deferred and let the receiver respond self.object.deferred.callback("Hey!") self.connection.flush() self.assertEqual(["Hey!"], result) def test_with_failing_deferred(self): """ If the target object method returns a failing L{Deferred}, a L{MethodCallError} is raised. 
""" self.object.deferred = Deferred() self.object.method = lambda: self.object.deferred result = [] deferred = self.sender.send_method_call(method="method", args=[], kwargs={}) deferred.addErrback(result.append) self.connection.flush() # At this point the receiver is waiting for method to complete, so # the deferred has not fired yet self.assertEqual([], result) # Simulate time advancing and the receiver responding self.object.deferred.errback(Exception()) self.connection.flush() [failure] = result failure.trap(MethodCallError) def test_with_deferred_timeout(self): """ If the peer protocol doesn't send a response for a deferred within the given timeout, the method call fails. """ self.object.method = lambda: Deferred() result = [] deferred = self.sender.send_method_call(method="method", args=[], kwargs={}) deferred.addErrback(result.append) self.clock.advance(60) [failure] = result failure.trap(MethodCallError) def test_with_late_response(self): """ If the peer protocol sends a late response for a request that has already timeout, that response is ignored. """ self.object.deferred = Deferred() self.object.method = lambda: self.object.deferred result = [] deferred = self.sender.send_method_call(method="method", args=[], kwargs={}) deferred.addErrback(result.append) self.clock.advance(60) self.object.deferred.callback("late") [failure] = result failure.trap(MethodCallError) class RemoteObjectTest(LandscapeTest): def setUp(self): super(RemoteObjectTest, self).setUp() self.methods = ["method"] self.object = DummyObject() self.clock = Clock() self.factory = MethodCallClientFactory(self.clock) server_factory = MethodCallServerFactory(self.object, self.methods) self.connector = FakeConnector(self.factory, server_factory) self.connector.connect() self.remote = self.successResultOf(self.factory.getRemoteObject()) def test_with_forbidden_method(self): """ A L{RemoteObject} can send L{MethodCall}s without arguments and withj an empty response. """ self.methods.remove("method") deferred = self.remote.method() failure = self.failureResultOf(deferred) self.assertEqual("Forbidden method 'method'", str(failure.value)) def test_with_no_arguments(self): """ A L{RemoteObject} can send L{MethodCall}s without arguments and with an empty response. """ self.object.method = lambda: None deferred = self.remote.method() self.assertIs(None, self.successResultOf(deferred)) def test_with_return_value(self): """ A L{RemoteObject} can send L{MethodCall}s without arguments and get back the value of the commands's response. """ self.object.method = lambda: "Cool" deferred = self.remote.method() self.assertEqual("Cool", self.successResultOf(deferred)) def test_with_arguments(self): """ A L{RemoteObject} can send L{MethodCall}s with one argument and get the response value. """ self.object.method = lambda word, times=2: word * times deferred = self.remote.method("hi", times=3) self.assertEqual("hihihi", self.successResultOf(deferred)) def test_method_call_error(self): """ If a L{MethodCall} fails due to a L{MethodCallError}, the L{RemoteObject} won't try to perform it again, even if the C{retryOnReconnect} error is set, as a L{MethodCallError} is a permanent failure that is not likely to ever succeed. 
""" self.methods.remove("method") self.factory.retryOnReconnect = True deferred = self.remote.method() self.failureResultOf(deferred).trap(MethodCallError) def test_retry(self): """ If the connection is lost and C{retryOnReconnect} is C{True} on the factory, the L{RemoteObject} will transparently retry to perform the L{MethodCall} requests that failed due to the broken connections. """ self.object.method = lambda word: word.capitalize() self.factory.factor = 0.19 self.factory.retryOnReconnect = True self.connector.disconnect() deferred = self.remote.method("john") # The deferred has not fired yet, because it's been put in the pending # queue, until the call gets a chance to be retried upon reconnection self.assertFalse(deferred.called) # Time passes and the factory successfully reconnects self.clock.advance(1) # We finally get the result self.assertEqual("John", self.successResultOf(deferred)) def test_retry_with_method_call_error(self): """ If a retried L{MethodCall} request fails due to a L{MethodCallError}, the L{RemoteObject} will properly propagate the error to the original caller. """ self.methods.remove("method") self.factory.factor = 0.19 self.factory.retryOnReconnect = True self.connector.disconnect() deferred = self.remote.method() # The deferred has not fired yet, because it's been put in the pending # queue, until the call gets a chance to be retried upon reconnection self.assertFalse(deferred.called) # Time passes and the factory successfully reconnects self.clock.advance(1) failure = self.failureResultOf(deferred) self.assertEqual("Forbidden method 'method'", str(failure.value)) class MethodCallClientFactoryTest(LandscapeTest): def setUp(self): super(MethodCallClientFactoryTest, self).setUp() self.clock = Clock() self.factory = MethodCallClientFactory(self.clock) def test_max_delay(self): """ The L{MethodCallClientFactory} class has a default value of 30 seconds for the maximum reconnection delay. """ self.assertEqual(self.factory.maxDelay, 30) def test_connect_notifier(self): """ The C{notifyOnConnect} method supports specifying a callback that will be invoked when a connection has been established. """ protocols = [] self.factory.notifyOnConnect(protocols.append) protocol = self.factory.buildProtocol(None) protocol.connectionMade() self.assertEqual([protocol], protocols) def test_connect_notifier_with_reconnect(self): """ The C{notifyOnConnect} method will also callback when a connection is re-established after it was lost. """ protocols = [] self.factory.notifyOnConnect(protocols.append) protocol1 = self.factory.buildProtocol(None) protocol1.connectionMade() protocol2 = self.factory.buildProtocol(None) protocol2.connectionMade() self.assertEqual([protocol1, protocol2], protocols) def test_get_remote_object(self): """ The C{getRemoteObject} method returns a deferred firing with a connected L{RemoteBroker}. """ deferred = self.factory.getRemoteObject() protocol = self.factory.buildProtocol(None) protocol.connectionMade() self.assertIsInstance(self.successResultOf(deferred), RemoteObject) def test_get_remote_object_failure(self): """ If the factory fails to establish a connection the deferreds returned by C{getRemoteObject} will fail. """ deferred = self.factory.getRemoteObject() self.factory.continueTrying = False # Don't retry self.factory.clientConnectionFailed(None, Failure(ConnectError())) self.failureResultOf(deferred).trap(ConnectError) def test_client_connection_failed(self): """ The L{MethodCallClientFactory} keeps trying to connect if maxRetries is not reached. 
""" class FakeConnector(object): called = False def connect(self): self.called = True connector = FakeConnector() self.assertEqual(self.factory.retries, 0) self.factory.clientConnectionFailed(connector, None) self.assertEqual(self.factory.retries, 1) self.clock.advance(5) self.assertTrue(connector.called) def test_reconnect(self): """ If the connection is lost, the L{RemoteObject} created by the creator will transparently handle the reconnection. """ dummy_object = DummyObject() dummy_object.method = lambda: None server_factory = MethodCallServerFactory(dummy_object, ["method"]) connector = FakeConnector(self.factory, server_factory) connector.connect() remote = self.successResultOf(self.factory.getRemoteObject()) connector.disconnect() self.clock.advance(5) deferred = remote.method() self.assertIs(None, self.successResultOf(deferred)) class MethodCallFunctionalTest(LandscapeTest): def setUp(self): super(MethodCallFunctionalTest, self).setUp() self.methods = ["method"] self.object = DummyObject() self.object.method = lambda word: word.capitalize() self.socket = self.mktemp() self.server = MethodCallServerFactory(self.object, self.methods) self.client = MethodCallClientFactory(reactor) self.port = reactor.listenUNIX(self.socket, self.server) def tearDown(self): super(MethodCallFunctionalTest, self).tearDown() self.port.stopListening() @inlineCallbacks def test_connect(self): """ The L{RemoteObject} resulting form the deferred returned by L{MethodCallClientFactory.getRemoteObject} is properly connected to the remote peer. """ connector = reactor.connectUNIX(self.socket, self.client) remote = yield self.client.getRemoteObject() result = yield remote.method("john") self.assertEqual(result, "John") self.client.stopTrying() connector.disconnect() @inlineCallbacks def test_connect_with_max_retries(self): """ If L{MethodCallClientFactory.maxRetries} is set, then the factory will give up trying to connect after that amout of times. """ self.port.stopListening() self.client.maxRetries = 0 reactor.connectUNIX(self.socket, self.client) yield self.assertFailure(self.client.getRemoteObject(), ConnectError) @inlineCallbacks def test_reconnect(self): """ If the connection is lost, the L{RemoteObject} created by the factory will transparently handle the reconnection. """ self.client.factor = 0.01 # Try reconnecting very quickly connector = reactor.connectUNIX(self.socket, self.client) remote = yield self.client.getRemoteObject() # Disconnect and wait till we connect again deferred = Deferred() self.client.notifyOnConnect(deferred.callback) connector.disconnect() yield deferred # The remote object is still working result = yield remote.method("john") self.assertEqual(result, "John") self.client.stopTrying() connector.disconnect() @inlineCallbacks def test_retry(self): """ If the connection is lost, the L{RemoteObject} created by the creator will transparently retry to perform the L{MethodCall} requests that failed due to the broken connection. 
""" self.client.factor = 0.01 # Try reconnecting very quickly self.client.retryOnReconnect = True connector = reactor.connectUNIX(self.socket, self.client) remote = yield self.client.getRemoteObject() # Disconnect connector.disconnect() # This call will fail but it's transparently retried result = yield remote.method("john") self.assertEqual(result, "John") self.client.stopTrying() connector.disconnect() @inlineCallbacks def test_retry_with_method_call_error(self): """ If a retried L{MethodCall} request fails due to a L{MethodCallError}, the L{RemoteObject} will properly propagate the error to the original caller. """ self.methods.remove("method") self.client.factor = 0.01 # Try reconnecting very quickly self.client.retryOnReconnect = True connector = reactor.connectUNIX(self.socket, self.client) remote = yield self.client.getRemoteObject() # Disconnect connector.disconnect() # A method call error is not retried yield self.assertFailure(remote.method(), MethodCallError) self.client.stopTrying() connector.disconnect() @inlineCallbacks def test_wb_retry_with_while_still_disconnected(self): """ The L{RemoteObject._retry} method gets called as soon as a new connection is ready. If for whatever reason the connection drops again very quickly, the C{_retry} method will behave as expected. """ self.methods.remove("method") self.client.factor = 0.01 # Try reconnecting very quickly self.client.retryOnReconnect = True connector = reactor.connectUNIX(self.socket, self.client) remote = yield self.client.getRemoteObject() # Disconnect connector.disconnect() def handle_reconnect(protocol): # In this precise moment we have a newly connected protocol remote._sender._protocol = protocol # Pretend that the connection is lost again very quickly protocol.transport.loseConnection() # Force RemoteObject._retry to run using a disconnected protocol reactor.callLater(0, remote._retry) # Restore the real handler and start listening again very soon self.client.dontNotifyOnConnect(handle_reconnect) self.client.notifyOnConnect(remote._handle_connect) def assert_failure(error): self.assertEqual(str(error), "Forbidden method 'method'") # Use our own reconnect handler self.client.dontNotifyOnConnect(remote._handle_connect) self.client.notifyOnConnect(handle_reconnect) error = yield self.assertFailure(remote.method(), MethodCallError) self.assertEqual(str(error), "Forbidden method 'method'") self.client.stopTrying() connector.disconnect() @inlineCallbacks def test_retry_with_many_method_calls(self): """ If several L{MethodCall} requests were issued while disconnected, they will be all eventually completed when the connection gets established again. """ self.client.factor = 0.01 # Try reconnecting very quickly self.client.retryOnReconnect = True connector = reactor.connectUNIX(self.socket, self.client) remote = yield self.client.getRemoteObject() # Disconnect connector.disconnect() result1 = yield remote.method("john") result2 = yield remote.method("bill") self.assertEqual(result1, "John") self.assertEqual(result2, "Bill") self.client.stopTrying() connector.disconnect() @inlineCallbacks def test_retry_without_retry_on_reconnect(self): """ If C{retryOnReconnect} is C{False}, the L{RemoteObject} object won't retry to perform requests which failed because the connection was lost, however requests made after a reconnection will still succeed. 
""" self.client.factor = 0.01 # Try reconnecting very quickly connector = reactor.connectUNIX(self.socket, self.client) remote = yield self.client.getRemoteObject() # Disconnect deferred = Deferred() self.client.notifyOnConnect(deferred.callback) connector.disconnect() yield self.assertFailure(remote.modt(), ConnectionDone) # Wait for reconnection and peform another call yield deferred result = yield remote.method("john") self.assertEqual(result, "John") self.client.stopTrying() connector.disconnect() @inlineCallbacks def test_retry_with_timeout(self): """ If a C{retryTimeout} is set, the L{RemoteObject} object will errback failed L{MethodCall}s after that amount of seconds, without retrying them when the connection established again. """ self.client.retryOnReconnect = True self.client.retryTimeout = 0.1 self.client.factor = 1 # Reconnect slower than timeout connector = reactor.connectUNIX(self.socket, self.client) remote = yield self.client.getRemoteObject() # Disconnect connector.disconnect() error = yield self.assertFailure(remote.method("foo"), MethodCallError) self.assertEqual("timeout", str(error)) self.client.stopTrying() connector.disconnect() landscape-client-14.01/landscape/lib/tests/test_scriptcontent.py0000644000175000017500000000141512301414317024710 0ustar andreasandreasimport unittest from landscape.lib.scriptcontent import (build_script, generate_script_hash) class ScriptContentTest(unittest.TestCase): def test_concatenate(self): self.assertEqual(build_script(u"/bin/sh", u"echo 1.0\n"), "#!/bin/sh\necho 1.0\n") def test_concatenate_null_strings(self): self.assertEqual(build_script(None, None), "#!\n") def test_generate_script_hash(self): hash1 = generate_script_hash("#!/bin/sh\necho 1.0\n") hash2 = generate_script_hash("#!/bin/sh\necho 1.0\n") hash3 = generate_script_hash("#!/bin/sh\necho 3.0\n") self.assertEqual(hash1, hash2) self.assertNotEqual(hash1, hash3) self.assertTrue(isinstance(hash1, str)) landscape-client-14.01/landscape/lib/tests/test_gpg.py0000644000175000017500000000501212301414317022563 0ustar andreasandreasimport os from twisted.internet import reactor from twisted.internet.defer import Deferred from landscape.tests.helpers import LandscapeTest from landscape.lib.gpg import gpg_verify class GPGTest(LandscapeTest): def test_gpg_verify(self): """ L{gpg_verify} runs the given gpg binary and returns C{True} if the provided signature is valid. """ gpg_options = self.makeFile() gpg = self.makeFile("#!/bin/sh\n" "touch $3/trustdb.gpg\n" "echo -n $@ > %s\n" % gpg_options) os.chmod(gpg, 0755) gpg_home = self.makeDir() mkdtemp_mock = self.mocker.replace("tempfile.mkdtemp") mkdtemp_mock() self.mocker.result(gpg_home) self.mocker.replay() deferred = Deferred() def do_test(): result = gpg_verify("/some/file", "/some/signature", gpg=gpg) def check_result(ignored): self.assertEqual( open(gpg_options).read(), "--no-options --homedir %s --no-default-keyring " "--ignore-time-conflict --keyring /etc/apt/trusted.gpg " "--verify /some/signature /some/file" % gpg_home) self.assertFalse(os.path.exists(gpg_home)) result.addCallback(check_result) result.chainDeferred(deferred) reactor.callWhenRunning(do_test) return deferred def test_gpg_verify_with_non_zero_gpg_exit_code(self): """ L{gpg_verify} runs the given gpg binary and returns C{False} if the provided signature is not valid. 
""" gpg = self.makeFile("#!/bin/sh\necho out; echo err >&2; exit 1\n") os.chmod(gpg, 0755) gpg_home = self.makeDir() mkdtemp_mock = self.mocker.replace("tempfile.mkdtemp") mkdtemp_mock() self.mocker.result(gpg_home) self.mocker.replay() deferred = Deferred() def do_test(): result = gpg_verify("/some/file", "/some/signature", gpg=gpg) def check_failure(failure): self.assertEqual(str(failure.value), "%s failed (out='out\n', err='err\n', " "code='1')" % gpg) self.assertFalse(os.path.exists(gpg_home)) result.addCallback(self.fail) result.addErrback(check_failure) result.chainDeferred(deferred) reactor.callWhenRunning(do_test) return deferred landscape-client-14.01/landscape/lib/tests/test_monitor.py0000644000175000017500000001753712301414317023514 0ustar andreasandreasfrom landscape.lib.monitor import ( Timer, Monitor, BurstMonitor, CoverageMonitor, FrequencyMonitor) from landscape.reactor import FakeReactor from landscape.tests.helpers import LandscapeTest class ReactorHavingTest(LandscapeTest): def setUp(self): super(ReactorHavingTest, self).setUp() self.reactor = FakeReactor() class TimerTest(ReactorHavingTest): def setUp(self): super(TimerTest, self).setUp() self.timer = Timer(create_time=self.reactor.time) def test_since_start(self): self.assertEqual(self.timer.since_start(), 0.0) self.reactor.advance(1) self.assertEqual(self.timer.since_start(), 1.0) self.reactor.advance(2) self.assertEqual(self.timer.since_start(), 3.0) def test_since_reset(self): self.reactor.advance(1) self.assertEqual(self.timer.since_reset(), 1.0) self.reactor.advance(1) self.assertEqual(self.timer.since_start(), 2.0) self.reactor.advance(2) self.timer.reset() self.assertEqual(self.timer.since_start(), 4.0) class MonitorTest(ReactorHavingTest): def setUp(self): super(MonitorTest, self).setUp() self.monitor = Monitor("test", create_time=self.reactor.time) def test_ping(self): self.assertEqual(self.monitor.count, 0) self.assertEqual(self.monitor.total_count, 0) self.monitor.ping() self.assertEqual(self.monitor.count, 1) self.assertEqual(self.monitor.total_count, 1) def test_reset(self): self.assertEqual(self.monitor.count, 0) self.monitor.ping() self.monitor.ping() self.assertEqual(self.monitor.count, 2) self.assertEqual(self.monitor.total_count, 2) self.monitor.reset() self.monitor.ping() self.assertEqual(self.monitor.count, 1) self.assertEqual(self.monitor.total_count, 3) def test_log(self): for i in range(100): self.monitor.ping() self.reactor.advance(1) self.monitor.log() self.assertTrue("INFO: 100 test events occurred in the last 100.00s." 
in self.logfile.getvalue()) class BurstMonitorTest(ReactorHavingTest): def setUp(self): super(BurstMonitorTest, self).setUp() self.monitor = BurstMonitor(60, 1, "test", create_time=self.reactor.time) def test_warn_no_pings(self): self.assertFalse(self.monitor.warn()) def test_warn_below_threshold(self): self.monitor.ping() self.reactor.advance(61) self.assertFalse(self.monitor.warn()) def test_warn_on_threshold(self): self.monitor.ping() self.reactor.advance(61) self.assertFalse(self.monitor.warn()) def test_warn_over_threshold(self): self.monitor.ping() self.reactor.advance(30) self.monitor.ping() self.assertTrue(self.monitor.warn()) self.reactor.advance(31) self.assertFalse(self.monitor.warn()) def test_warn_in_first_interval(self): self.monitor.ping() self.reactor.advance(59) self.assertFalse(self.monitor.warn()) def test_warn_unexpected_burst(self): self.monitor.ping() self.reactor.advance(5000) self.assertFalse(self.monitor.warn()) self.monitor.ping() self.assertFalse(self.monitor.warn()) self.monitor.ping() self.assertTrue(self.monitor.warn()) def test_warn_maximum_count(self): monitor = BurstMonitor(60, 2, "test", create_time=self.reactor.time) monitor.ping() monitor.ping() self.assertFalse(monitor.warn()) monitor.ping() self.assertTrue(monitor.warn()) def test_warn_maximum_count_over_time_span(self): monitor = BurstMonitor(60, 3, "test", create_time=self.reactor.time) monitor.ping() monitor.ping() self.assertFalse(monitor.warn()) self.reactor.advance(30) monitor.ping() self.assertFalse(monitor.warn()) self.reactor.advance(31) monitor.ping() self.assertFalse(monitor.warn()) monitor.ping() monitor.ping() self.assertTrue(monitor.warn()) class CoverageMonitorTest(ReactorHavingTest): def setUp(self): super(CoverageMonitorTest, self).setUp() self.monitor = CoverageMonitor(1, 1.0, "test", create_time=self.reactor.time) def test_warn(self): self.monitor.ping() self.reactor.advance(1) self.assertFalse(self.monitor.warn()) self.reactor.advance(1) self.assertTrue(self.monitor.warn()) self.monitor.reset() self.assertFalse(self.monitor.warn()) def test_percent_no_data(self): """ If no time has passed and the monitor hasn't received any pings it should return 100%. """ self.assertEqual(self.monitor.percent, 1.0) def test_percent(self): self.reactor.advance(1) self.assertEqual(self.monitor.percent, 0.0) self.monitor.ping() self.reactor.advance(1) self.assertEqual(self.monitor.percent, 0.5) def test_percent_reset(self): self.reactor.advance(1) self.assertEqual(self.monitor.percent, 0.0) self.monitor.reset() self.monitor.ping() self.reactor.advance(1) self.assertEqual(self.monitor.percent, 1.0) def test_expected_count(self): self.reactor.advance(1) self.assertEqual(self.monitor.expected_count, 1.0) self.reactor.advance(1) self.assertEqual(self.monitor.expected_count, 2.0) def test_expected_count_reset(self): self.reactor.advance(1) self.assertEqual(self.monitor.expected_count, 1.0) self.monitor.reset() self.reactor.advance(1) self.assertEqual(self.monitor.expected_count, 1.0) def test_log(self): for i in range(100): self.monitor.ping() self.reactor.advance(1) self.monitor.log() self.assertTrue("INFO: 100 of 100 expected test events (100.00%) " "occurred in the last 100.00s." in self.logfile.getvalue()) def test_log_warning(self): for i in range(100): self.reactor.advance(1) self.monitor.log() self.assertTrue("WARNING: 0 of 100 expected test events (0.00%) " "occurred in the last 100.00s." 
in self.logfile.getvalue()) class FrequencyMonitorTest(ReactorHavingTest): def setUp(self): super(FrequencyMonitorTest, self).setUp() self.monitor = FrequencyMonitor(100, 1, "test", create_time=self.reactor.time) def test_expected_count(self): self.assertEqual(self.monitor.expected_count, 0) self.reactor.advance(99) self.assertEqual(self.monitor.expected_count, 0) self.reactor.advance(1) self.assertEqual(self.monitor.expected_count, 1) def test_ping(self): self.assertFalse(self.monitor.warn()) self.reactor.advance(80) self.monitor.ping() self.assertFalse(self.monitor.warn()) self.reactor.advance(80) self.assertFalse(self.monitor.warn()) def test_warn(self): self.assertFalse(self.monitor.warn()) self.reactor.advance(101) self.assertTrue(self.monitor.warn()) def test_log(self): self.monitor.ping() self.reactor.advance(100) self.monitor.log() self.assertTrue("minimum expected test events" not in self.logfile.getvalue()) self.reactor.advance(1) self.monitor.log() self.assertTrue("WARNING: Only 0 of 1 minimum expected test events " "occurred in the last 100.00s." in self.logfile.getvalue()) landscape-client-14.01/landscape/lib/tests/test_persist.py0000644000175000017500000003754712301414317023521 0ustar andreasandreasimport pprint import os from landscape.lib.persist import ( path_string_to_tuple, path_tuple_to_string, Persist, RootedPersist, PickleBackend, PersistError, PersistReadOnlyError) from landscape.tests.helpers import LandscapeTest class PersistHelpersTest(LandscapeTest): paths = [ ("ab", ("ab",)), ("ab.cd", ("ab", "cd")), ("ab.cd.de", ("ab", "cd", "de")), ("ab[0]", ("ab", 0)), ("ab[0][1]", ("ab", 0, 1)), ("ab.cd[1]", ("ab", "cd", 1)), ("ab[0].cd[1]", ("ab", 0, "cd", 1)), ("ab.cd.de[2]", ("ab", "cd", "de", 2)), ] def test_path_string_to_tuple(self): for path_string, path_tuple in self.paths: self.assertEqual(path_string_to_tuple(path_string), path_tuple) def test_path_string_to_tuple_error(self): self.assertRaises(PersistError, path_string_to_tuple, "ab[0][c]") def test_path_tuple_to_string(self): for path_string, path_tuple in self.paths: self.assertEqual(path_tuple_to_string(path_tuple), path_string) class BasePersistTest(LandscapeTest): set_items = [ ("ab", 1), ("ab", 2), ("cd.ef", 3.4), ("cd.gh", "4"), ("cd.gh", "5"), ("cd.ij.kl", (1, 2.3, "4", [5], (6,))), ("cd.ij.mn", [1, 2.3, "4", [5], (6,)]), ("cd.ij.op[1]", 0), ("cd.ij.op[1]", 1), ("cd.ij.op[2]", 2), ("qr", {"s": {"t": "u"}}), ("v", [0, {}, 2]), ("v[1].v", "woot"), ] set_result = { "ab": 2, "cd": { "ef": 3.4, "gh": "5", "ij": { "kl": (1, 2.3, "4", [5], (6,)), "mn": [1, 2.3, "4", [5], (6,)], "op": [0, 1, 2] }, }, "qr": {"s": {"t": "u"}}, "v": [0, {"v": "woot"}, 2], } get_items = [ ("ab", 2), ("cd.ef", 3.4), ("cd.gh", "5"), ("cd.ij.kl", (1, 2.3, "4", [5], (6,))), ("cd.ij.kl[3]", [5]), ("cd.ij.kl[3][0]", 5), ("cd.ij.mn", [1, 2.3, "4", [5], (6,)]), ("cd.ij.mn.4", "4"), ("cd.ij.mn.5", None), ("cd.ij.op", [0, 1, 2]), ("cd.ij.op[0]", 0), ("cd.ij.op[1]", 1), ("cd.ij.op[2]", 2), ("cd.ij.op[3]", None), ("qr", {"s": {"t": "u"}}), ("qr.s", {"t": "u"}), ("qr.s.t", "u"), ("x", None), ("x.y.z", None), ] add_items = [ ("ab", 1), ("ab", 2.3), ("ab", "4"), ("ab", [5]), ("ab", (6,)), ("ab", {}), ("ab[5].cd", "foo"), ("ab[5].cd", "bar"), ] add_result = { "ab": [1, 2.3, "4", [5], (6,), {"cd": ["foo", "bar"]}], } def setUp(self): LandscapeTest.setUp(self) self.persist = self.build_persist() def tearDown(self): del self.persist LandscapeTest.tearDown(self) def build_persist(self, *args, **kwargs): return Persist(*args, **kwargs) def format(self, 
result, expected): repr_result = pprint.pformat(result) repr_expected = pprint.pformat(expected) return "\nResult:\n%s\nExpected:\n%s\n" % (repr_result, repr_expected) class GeneralPersistTest(BasePersistTest): def test_set(self): for path, value in self.set_items: self.persist.set(path, value) result = self.persist.get((), hard=True) self.assertEqual(result, self.set_result, self.format(result, self.set_result)) def test_set_tuple_paths(self): for path, value in self.set_items: self.persist.set(path_string_to_tuple(path), value) result = self.persist.get((), hard=True) self.assertEqual(result, self.set_result, self.format(result, self.set_result)) def test_set_from_result(self): for path in self.set_result: self.persist.set(path, self.set_result[path]) result = self.persist.get((), hard=True) self.assertEqual(result, self.set_result, self.format(result, self.set_result)) def test_get(self): for path in self.set_result: self.persist.set(path, self.set_result[path]) for path, value in self.get_items: self.assertEqual(self.persist.get(path), value) def test_get_tuple_paths(self): for path in self.set_result: self.persist.set(path_string_to_tuple(path), self.set_result[path]) for path, value in self.get_items: self.assertEqual(self.persist.get(path), value) def test_add(self): for path, value in self.add_items: self.persist.add(path, value) result = self.persist.get((), hard=True) self.assertEqual(result, self.add_result, self.format(result, self.add_result)) def test_add_unique(self): self.persist.add("a", "b") self.assertEqual(self.persist.get("a"), ["b"]) self.persist.add("a", "b") self.assertEqual(self.persist.get("a"), ["b", "b"]) self.persist.add("a", "b", unique=True) self.assertEqual(self.persist.get("a"), ["b", "b"]) self.persist.add("a", "c", unique=True) self.assertEqual(self.persist.get("a"), ["b", "b", "c"]) def test_keys(self): self.persist.set("a", {"b": 1, "c": {"d": 2}, "e": list("foo")}) keys = self.persist.keys self.assertEqual(set(keys((), hard=True)), set(["a"])) self.assertEqual(set(keys("a")), set(["b", "c", "e"])) self.assertEqual(set(keys("a.d")), set([])) self.assertEqual(set(keys("a.e")), set([0, 1, 2])) self.assertEqual(set(keys("a.f")), set([])) self.assertRaises(PersistError, keys, "a.b") def test_has(self): self.persist.set("a", {"b": 1, "c": {"d": 2}, "e": list("foo")}) has = self.persist.has self.assertTrue(has("a")) self.assertTrue(has(("a", "b"))) self.assertTrue(has("a.c")) self.assertTrue(has("a.c", "d")) self.assertTrue(has("a.c.d")) self.assertTrue(has("a.e")) self.assertTrue(has("a.e[0]")) self.assertTrue(has("a.e", "f")) self.assertTrue(has("a.e", "o")) self.assertFalse(has("b")) self.assertFalse(has("a.f")) self.assertFalse(has("a.c.f")) self.assertFalse(has("a.e[3]")) self.assertFalse(has("a.e", "g")) self.assertRaises(PersistError, has, "a.b.c") def test_remove(self): self.persist.set("a", {"b": [1], "c": {"d": 2}, "e": list("foot")}) get = self.persist.get has = self.persist.has remove = self.persist.remove self.assertFalse(remove("a.f")) self.assertRaises(PersistError, remove, "a.c.d.e") self.assertTrue(remove(("a", "e", "o"))) self.assertEqual(get("a.e"), ["f", "t"]) self.assertFalse(remove("a.e[2]")) self.assertEqual(get("a.e"), ["f", "t"]) self.assertTrue(remove("a.e[1]")) self.assertEqual(get("a.e"), ["f"]) self.assertTrue(remove("a.e", "f")) self.assertFalse(has("a.e")) self.assertFalse(remove("a.b[1]")) self.assertEqual(get("a.b"), [1]) self.assertTrue(remove("a.b", 1)) self.assertFalse(has("a.b")) self.assertTrue(remove("a.c")) 
self.assertFalse(has("a.c")) self.assertFalse(has("a")) def test_move(self): self.persist.set("a", {"b": [1], "c": {"d": 2}}) move = self.persist.move get = self.persist.get self.assertTrue(move("a.b", "a.c.b")) self.assertEqual(get("a"), {"c": {"b": [1], "d": 2}}) self.assertTrue(move("a.c.b[0]", "a.c.b")) self.assertEqual(get("a"), {"c": {"b": 1, "d": 2}}) self.assertTrue(move(("a", "c", "b"), ("a", "c", "b", 0))) self.assertEqual(get("a"), {"c": {"b": [1], "d": 2}}) def test_copy_values_on_set(self): d = {"b": 1} d_orig = d.copy() self.persist.set("a", d) d["c"] = 2 self.assertEqual(self.persist.get("a"), d_orig) def test_copy_values_on_add(self): d = {"b": 1} d_orig = d.copy() self.persist.add("a", d) d["c"] = 2 self.assertEqual(self.persist.get("a[0]"), d_orig) def test_copy_values_on_get(self): self.persist.set("a", {"b": 1}) d = self.persist.get("a") d_orig = d.copy() d["c"] = 2 self.assertEqual(self.persist.get("a"), d_orig) def test_root_at(self): rooted = self.persist.root_at("my-module") rooted.set("option", 1) self.assertEqual(self.persist.get("my-module.option"), 1) class SaveLoadPersistTest(BasePersistTest): def test_readonly(self): self.assertFalse(self.persist.readonly) self.persist.readonly = True self.assertTrue(self.persist.readonly) self.persist.readonly = False self.assertFalse(self.persist.readonly) self.persist.readonly = True self.assertRaises(PersistReadOnlyError, self.persist.set, "ab", 2) self.assertRaises(PersistReadOnlyError, self.persist.add, "ab", 3) self.assertRaises(PersistReadOnlyError, self.persist.remove, "ab", 4) self.assertRaises(PersistReadOnlyError, self.persist.move, "ab", "cd") for keyword in ["weak", "soft"]: kwargs = {keyword: True} self.persist.set("ab", 2, **kwargs) self.persist.add("cd", 2, **kwargs) self.persist.remove("ab", **kwargs) self.persist.move("cd", "ef", **kwargs) def test_assert_writable(self): self.persist.assert_writable() self.persist.set("ab", 1) self.persist.readonly = True self.assertRaises(PersistReadOnlyError, self.persist.assert_writable) def test_modified(self): self.assertFalse(self.persist.modified) self.persist.set("ab", 1) self.assertTrue(self.persist.modified) self.persist.reset_modified() self.assertFalse(self.persist.modified) self.persist.add("cd", 2) self.assertTrue(self.persist.modified) self.persist.reset_modified() self.assertFalse(self.persist.modified) self.persist.remove("ab") self.assertTrue(self.persist.modified) self.persist.reset_modified() self.assertFalse(self.persist.modified) self.persist.move("cd", "ef") self.assertTrue(self.persist.modified) def test_save_and_load(self): for path in self.set_result: self.persist.set(path, self.set_result[path]) filename = self.makeFile() self.persist.save(filename) persist = self.build_persist() persist.load(filename) result = persist.get((), hard=True) self.assertEqual(result, self.set_result, self.format(result, self.set_result)) def test_save_on_unexistent_dir(self): dirname = self.makeFile() filename = os.path.join(dirname, "foobar") self.assertFalse(os.path.exists(dirname)) self.persist.save(filename) self.assertTrue(os.path.isfile(filename)) def test_save_creates_backup(self): filename = self.makePersistFile("foobar") filename_old = filename + ".old" self.assertFalse(os.path.exists(filename_old)) self.persist.save(filename) self.assertTrue(os.path.exists(filename_old)) def test_save_to_default_file(self): """ Persist can be constructed with a filename, and Persist.save with no arguments will write to that filename. 
""" filename = self.makeFile() persist = self.build_persist(filename=filename) self.assertFalse(os.path.exists(filename)) persist.save() self.assertTrue(os.path.exists(filename)) def test_save_to_no_default_file(self): """ If no default filename was given, calling Persist.save with no arguments will raise a PersistError. """ self.assertRaises(PersistError, self.persist.save) def test_load_default_file(self): """ If a Persist is created with a default filename, and the filename exists, it will be loaded. """ filename = self.makeFile() persist = self.build_persist(filename=filename) persist.set("foo", "bar") persist.save() persist = self.build_persist(filename=filename) self.assertEqual(persist.get("foo"), "bar") def test_load_restores_backup(self): filename = self.makePersistFile("foobar") filename_old = filename + ".old" self.persist.set("a", 1) self.persist.save(filename_old) persist = self.build_persist() persist.load(filename) self.assertEqual(persist.get("a"), 1) def test_load_empty_files_wont_break(self): filename = self.makeFile("") self.persist.load(filename) def test_load_empty_files_restore_backup(self): """ If the current file is empty, it tries to load the old one if it exists. """ filename = self.makeFile("") filename_old = filename + ".old" self.persist.set("a", 1) self.persist.save(filename_old) persist = self.build_persist() persist.load(filename) self.assertEqual(persist.get("a"), 1) def test_non_existing_raise_error(self): """ Trying to load a file that doesn't exist result in a L{PersistError}. """ persist = self.build_persist() self.assertRaises(PersistError, persist.load, "/nonexistent") def test_non_existing_restore_backup(self): """ If the file doesn't exist, it tries to load the old one if present and valid. """ filename = self.makeFile("") filename_old = filename + ".old" os.unlink(filename) self.persist.set("a", 1) self.persist.save(filename_old) persist = self.build_persist() persist.load(filename) self.assertEqual(persist.get("a"), 1) class PicklePersistTest(GeneralPersistTest, SaveLoadPersistTest): def build_persist(self, *args, **kwargs): return Persist(PickleBackend(), *args, **kwargs) class RootedPersistTest(GeneralPersistTest): def build_persist(self, *args, **kwargs): return RootedPersist(Persist(), "root.path", *args, **kwargs) def test_readonly(self): self.assertFalse(self.persist.readonly) self.assertRaises(AttributeError, setattr, self.persist, "readonly", True) self.persist.parent.readonly = True self.assertTrue(self.persist.readonly) def test_assert_writable(self): self.persist.assert_writable() self.persist.set("ab", 1) self.persist.parent.readonly = True self.assertRaises(PersistReadOnlyError, self.persist.assert_writable) def test_modified(self): self.assertFalse(self.persist.modified) self.persist.set("ab", 1) self.assertTrue(self.persist.modified) self.persist.parent.reset_modified() self.assertFalse(self.persist.modified) self.persist.add("cd", 2) self.assertTrue(self.persist.modified) self.persist.parent.reset_modified() self.assertFalse(self.persist.modified) self.persist.remove("ab") self.assertTrue(self.persist.modified) self.persist.parent.reset_modified() self.assertFalse(self.persist.modified) self.persist.move("cd", "ef") self.assertTrue(self.persist.modified) landscape-client-14.01/landscape/lib/tests/test_lsb_release.py0000644000175000017500000000260612301414317024274 0ustar andreasandreasfrom landscape.tests.helpers import LandscapeTest from landscape.lib.lsb_release import parse_lsb_release class LsbReleaseTest(LandscapeTest): def 
test_parse_lsb_release(self): """ L{parse_lsb_release} returns a C{dict} holding information from the given LSB release file. """ lsb_release_filename = self.makeFile("DISTRIB_ID=Ubuntu\n" "DISTRIB_RELEASE=6.06\n" "DISTRIB_CODENAME=dapper\n" "DISTRIB_DESCRIPTION=" "\"Ubuntu 6.06.1 LTS\"\n") self.assertEqual(parse_lsb_release(lsb_release_filename), {"distributor-id": "Ubuntu", "description": "Ubuntu 6.06.1 LTS", "release": "6.06", "code-name": "dapper"}) def test_parse_lsb_release_with_missing_or_extra_fields(self): """ L{parse_lsb_release} ignores lines not matching the map of known keys, and returns only keys with an actual value. """ lsb_release_filename = self.makeFile("DISTRIB_ID=Ubuntu\n" "FOO=Bar\n") self.assertEqual(parse_lsb_release(lsb_release_filename), {"distributor-id": "Ubuntu"}) landscape-client-14.01/landscape/lib/tests/test_vm_info.py0000644000175000017500000001360212301414317023447 0ustar andreasandreasimport os from landscape.tests.helpers import LandscapeTest from landscape.lib.vm_info import get_vm_info, get_container_info class GetVMInfoTest(LandscapeTest): def setUp(self): super(GetVMInfoTest, self).setUp() self.root_path = self.makeDir() self.proc_path = self.makeDir( path=os.path.join(self.root_path, "proc")) self.sys_path = self.makeDir(path=os.path.join(self.root_path, "sys")) self.proc_sys_path = self.makeDir( path=os.path.join(self.proc_path, "sys")) def make_sys_vendor(self, content): """Create /sys/class/dmi/id/sys_vendor with the specified content.""" dmi_path = os.path.join(self.root_path, "sys/class/dmi/id") self.makeDir(path=dmi_path) self.makeFile(dirname=dmi_path, basename="sys_vendor", content=content) def test_get_vm_info_empty_when_no_virtualization_is_found(self): """ L{get_vm_info} should be empty when there's no virtualisation. """ self.assertEqual(u"", get_vm_info(root_path=self.root_path)) def test_get_vm_info_is_openvz_when_proc_vz_exists(self): """ L{get_vm_info} should return 'openvz' when /proc/vz exists. """ proc_vz_path = os.path.join(self.proc_path, "vz") self.makeFile(path=proc_vz_path, content="foo") self.assertEqual("openvz", get_vm_info(root_path=self.root_path)) def test_get_vm_info_is_xen_when_proc_sys_xen_exists(self): """ L{get_vm_info} should return 'xen' when /proc/sys/xen exists. """ proc_sys_xen_path = os.path.join(self.proc_sys_path, "xen") self.makeFile(path=proc_sys_xen_path, content="foo") self.assertEqual("xen", get_vm_info(root_path=self.root_path)) def test_get_vm_info_is_xen_when_sys_bus_xen_is_non_empty(self): """ L{get_vm_info} should return 'xen' when /sys/bus/xen exists and has devices. """ devices_xen_path = os.path.join(self.sys_path, "bus/xen/devices") self.makeDir(path=devices_xen_path) foo_devices_path = os.path.join(devices_xen_path, "foo") self.makeFile(path=foo_devices_path, content="bar") self.assertEqual("xen", get_vm_info(root_path=self.root_path)) def test_get_vm_info_is_xen_when_proc_xen_exists(self): """ L{get_vm_info} should return 'xen' when /proc/xen exists. """ proc_xen_path = os.path.join(self.proc_path, "xen") self.makeFile(path=proc_xen_path, content="foo") self.assertEqual("xen", get_vm_info(root_path=self.root_path)) def test_get_vm_info_is_empty_without_xen_devices(self): """ L{get_vm_info} returns an empty string if the /sys/bus/xen/devices directory exists but doesn't contain any file. 
""" devices_xen_path = os.path.join(self.sys_path, "bus/xen/devices") self.makeDir(path=devices_xen_path) self.assertEqual("", get_vm_info(root_path=self.root_path)) def test_get_vm_info_with_bochs_sys_vendor(self): """ L{get_vm_info} should return "kvm" when we detect the sys_vendor is Bochs. """ self.make_sys_vendor("Bochs") self.assertEqual("kvm", get_vm_info(root_path=self.root_path)) def test_get_vm_info_with_openstack_sys_vendor(self): """ L{get_vm_info} should return "kvm" when we detect the sys_vendor is Openstack. """ self.make_sys_vendor("OpenStack Foundation") self.assertEqual("kvm", get_vm_info(root_path=self.root_path)) def test_get_vm_info_with_vmware_sys_vendor(self): """ L{get_vm_info} should return "vmware" when we detect the sys_vendor is VMware Inc. """ self.make_sys_vendor("VMware, Inc.") self.assertEqual("vmware", get_vm_info(root_path=self.root_path)) def test_get_vm_info_with_virtualbox_sys_vendor(self): """ L{get_vm_info} should return "virtualbox" when we detect the sys_vendor is innotek GmbH. """ self.make_sys_vendor("innotek GmbH") self.assertEqual("virtualbox", get_vm_info(root_path=self.root_path)) def test_get_vm_info_with_microsoft_sys_vendor(self): """ L{get_vm_info} returns "hyperv" if the sys_vendor is Microsoft. """ self.make_sys_vendor("Microsoft Corporation") self.assertEqual("hyperv", get_vm_info(root_path=self.root_path)) def test_get_vm_info_with_kvm_on_other_architecture(self): """ L{get_vm_info} returns 'kvm', if no sys_vendor is available but the model in /proc/cpuinfo contains 'emulated by qemu'. """ cpuinfo_path = os.path.join(self.proc_path, "cpuinfo") cpuinfo = ( "platform : Some Machine\n" "model : Some CPU (emulated by qemu)\n" "machine : Some Machine (emulated by qemu)\n") self.makeFile(path=cpuinfo_path, content=cpuinfo) self.assertEqual("kvm", get_vm_info(root_path=self.root_path)) def test_get_vm_info_with_other_vendor(self): """ L{get_vm_info} should return an empty string when the sys_vendor is unknown. """ self.make_sys_vendor("Some other vendor") self.assertEqual("", get_vm_info(root_path=self.root_path)) class GetContainerInfoTest(LandscapeTest): def test_no_container(self): """If not running in a container, an empty string is returned.""" self.assertEqual("", get_container_info(path="/does/not/exist")) def test_in_container(self): """If running in a container, the container type is returned.""" path = self.makeFile(content="lxc") self.assertEqual("lxc", get_container_info(path=path)) def test_strip_newline(self): """The container type doesn't contain newlines.""" path = self.makeFile(content="lxc\n") self.assertEqual("lxc", get_container_info(path=path)) landscape-client-14.01/landscape/lib/tests/test_warning.py0000644000175000017500000000165412301414317023463 0ustar andreasandreasimport warnings from twisted.trial.unittest import TestCase from landscape.lib.warning import hide_warnings class WarningTest(TestCase): def setUp(self): super(WarningTest, self).setUp() self.orig_filters = warnings.filters[:] def tearDown(self): super(WarningTest, self).tearDown() warnings.filters[:] = self.orig_filters def test_hide_warnings(self): hide_warnings() filters = warnings.filters[:2] # Warning filters are processed beginning to end, and the first filter # which matches a particular warning is used. self.assertEqual( filters, # The frontmost should "default" (i.e. print) on UserWarnings [("default", None, UserWarning, None, 0), # The one just behind that should indicate that we should ignore # all other warnings. 
("ignore", None, Warning, None, 0)]) landscape-client-14.01/landscape/lib/tests/test_encoding.py0000644000175000017500000000454212301414317023603 0ustar andreasandreas# -*- coding: utf-8 -*- from landscape.tests.helpers import LandscapeTest from landscape.lib.encoding import encode_if_needed, encode_dict_if_needed class EncodingTest(LandscapeTest): def test_encode_if_needed_utf_string(self): """ When passed an utf-8 str() instance the encode_if_needed function returns the same. """ value = "請不要刪除" result = encode_if_needed(value) self.assertEqual(value, result) def test_encode_if_needed_utf16_string(self): """ When passed an unicode instance that is a decode()'d unicode (utf-16), the encode_if_needed function returns the utf-16 str() equivalent (in utf-8). """ value = u"Alex \U0001f603" result = encode_if_needed(value) expected = 'Alex \xf0\x9f\x98\x83' self.assertEqual(expected, result) def test_encode_if_needed_utf_unicode(self): """ When passed an unicode instance that is a decode()'d unicode, the encode_if_needed function returns the utf-8 str() equivalent. """ value = u'\u8acb\u4e0d\u8981\u522a\u9664' expected = "請不要刪除" result = encode_if_needed(value) self.assertEqual(expected, result) def test_encode_if_needed_utf_unicode_string(self): """ When passed an encoded() unicode instance, the encode_if_needed function returns the utf-8 str() equivalent. """ value = u"請不要刪除" expected = "請不要刪除" result = encode_if_needed(value) self.assertEqual(expected, result) def test_encode_if_needed_with_null_value(self): """ When passed None, the encode_if_needed function returns None. """ self.assertIs(None, encode_if_needed(None)) def test_encode_dict_if_needed(self): """ The encode_dict_if_needed function returns a dict for which every value was passed to the encode_if_needed function. """ value = {"a": "請不要刪除", "b": u'\u8acb\u4e0d\u8981\u522a\u9664', "c": u"請不要刪除", "d": None, "e": 123} expected = {"a": "請不要刪除", "b": "請不要刪除", "c": "請不要刪除", "d": None, "e": 123} result = encode_dict_if_needed(value) self.assertEqual(expected, result) landscape-client-14.01/landscape/lib/tests/__init__.py0000644000175000017500000000000012301414317022476 0ustar andreasandreaslandscape-client-14.01/landscape/lib/tests/test_cloud.py0000644000175000017500000001262112301414317023120 0ustar andreasandreasfrom landscape.lib.cloud import ( EC2_API, _fetch_ec2_item, fetch_ec2_meta_data, MAX_LENGTH) from landscape.lib.fetch import HTTPCodeError, PyCurlError from landscape.tests.helpers import LandscapeTest from twisted.internet.defer import succeed, fail class CloudTest(LandscapeTest): def setUp(self): LandscapeTest.setUp(self) self.query_results = {} self.kwargs = {} def fetch_stub(url, **kwargs): self.kwargs = kwargs value = self.query_results[url] if isinstance(value, Exception): return fail(value) else: return succeed(value) self.fetch_func = fetch_stub self.add_query_result("instance-id", "i00001") self.add_query_result("ami-id", "ami-00002") self.add_query_result("instance-type", "hs1.8xlarge") def add_query_result(self, name, value): """ Add a url to self.query_results that is then available through self.fetch_func. """ url = "%s/meta-data/%s" % (EC2_API, name) self.query_results[url] = value def test_fetch_ec2_meta_data_error_on_any_item_error(self): """ L{_fetch_ec2_meta_data} returns a deferred C{Failure} containing the error message when an error occurs on any of the queried meta-data items C{instance-id}, C{ami-id} or C{instance-type}. 
""" self.log_helper.ignore_errors(HTTPCodeError) error = HTTPCodeError(404, "notfound") metadata_items = ["instance-id", "ami-id", "instance-type"] for item in metadata_items: # reset all item data adding the error to only 1 item per iteration for setup_item in metadata_items: if setup_item == item: self.add_query_result(item, error) else: self.add_query_result(setup_item, "value%s" % setup_item) deferred = fetch_ec2_meta_data(fetch=self.fetch_func) failure = self.failureResultOf(deferred) self.assertEqual( "Server returned HTTP code 404", failure.getErrorMessage()) def test_fetch_ec2_meta_data(self): """ L{_fetch_ec2_meta_data} returns a C{dict} containing meta-data for C{instance-id}, C{ami-id} and C{instance-type}. """ deferred = fetch_ec2_meta_data(fetch=self.fetch_func) result = self.successResultOf(deferred) self.assertEqual( {"ami-id": u"ami-00002", "instance-id": u"i00001", "instance-type": u"hs1.8xlarge"}, result) def test_fetch_ec2_meta_data_utf8(self): """ L{_fetch_ec2_meta_data} decodes utf-8 strings returned from the external service. """ self.add_query_result("ami-id", "asdf\xe1\x88\xb4") deferred = fetch_ec2_meta_data(fetch=self.fetch_func) result = self.successResultOf(deferred) self.assertEqual({"instance-id": u"i00001", "ami-id": u"asdf\u1234", "instance-type": u"hs1.8xlarge"}, result) def test_fetch_ec2_meta_data_truncates(self): """L{_fetch_ec2_meta_data} truncates values that are too long.""" self.add_query_result("ami-id", "a" * MAX_LENGTH * 5) self.add_query_result("instance-id", "b" * MAX_LENGTH * 5) self.add_query_result("instance-type", "c" * MAX_LENGTH * 5) deferred = fetch_ec2_meta_data(fetch=self.fetch_func) result = self.successResultOf(deferred) self.assertEqual( {"ami-id": "a" * MAX_LENGTH, "instance-id": "b" * MAX_LENGTH, "instance-type": "c" * MAX_LENGTH}, result) def test_wb_fetch_ec2_item_multiple_items_appends_accumulate_list(self): """ L{_fetch_ec2_item} retrieves individual meta-data items from the EC2 api and appends them to the C{list} provided by the C{accumulate} parameter. """ accumulate = [] self.successResultOf( _fetch_ec2_item("instance-id", accumulate, fetch=self.fetch_func)) self.successResultOf( _fetch_ec2_item( "instance-type", accumulate, fetch=self.fetch_func)) self.assertEqual(["i00001", "hs1.8xlarge"], accumulate) def test_wb_fetch_ec2_item_error_returns_failure(self): """ L{_fetch_ec2_item} returns a deferred C{Failure} containing the error message when faced with no EC2 cloud API service. """ self.log_helper.ignore_errors(PyCurlError) self.add_query_result("other-id", PyCurlError(60, "pycurl error")) accumulate = [] deferred = _fetch_ec2_item( "other-id", accumulate, fetch=self.fetch_func) failure = self.failureResultOf(deferred) self.assertEqual("Error 60: pycurl error", failure.getErrorMessage()) def test_wb_fetch_ec2_meta_data_nofollow(self): """ L{_fetch_ec2_meta_data} sets C{follow} to C{False} to avoid following HTTP redirects. 
""" self.log_helper.ignore_errors(PyCurlError) self.add_query_result("other-id", PyCurlError(60, "pycurl error")) accumulate = [] deferred = _fetch_ec2_item( "other-id", accumulate, fetch=self.fetch_func) self.failureResultOf(deferred) self.assertEqual({"follow": False}, self.kwargs) landscape-client-14.01/landscape/lib/tests/test_timestamp.py0000644000175000017500000000130212301414317024007 0ustar andreasandreasfrom datetime import datetime from landscape.lib.timestamp import to_timestamp from landscape.tests.helpers import LandscapeTest class TimestampTest(LandscapeTest): """Test for timestamp conversion function.""" def test_conversion(self): """Test ensures that the conversion returns an int, not a float.""" date = datetime.utcfromtimestamp(1000) timestamp = to_timestamp(date) self.assertTrue(isinstance(timestamp, int)) self.assertEqual(timestamp, 1000) def test_before_epoch_conversion(self): """Test converting a date before the epoch.""" date = datetime.utcfromtimestamp(-1000) self.assertEqual(to_timestamp(date), -1000) landscape-client-14.01/landscape/lib/tests/test_fetch.py0000644000175000017500000004307212301414317023107 0ustar andreasandreasimport os import pycurl from twisted.internet.defer import FirstError from landscape.lib.fetch import ( fetch, fetch_async, fetch_many_async, fetch_to_files, url_to_filename, HTTPCodeError, PyCurlError) from landscape.tests.helpers import LandscapeTest class CurlStub(object): def __init__(self, result=None, infos=None, error=None): self.result = result self.infos = infos if self.infos is None: self.infos = {pycurl.HTTP_CODE: 200} self.options = {} self.performed = False self.error = error def getinfo(self, what): if what in self.infos: return self.infos[what] raise RuntimeError("Stub doesn't know about %d info" % what) def setopt(self, option, value): if isinstance(value, unicode): raise AssertionError("setopt() doesn't accept unicode values") if self.performed: raise AssertionError("setopt() can't be called after perform()") self.options[option] = value def perform(self): if self.error: raise self.error if self.performed: raise AssertionError("Can't perform twice") self.options[pycurl.WRITEFUNCTION](self.result) self.performed = True class CurlManyStub(object): def __init__(self, url_results): self.curls = {} for url in url_results: result = url_results[url] if isinstance(result, str): body = result http_code = 200 else: body = result[0] http_code = result[1] self.curls[url] = CurlStub(body, {pycurl.HTTP_CODE: http_code}) self.current = None def getinfo(self, what): if not self.current.performed: raise AssertionError("getinfo() can't be called before perform()") result = self.current.getinfo(what) self.current = None return result def setopt(self, option, value): if option is pycurl.URL: self.current = self.curls[value] self.current.setopt(option, value) def perform(self): self.current.perform() class Any(object): def __eq__(self, other): return True class FetchTest(LandscapeTest): def test_basic(self): curl = CurlStub("result") result = fetch("http://example.com", curl=curl) self.assertEqual(result, "result") self.assertEqual(curl.options, {pycurl.URL: "http://example.com", pycurl.FOLLOWLOCATION: 1, pycurl.MAXREDIRS: 5, pycurl.CONNECTTIMEOUT: 30, pycurl.LOW_SPEED_LIMIT: 1, pycurl.LOW_SPEED_TIME: 600, pycurl.NOSIGNAL: 1, pycurl.WRITEFUNCTION: Any(), pycurl.DNS_CACHE_TIMEOUT: 0, pycurl.ENCODING: "gzip,deflate"}) def test_post(self): curl = CurlStub("result") result = fetch("http://example.com", post=True, curl=curl) self.assertEqual(result, 
"result") self.assertEqual(curl.options, {pycurl.URL: "http://example.com", pycurl.FOLLOWLOCATION: 1, pycurl.MAXREDIRS: 5, pycurl.CONNECTTIMEOUT: 30, pycurl.LOW_SPEED_LIMIT: 1, pycurl.LOW_SPEED_TIME: 600, pycurl.NOSIGNAL: 1, pycurl.WRITEFUNCTION: Any(), pycurl.POST: True, pycurl.DNS_CACHE_TIMEOUT: 0, pycurl.ENCODING: "gzip,deflate"}) def test_post_data(self): curl = CurlStub("result") result = fetch("http://example.com", post=True, data="data", curl=curl) self.assertEqual(result, "result") self.assertEqual(curl.options[pycurl.READFUNCTION](), "data") self.assertEqual(curl.options, {pycurl.URL: "http://example.com", pycurl.FOLLOWLOCATION: 1, pycurl.MAXREDIRS: 5, pycurl.CONNECTTIMEOUT: 30, pycurl.LOW_SPEED_LIMIT: 1, pycurl.LOW_SPEED_TIME: 600, pycurl.NOSIGNAL: 1, pycurl.WRITEFUNCTION: Any(), pycurl.POST: True, pycurl.POSTFIELDSIZE: 4, pycurl.READFUNCTION: Any(), pycurl.DNS_CACHE_TIMEOUT: 0, pycurl.ENCODING: "gzip,deflate"}) def test_cainfo(self): curl = CurlStub("result") result = fetch("https://example.com", cainfo="cainfo", curl=curl) self.assertEqual(result, "result") self.assertEqual(curl.options, {pycurl.URL: "https://example.com", pycurl.FOLLOWLOCATION: 1, pycurl.MAXREDIRS: 5, pycurl.CONNECTTIMEOUT: 30, pycurl.LOW_SPEED_LIMIT: 1, pycurl.LOW_SPEED_TIME: 600, pycurl.NOSIGNAL: 1, pycurl.WRITEFUNCTION: Any(), pycurl.CAINFO: "cainfo", pycurl.DNS_CACHE_TIMEOUT: 0, pycurl.ENCODING: "gzip,deflate"}) def test_cainfo_on_http(self): curl = CurlStub("result") result = fetch("http://example.com", cainfo="cainfo", curl=curl) self.assertEqual(result, "result") self.assertTrue(pycurl.CAINFO not in curl.options) def test_headers(self): curl = CurlStub("result") result = fetch("http://example.com", headers={"a": "1", "b": "2"}, curl=curl) self.assertEqual(result, "result") self.assertEqual(curl.options, {pycurl.URL: "http://example.com", pycurl.FOLLOWLOCATION: 1, pycurl.MAXREDIRS: 5, pycurl.CONNECTTIMEOUT: 30, pycurl.LOW_SPEED_LIMIT: 1, pycurl.LOW_SPEED_TIME: 600, pycurl.NOSIGNAL: 1, pycurl.WRITEFUNCTION: Any(), pycurl.HTTPHEADER: ["a: 1", "b: 2"], pycurl.DNS_CACHE_TIMEOUT: 0, pycurl.ENCODING: "gzip,deflate"}) def test_timeouts(self): curl = CurlStub("result") result = fetch("http://example.com", connect_timeout=5, total_timeout=30, curl=curl) self.assertEqual(result, "result") self.assertEqual(curl.options, {pycurl.URL: "http://example.com", pycurl.FOLLOWLOCATION: 1, pycurl.MAXREDIRS: 5, pycurl.CONNECTTIMEOUT: 5, pycurl.LOW_SPEED_LIMIT: 1, pycurl.LOW_SPEED_TIME: 30, pycurl.NOSIGNAL: 1, pycurl.WRITEFUNCTION: Any(), pycurl.DNS_CACHE_TIMEOUT: 0, pycurl.ENCODING: "gzip,deflate"}) def test_unicode(self): """ The L{fetch} function converts the C{url} parameter to C{str} before passing it to curl. 
""" curl = CurlStub("result") result = fetch(u"http://example.com", curl=curl) self.assertEqual(result, "result") self.assertEqual(curl.options[pycurl.URL], "http://example.com") self.assertTrue(isinstance(curl.options[pycurl.URL], str)) def test_non_200_result(self): curl = CurlStub("result", {pycurl.HTTP_CODE: 404}) try: fetch("http://example.com", curl=curl) except HTTPCodeError, error: self.assertEqual(error.http_code, 404) self.assertEqual(error.body, "result") else: self.fail("HTTPCodeError not raised") def test_http_error_str(self): self.assertEqual(str(HTTPCodeError(501, "")), "Server returned HTTP code 501") def test_http_error_repr(self): self.assertEqual(repr(HTTPCodeError(501, "")), "") def test_pycurl_error(self): curl = CurlStub(error=pycurl.error(60, "pycurl error")) try: fetch("http://example.com", curl=curl) except PyCurlError, error: self.assertEqual(error.error_code, 60) self.assertEqual(error.message, "pycurl error") else: self.fail("PyCurlError not raised") def test_pycurl_insecure(self): curl = CurlStub("result") result = fetch("http://example.com/get-ca-cert", curl=curl, insecure=True) self.assertEqual(result, "result") self.assertEqual(curl.options, {pycurl.URL: "http://example.com/get-ca-cert", pycurl.FOLLOWLOCATION: 1, pycurl.MAXREDIRS: 5, pycurl.CONNECTTIMEOUT: 30, pycurl.LOW_SPEED_LIMIT: 1, pycurl.LOW_SPEED_TIME: 600, pycurl.NOSIGNAL: 1, pycurl.WRITEFUNCTION: Any(), pycurl.SSL_VERIFYPEER: False, pycurl.DNS_CACHE_TIMEOUT: 0, pycurl.ENCODING: "gzip,deflate"}) def test_pycurl_error_str(self): self.assertEqual(str(PyCurlError(60, "pycurl error")), "Error 60: pycurl error") def test_pycurl_error_repr(self): self.assertEqual(repr(PyCurlError(60, "pycurl error")), "") def test_pycurl_follow_true(self): curl = CurlStub("result") result = fetch("http://example.com", curl=curl, follow=True) self.assertEqual(result, "result") self.assertEqual(1, curl.options[pycurl.FOLLOWLOCATION]) def test_pycurl_follow_false(self): curl = CurlStub("result") result = fetch("http://example.com", curl=curl, follow=False) self.assertEqual(result, "result") self.assertNotIn(pycurl.FOLLOWLOCATION, curl.options.keys()) def test_create_curl(self): curls = [] def pycurl_Curl(): curl = CurlStub("result") curls.append(curl) return curl Curl = pycurl.Curl try: pycurl.Curl = pycurl_Curl result = fetch("http://example.com") curl = curls[0] self.assertEqual(result, "result") self.assertEqual(curl.options, {pycurl.URL: "http://example.com", pycurl.FOLLOWLOCATION: 1, pycurl.MAXREDIRS: 5, pycurl.CONNECTTIMEOUT: 30, pycurl.LOW_SPEED_LIMIT: 1, pycurl.LOW_SPEED_TIME: 600, pycurl.NOSIGNAL: 1, pycurl.WRITEFUNCTION: Any(), pycurl.DNS_CACHE_TIMEOUT: 0, pycurl.ENCODING: "gzip,deflate"}) finally: pycurl.Curl = Curl def test_async_fetch(self): curl = CurlStub("result") d = fetch_async("http://example.com/", curl=curl) def got_result(result): self.assertEqual(result, "result") return d.addCallback(got_result) def test_async_fetch_with_error(self): curl = CurlStub("result", {pycurl.HTTP_CODE: 501}) d = fetch_async("http://example.com/", curl=curl) def got_error(failure): self.assertEqual(failure.value.http_code, 501) self.assertEqual(failure.value.body, "result") return failure d.addErrback(got_error) self.assertFailure(d, HTTPCodeError) return d def test_fetch_many_async(self): """ L{fetch_many_async} retrieves multiple URLs, and returns a C{DeferredList} firing its callback when all the URLs have successfully completed. 
""" url_results = {"http://good/": "good", "http://better/": "better"} def callback(result, url): self.assertIn(result, url_results.values()) self.assertIn(url, url_results) url_results.pop(url) def errback(failure, url): self.fail() curl = CurlManyStub(url_results) d = fetch_many_async(url_results.keys(), callback=callback, errback=errback, curl=curl) def completed(result): self.assertEqual(url_results, {}) return d.addCallback(completed) def test_fetch_many_async_with_error(self): """ L{fetch_many_async} aborts as soon as one URL fails. """ url_results = {"http://right/": "right", "http://wrong/": ("wrong", 501), "http://impossible/": "impossible"} failed_urls = [] def errback(failure, url): failed_urls.append(url) self.assertEqual(failure.value.body, "wrong") self.assertEqual(failure.value.http_code, 501) return failure curl = CurlManyStub(url_results) urls = ["http://right/", "http://wrong/", "http://impossible/"] result = fetch_many_async(urls, callback=None, errback=errback, curl=curl) def check_failure(failure): self.assertTrue(isinstance(failure.subFailure.value, HTTPCodeError)) self.assertEqual(failed_urls, ["http://wrong/"]) self.assertFailure(result, FirstError) return result.addCallback(check_failure) def test_url_to_filename(self): """ L{url_to_filename} extracts the filename part of an URL, optionally prepending a directory path to it. """ self.assertEqual(url_to_filename("http://some/file"), "file") self.assertEqual(url_to_filename("http://some/file/"), "file") self.assertEqual(url_to_filename("http://some/file", directory="dir"), os.path.join("dir", "file")) def test_fetch_to_files(self): """ L{fetch_to_files} fetches a list of URLs and save their content in the given directory. """ url_results = {"http://good/file": "file", "http://even/better-file": "better-file"} directory = self.makeDir() curl = CurlManyStub(url_results) result = fetch_to_files(url_results.keys(), directory, curl=curl) def check_files(ignored): for result in url_results.itervalues(): fd = open(os.path.join(directory, result)) self.assertEqual(fd.read(), result) fd.close() result.addCallback(check_files) return result def test_fetch_to_files_with_trailing_slash(self): """ L{fetch_to_files} discards trailing slashes from the final component of the given URLs when saving them as files. """ directory = self.makeDir() curl = CurlStub("data") result = fetch_to_files(["http:///with/slash/"], directory, curl=curl) def check_files(ignored): os.path.exists(os.path.join(directory, "slash")) result.addCallback(check_files) return result def test_fetch_to_files_with_errors(self): """ L{fetch_to_files} optionally logs an error message as soon as one URL fails, and aborts. 
""" url_results = {"http://im/right": "right", "http://im/wrong": ("wrong", 404), "http://im/not": "not"} directory = self.makeDir() messages = [] logger = lambda message: messages.append(message) curl = CurlManyStub(url_results) result = fetch_to_files(url_results.keys(), directory, logger=logger, curl=curl) def check_messages(failure): self.assertEqual(len(messages), 1) self.assertEqual(messages[0], "Couldn't fetch file from http://im/wrong " "(Server returned HTTP code 404)") messages.pop() def check_files(ignored): self.assertEqual(messages, []) self.assertFalse(os.path.exists(os.path.join(directory, "wrong"))) result.addErrback(check_messages) result.addCallback(check_files) return result def test_fetch_to_files_with_non_existing_directory(self): """ The deferred list returned by L{fetch_to_files} results in a failure if the destination directory doesn't exist. """ url_results = {"http://im/right": "right"} directory = "i/dont/exist/" curl = CurlManyStub(url_results) result = fetch_to_files(url_results.keys(), directory, curl=curl) def check_error(failure): error = str(failure.value.subFailure.value) self.assertEqual(error, "[Errno 2] No such file or directory: " "'i/dont/exist/right'") self.assertFalse(os.path.exists(os.path.join(directory, "right"))) result.addErrback(check_error) return result landscape-client-14.01/landscape/lib/tests/test_fd.py0000644000175000017500000000462312301414317022406 0ustar andreasandreas"""Tests for L{landscape.lib.fd}""" import resource from landscape.tests.mocker import ANY from landscape.lib.fd import clean_fds from landscape.tests.helpers import LandscapeTest class CleanFDsTests(LandscapeTest): """Tests for L{clean_fds}.""" def mock_rlimit(self, limit): getrlimit_mock = self.mocker.replace("resource.getrlimit") getrlimit_mock(resource.RLIMIT_NOFILE) self.mocker.result([None, limit]) def test_clean_fds_rlimit(self): """ L{clean_fds} cleans all non-stdio file descriptors up to the process limit for file descriptors. """ self.mocker.order() self.mock_rlimit(10) close_mock = self.mocker.replace("os.close", passthrough=False) for i in range(3, 10): close_mock(i) self.mocker.replay() clean_fds() def test_clean_fds_sanity(self): """ If the process limit for file descriptors is very high (> 4096), then we only close 4096 file descriptors. """ self.mocker.order() self.mock_rlimit(4100) close_mock = self.mocker.replace("os.close", passthrough=False) closed_fds = [] close_mock(ANY) self.mocker.call(closed_fds.append) self.mocker.count(4093) self.mocker.replay() clean_fds() self.assertEqual(closed_fds, range(3, 4096)) def test_ignore_OSErrors(self): """ If os.close raises an OSError, it is ignored and we continue to close the rest of the FDs. """ self.mocker.order() self.mock_rlimit(10) closed_fds = [] def remember_and_throw(fd): closed_fds.append(fd) raise OSError("Bad FD!") close_mock = self.mocker.replace("os.close", passthrough=False) close_mock(ANY) self.mocker.count(7) self.mocker.call(remember_and_throw) self.mocker.replay() clean_fds() self.assertEqual(closed_fds, range(3, 10)) def test_dont_ignore_other_errors(self): """ If other errors are raised from os.close, L{clean_fds} propagates them. 
""" self.mocker.order() self.mock_rlimit(10) close_mock = self.mocker.replace("os.close", passthrough=False) close_mock(ANY) self.mocker.throw(MemoryError()) self.mocker.replay() self.assertRaises(MemoryError, clean_fds) landscape-client-14.01/landscape/lib/tests/test_network.py0000644000175000017500000003000712301414317023501 0ustar andreasandreasimport array from cStringIO import StringIO import socket from subprocess import Popen, PIPE from landscape.tests.helpers import LandscapeTest from landscape.lib.network import ( get_network_traffic, get_active_device_info, get_active_interfaces, get_fqdn, get_network_interface_speed) from landscape.tests.mocker import ANY class NetworkInfoTest(LandscapeTest): def test_get_active_device_info(self): """ Device info returns a sequence of information about active network devices, compare and verify the output against that returned by ifconfig. """ mock_get_network_interface_speed = self.mocker.replace( get_network_interface_speed) mock_get_network_interface_speed(ANY, ANY) self.mocker.result((100, True)) self.mocker.count(min=1, max=None) self.mocker.replay() device_info = get_active_device_info() result = Popen(["/sbin/ifconfig"], stdout=PIPE).communicate()[0] interface_blocks = dict( [(block.split()[0], block.upper()) for block in filter(None, result.split("\n\n"))]) for device in device_info: if device["mac_address"] == "00:00:00:00:00:00": continue self.assertTrue(device["interface"] in result) block = interface_blocks[device["interface"]] self.assertTrue(device["netmask"] in block) self.assertIn(device["ip_address"], block) self.assertIn(device["mac_address"].upper(), block) self.assertIn(device["broadcast_address"], block) flags = device["flags"] if flags & 1: self.assertIn("UP", block) if flags & 2: self.assertIn("BROADCAST", block) if flags & 64: self.assertIn("RUNNING", block) if flags & 4096: self.assertIn("MULTICAST", block) self.assertEqual(100, device["speed"]) self.assertEqual(True, device["duplex"]) def test_skip_loopback(self): """The C{lo} interface is not reported by L{get_active_device_info}.""" device_info = get_active_device_info() interfaces = [i["interface"] for i in device_info] self.assertNotIn("lo", interfaces) def test_skip_vlan(self): """VLAN interfaces are not reported by L{get_active_device_info}.""" mock_get_active_interfaces = self.mocker.replace(get_active_interfaces) mock_get_active_interfaces(ANY) self.mocker.passthrough( result_callback=lambda result: list(result) + ["eth0.1"]) self.mocker.replay() device_info = get_active_device_info() interfaces = [i["interface"] for i in device_info] self.assertNotIn("eth0.1", interfaces) def test_skip_alias(self): """Interface aliases are not reported by L{get_active_device_info}.""" mock_get_active_interfaces = self.mocker.replace(get_active_interfaces) mock_get_active_interfaces(ANY) self.mocker.passthrough( result_callback=lambda result: list(result) + ["eth0:foo"]) self.mocker.replay() device_info = get_active_device_info() interfaces = [i["interface"] for i in device_info] self.assertNotIn("eth0:foo", interfaces) def test_duplicate_network_interfaces(self): """ L{get_active_interfaces} doesn't return duplicate network interfaces. The call to C{fcntl.ioctl} might return the same interface several times, so we make sure to clean it up. 
""" import landscape.lib.network original_struct_size = landscape.lib.network.IF_STRUCT_SIZE landscape.lib.network.IF_STRUCT_SIZE = 40 self.addCleanup( setattr, landscape.lib.network, "IF_STRUCT_SIZE", original_struct_size) # This is a fake response observed to return the same interface several # times (here, br1:priv) response = ( "lo\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02" "\x00\x00\x00\x7f\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00eth1:pub\x00\x00\x00\x00\x00\x00\x00" "\x00\x02\x00\x00\x00\xc8\xb4\xc4.\x00\x00\x00\x00\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00\x00br1:metadata\x00\x00\x00\x00\x02" "\x00\x00\x00\xa9\xfe\xa9\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00br1:0\x00\x00\x00\x00\x00\x00\x00\x00" "\x00\x00\x00\x02\x00\x00\x00\xc9\x19\x1f\x1d\x00\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00br1\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\xc0\xa8d" "\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" "\x00br1:priv\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\xac" "\x13\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" "\x00\x00\x00br1:priv\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00" "\x00\xac\x13\x02A") fake_array = array.array("B", response + "\0" * 4855) mock_array = self.mocker.replace("array") mock_array.array("B", ANY) self.mocker.result(fake_array) mock_ioctl = self.mocker.replace("fcntl") mock_ioctl.ioctl(ANY, ANY, ANY) self.mocker.result(0) mock_unpack = self.mocker.replace("struct") mock_unpack.unpack("iL", ANY) self.mocker.result((280, 38643456)) self.mocker.replay() sock = socket.socket( socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_IP) self.assertEqual( ["lo", "eth1:pub", "br1:metadata", "br1:0", "br1", "br1:priv"], list(get_active_interfaces(sock))) def test_get_network_traffic(self): """ Network traffic is assessed via reading /proc/net/dev, verify the parsed output against a known sample. """ open_mock = self.mocker.replace("__builtin__.open") open_mock("/proc/net/dev", "r") self.mocker.result(StringIO(test_proc_net_dev_output)) self.mocker.replay() traffic = get_network_traffic() self.assertEqual(traffic, test_proc_net_dev_parsed) #exact output of cat /proc/net/dev snapshot with line continuations for pep8 test_proc_net_dev_output = """\ Inter-| Receive | Transmit face |bytes packets errs drop fifo frame compressed multicast|bytes \ packets errs drop fifo colls carrier compressed lo:3272627934 3321049 0 0 0 0 0 0 3272627934\ 3321049 0 0 0 0 0 0 eth0: 6063748 12539 0 0 0 0 0 62 2279693\ 12579 0 0 0 19 0 0 """ test_proc_net_dev_parsed = { "lo": {"recv_bytes": 3272627934, "recv_packets": 3321049, "recv_errs": 0, "recv_drop": 0, "recv_fifo": 0, "recv_frame": 0, "recv_compressed": 0, "recv_multicast": 0, "send_bytes": 3272627934, "send_packets": 3321049, "send_errs": 0, "send_drop": 0, "send_fifo": 0, "send_colls": 0, "send_carrier": 0, "send_compressed": 0}, "eth0": {"recv_bytes": 6063748, "recv_packets": 12539, "recv_errs": 0, "recv_drop": 0, "recv_fifo": 0, "recv_frame": 0, "recv_compressed": 0, "recv_multicast": 62, "send_bytes": 2279693, "send_packets": 12579, "send_errs": 0, "send_drop": 0, "send_fifo": 0, "send_colls": 19, "send_carrier": 0, "send_compressed": 0}} class FQDNTest(LandscapeTest): def test_default_fqdn(self): """ C{get_fqdn} returns the output of C{socket.getfqdn} if it returns something sensible. 
""" self.addCleanup(setattr, socket, "getfqdn", socket.getfqdn) socket.getfqdn = lambda: "foo.bar" self.assertEqual("foo.bar", get_fqdn()) def test_getaddrinfo_fallback(self): """ C{get_fqdn} falls back to C{socket.getaddrinfo} with the C{AI_CANONNAME} flag if C{socket.getfqdn} returns a local hostname. """ self.addCleanup(setattr, socket, "getfqdn", socket.getfqdn) socket.getfqdn = lambda: "localhost6.localdomain6" self.assertNotIn("localhost", get_fqdn()) class NetworkInterfaceSpeedTest(LandscapeTest): def test_get_network_interface_speed(self): """ The link speed is reported as unpacked from the ioctl() call. """ sock = socket.socket( socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_IP) # ioctl always succeeds mock_ioctl = self.mocker.replace("fcntl") mock_ioctl.ioctl(ANY, ANY, ANY) self.mocker.result(0) mock_unpack = self.mocker.replace("struct") mock_unpack.unpack("12xHB28x", ANY) self.mocker.result((100, False)) self.mocker.replay() self.assertEqual((100, False), get_network_interface_speed(sock, "eth0")) def test_get_network_interface_speed_unplugged(self): """ The link speed for an unplugged interface is reported as 0. """ sock = socket.socket( socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_IP) # ioctl always succeeds mock_ioctl = self.mocker.replace("fcntl") mock_ioctl.ioctl(ANY, ANY, ANY) self.mocker.result((0, False)) mock_unpack = self.mocker.replace("struct") mock_unpack.unpack("12xHB28x", ANY) self.mocker.result((65535, False)) self.mocker.replay() self.assertEqual((0, False), get_network_interface_speed(sock, "eth0")) def test_get_network_interface_speed_not_supported(self): """ Some drivers do not report the needed interface speed. In this case an C{IOError} with errno 95 is raised ("Operation not supported"). If such an error is rasied, report the speed as -1. """ sock = socket.socket( socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_IP) theerror = IOError() theerror.errno = 95 theerror.message = "Operation not supported" # ioctl always raises mock_ioctl = self.mocker.replace("fcntl") mock_ioctl.ioctl(ANY, ANY, ANY) self.mocker.throw(theerror) self.mocker.replay() self.assertEqual((-1, False), get_network_interface_speed(sock, "eth0")) def test_get_network_interface_speed_not_permitted(self): """ In some cases (lucid seems to be affected), the ioctl() call is not allowed for non-root users. In that case we intercept the error and not report the network speed. """ sock = socket.socket( socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_IP) theerror = IOError() theerror.errno = 1 theerror.message = "Operation not permitted" # ioctl always raises mock_ioctl = self.mocker.replace("fcntl") mock_ioctl.ioctl(ANY, ANY, ANY) self.mocker.throw(theerror) self.mocker.replay() self.assertEqual((-1, False), get_network_interface_speed(sock, "eth0")) def test_get_network_interface_speed_other_io_error(self): """ In case we get an IOError that is not "Operation not permitted", the exception should be raised. 
""" sock = socket.socket( socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_IP) theerror = IOError() theerror.errno = 999 theerror.message = "Whatever" # ioctl always raises mock_ioctl = self.mocker.replace("fcntl") mock_ioctl.ioctl(ANY, ANY, ANY) self.mocker.throw(theerror) self.mocker.replay() self.assertRaises(IOError, get_network_interface_speed, sock, "eth0") landscape-client-14.01/landscape/lib/tests/test_fs.py0000644000175000017500000000634212301414317022425 0ustar andreasandreasimport os import time from landscape.tests.helpers import LandscapeTest from landscape.lib.fs import append_file, read_file, touch_file class ReadFileTest(LandscapeTest): def test_read_file(self): """ With no options L{read_file} reads the whole file passed as argument. """ path = self.makeFile("foo") self.assertEqual(read_file(path), "foo") def test_read_file_with_limit(self): """ With a positive limit L{read_file} reads only the bytes after the given limit. """ path = self.makeFile("foo bar") self.assertEqual(read_file(path, limit=3), " bar") def test_read_file_with_negative_limit(self): """ With a negative limit L{read_file} reads only the tail of the file. """ path = self.makeFile("foo bar from end") self.assertEqual(read_file(path, limit=-3), "end") def test_read_file_with_limit_bigger_than_file(self): """ If the limit is bigger than the file L{read_file} reads the entire file. """ path = self.makeFile("foo bar from end") self.assertEqual(read_file(path, limit=100), "foo bar from end") self.assertEqual(read_file(path, limit=-100), "foo bar from end") class TouchFileTest(LandscapeTest): def test_touch_file(self): """ The L{touch_file} function touches a file, setting its last modification time. """ path = self.makeFile() utime_mock = self.mocker.replace("os.utime") self.expect(utime_mock(path, None)) self.mocker.replay() touch_file(path) self.assertFileContent(path, "") def test_touch_file_multiple_times(self): """ The L{touch_file} function can be called multiple times. """ path = self.makeFile() touch_file(path) touch_file(path) self.assertFileContent(path, "") def test_touch_file_with_offset_seconds(self): """ The L{touch_file} function can be called with a offset in seconds that will be reflected in the access and modification times of the file. """ path = self.makeFile() current_time = long(time.time()) expected_time = current_time - 1 time_mock = self.mocker.replace("time.time") self.expect(time_mock()) self.mocker.result(current_time) utime_mock = self.mocker.replace("os.utime") self.expect(utime_mock(path, (expected_time, expected_time))) self.mocker.replay() touch_file(path, offset_seconds=-1) self.assertFileContent(path, "") class AppendFileTest(LandscapeTest): def test_append_existing_file(self): """ The L{append_file} function appends contents to an existing file. """ existing_file = self.makeFile("foo bar") append_file(existing_file, " baz") self.assertFileContent(existing_file, "foo bar baz") def test_append_no_file(self): """ The L{append_file} function creates a new file if one doesn't exist already. 
""" new_file = os.path.join(self.makeDir(), "new_file") append_file(new_file, "contents") self.assertFileContent(new_file, "contents") landscape-client-14.01/landscape/lib/dns.py0000644000175000017500000000513212301414317020374 0ustar andreasandreas"""DNS lookups for server autodiscovery.""" import logging from twisted.names import dns from twisted.names.client import Resolver def discover_server(autodiscover_srv_query_string="", autodiscover_a_query_string="", resolver=None): """ Look up the dns location of the landscape server. @param autodiscover_srv_query_string: The query string to send to the DNS server when making a SRV query. @param autodiscover_a_query_string: The query string to send to the DNS server when making a A query. @type resolver: The resolver to use. If none is specified a resolver that uses settings from /etc/resolv.conf will be created. (Testing only) """ if not resolver: resolver = Resolver("/etc/resolv.conf") d = _lookup_server_record(resolver, autodiscover_srv_query_string) d.addErrback(_lookup_hostname, resolver, autodiscover_a_query_string) return d def _lookup_server_record(resolver, service_name): """ Do a DNS SRV record lookup for the location of the landscape server. @type resolver: A resolver to use for DNS lookups L{twisted.names.client.Resolver}. @param service_name: The query string to send to the DNS server when making a SRV query. @return: A deferred containing either the hostname of the landscape server if found or an empty string if not found. """ def lookup_done(result): name = "" for item in result: for row in item: if row.type == dns.SRV: name = row.payload.target.name break return name def lookup_failed(result): logging.info("SRV lookup of %s failed." % service_name) return result d = resolver.lookupService(service_name) d.addCallback(lookup_done) d.addErrback(lookup_failed) return d def _lookup_hostname(result, resolver, hostname): """ Do a DNS name lookup for the location of the landscape server. @param result: The result from a call to lookup_server_record. @param resolver: The resolver to use for DNS lookups. @param hostname: The query string to send to the DNS server when making a A query. @param return: A deferred containing the ip address of the landscape server if found or None if not found. """ def lookup_done(result): return result def lookup_failed(result): logging.info("Name lookup of %s failed." % hostname) return result d = resolver.getHostByName(hostname) d.addCallback(lookup_done) d.addErrback(lookup_failed) return d landscape-client-14.01/landscape/lib/bpickle_dbus.py0000644000175000017500000000430312301414317022235 0ustar andreasandreas""" Different versions of the Python DBus bindings return different types to represent integers, strings, lists, etc. Older versions return builtin Python types: C{int}, C{str}, C{list}, etc. Newer versions return DBus-specific wrappers: C{Int16}, C{String}, C{Array}, etc. Failures occur when DBus types are used because bpickle doesn't know that an C{Int16} is really an C{int} and that an C{Array} is really a C{list}. L{install} and L{uninstall} can install and remove extensions that make bpickle work with DBus types. 
""" import dbus from landscape.lib import bpickle def install(): """Install bpickle extensions for DBus types.""" for type, function in get_dbus_types(): bpickle.dumps_table[type] = function def uninstall(): """Uninstall bpickle extensions for DBus types.""" for type, function in get_dbus_types(): del bpickle.dumps_table[type] def dumps_utf8string(obj): """ Convert the specified L{dbus.types.UTF8String} to bpickle's representation for C{unicode} data. """ return "u%s:%s" % (len(obj), obj) def dumps_double(obj): """ Convert a dbus.types.Double into a floating point representation. """ return "f%r;" % float(obj) def get_dbus_types(): """ Generator yields C{(type, bpickle_function)} for available DBus types. """ for (type_name, function) in [("Boolean", bpickle.dumps_bool), ("Int16", bpickle.dumps_int), ("UInt16", bpickle.dumps_int), ("Int32", bpickle.dumps_int), ("UInt32", bpickle.dumps_int), ("Int64", bpickle.dumps_int), ("UInt64", bpickle.dumps_int), ("Double", dumps_double), ("Array", bpickle.dumps_list), ("Dictionary", bpickle.dumps_dict), ("String", bpickle.dumps_unicode), ("UTF8String", dumps_utf8string)]: type = getattr(dbus.types, type_name, None) if type is not None: yield type, function landscape-client-14.01/landscape/lib/timestamp.py0000644000175000017500000000035112301414317021611 0ustar andreasandreasfrom datetime import datetime def to_timestamp(date, epoch=datetime.utcfromtimestamp(0)): """Convert a C{datetime} to an C{int}-based timetamp.""" delta = date - epoch return (delta.days * 60 * 60 * 24) + delta.seconds landscape-client-14.01/landscape/lib/persist.py0000644000175000017500000005061012301414317021302 0ustar andreasandreas# # Copyright (c) 2006 Canonical # Copyright (c) 2004 Conectiva, Inc. # # Written by Gustavo Niemeyer # # This Python module is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as published # by the Free Software Foundation; either version 2 of the License, or (at # your option) any later version. # # This Python module is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this Python module; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # import os import sys import copy import re __all__ = ["Persist", "PickleBackend", "BPickleBackend", "path_string_to_tuple", "path_tuple_to_string", "RootedPersist", "PersistError", "PersistReadOnlyError"] NOTHING = object() class PersistError(Exception): pass class PersistReadOnlyError(PersistError): pass class Persist(object): """Persist a hierarchical database of key=>value pairs. There are three different kinds of option maps, regarding the persistence and priority that maps are queried. - hard - Options are persistent. - soft - Options are not persistent, and have a higher priority than persistent options. - weak - Options are not persistent, and have a lower priority than persistent options. @ivar filename: The name of the file where persist data is saved or None if no filename is available. """ def __init__(self, backend=None, filename=None): """ @param backend: The backend to use. If none is specified, L{BPickleBackend} will be used. @param filename: The default filename to save to and load from. 
If specified, and the file exists, it will be immediately loaded. Specifying this will also allow L{save} to be called without any arguments to save the persist. """ if backend is None: backend = BPickleBackend() self._backend = backend self._hardmap = backend.new() self._softmap = {} self._weakmap = {} self._readonly = False self._modified = False self._config = self self.filename = filename if filename is not None and os.path.exists(filename): self.load(filename) def _get_readonly(self): return self._readonly def _set_readonly(self, flag): self._readonly = bool(flag) def _get_modified(self): return self._modified readonly = property(_get_readonly, _set_readonly) modified = property(_get_modified) def reset_modified(self): """Set the database status as non-modified.""" self._modified = False def assert_writable(self): """Assert if the object is writable @raise: L{PersistReadOnlyError} """ if self._readonly: raise PersistReadOnlyError("Configuration is in readonly mode.") def load(self, filepath): """Load a persisted database.""" def load_old(): filepathold = filepath + ".old" if (os.path.isfile(filepathold) and os.path.getsize(filepathold) > 0): # warning("Broken configuration file at %s" % filepath) # warning("Trying backup at %s" % filepathold) try: self._hardmap = self._backend.load(filepathold) except: raise PersistError("Broken configuration file at %s" % filepathold) return True return False filepath = os.path.expanduser(filepath) if not os.path.isfile(filepath): if load_old(): return raise PersistError("File not found: %s" % filepath) if os.path.getsize(filepath) == 0: load_old() return try: self._hardmap = self._backend.load(filepath) except: if load_old(): return raise PersistError("Broken configuration file at %s" % filepath) def save(self, filepath=None): """Save the persist to the given C{filepath}. If None is specified, then the filename passed during construction will be used. If the destination file already exists, it will be renamed to C{.old}. 
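        A minimal round trip looks like this (a sketch; the path and key
        are hypothetical)::

            persist = Persist(filename="/var/lib/landscape/data.bpickle")
            persist.set("message-store.sequence", 7)
            persist.save()   # writes the file, keeping a .old backup
            persist.get("message-store.sequence")   # => 7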
""" if filepath is None: if self.filename is None: raise PersistError("Need a filename!") filepath = self.filename filepath = os.path.expanduser(filepath) if os.path.isfile(filepath): os.rename(filepath, filepath + ".old") dirname = os.path.dirname(filepath) if dirname and not os.path.isdir(dirname): os.makedirs(dirname) self._backend.save(filepath, self._hardmap) def _traverse(self, obj, path, default=NOTHING, setvalue=NOTHING): if setvalue is not NOTHING: setvalue = self._backend.copy(setvalue) queue = list(path) marker = NOTHING newobj = obj while queue: obj = newobj elem = queue.pop(0) newobj = self._backend.get(obj, elem) if newobj is NotImplemented: if queue: path = path[:-len(queue)] raise PersistError("Can't traverse %r (%r): %r" % (type(obj), path_tuple_to_string(path), str(obj))) if newobj is marker: break if newobj is not marker: if setvalue is not marker: newobj = self._backend.set(obj, elem, setvalue) else: if setvalue is marker: newobj = default else: while True: if len(queue) > 0: if type(queue[0]) is int: newvalue = [] else: newvalue = {} else: newvalue = setvalue newobj = self._backend.set(obj, elem, newvalue) if newobj is NotImplemented: raise PersistError("Can't traverse %r with %r" % (type(obj), type(elem))) if not queue: break obj = newobj elem = queue.pop(0) return newobj def _getvalue(self, path, soft=False, hard=False, weak=False): if type(path) is str: path = path_string_to_tuple(path) marker = NOTHING if soft: value = self._traverse(self._softmap, path, marker) elif hard: value = self._traverse(self._hardmap, path, marker) elif weak: value = self._traverse(self._weakmap, path, marker) else: value = self._traverse(self._softmap, path, marker) if value is marker: value = self._traverse(self._hardmap, path, marker) if value is marker: value = self._traverse(self._weakmap, path, marker) return value def has(self, path, value=NOTHING, soft=False, hard=False, weak=False): obj = self._getvalue(path, soft, hard, weak) marker = NOTHING if obj is marker: return False elif value is marker: return True result = self._backend.has(obj, value) if result is NotImplemented: raise PersistError("Can't check %r for containment" % type(obj)) return result def keys(self, path, soft=False, hard=False, weak=False): obj = self._getvalue(path, soft, hard, weak) if obj is NOTHING: return [] result = self._backend.keys(obj) if result is NotImplemented: raise PersistError("Can't return keys for %s" % type(obj)) return result def get(self, path, default=None, soft=False, hard=False, weak=False): value = self._getvalue(path, soft, hard, weak) if value is NOTHING: return default return self._backend.copy(value) def set(self, path, value, soft=False, weak=False): assert path if type(path) is str: path = path_string_to_tuple(path) if soft: map = self._softmap elif weak: map = self._weakmap else: self.assert_writable() self._modified = True map = self._hardmap self._traverse(map, path, setvalue=value) def add(self, path, value, unique=False, soft=False, weak=False): assert path if type(path) is str: path = path_string_to_tuple(path) if soft: map = self._softmap elif weak: map = self._weakmap else: self.assert_writable() self._modified = True map = self._hardmap if unique: current = self._traverse(map, path) if type(current) is list and value in current: return path = path + (sys.maxint,) self._traverse(map, path, setvalue=value) def remove(self, path, value=NOTHING, soft=False, weak=False): assert path if type(path) is str: path = path_string_to_tuple(path) if soft: map = self._softmap elif weak: 
            map = self._weakmap
        else:
            self.assert_writable()
            self._modified = True
            map = self._hardmap
        marker = NOTHING
        while path:
            if value is marker:
                obj = self._traverse(map, path[:-1])
                elem = path[-1]
                isvalue = False
            else:
                obj = self._traverse(map, path)
                elem = value
                isvalue = True
            result = False
            if obj is not marker:
                result = self._backend.remove(obj, elem, isvalue)
                if result is NotImplemented:
                    raise PersistError("Can't remove %r from %r" %
                                       (elem, type(obj)))
            if self._backend.empty(obj):
                if value is not marker:
                    value = marker
                else:
                    path = path[:-1]
            else:
                break
        return result

    def move(self, oldpath, newpath, soft=False, weak=False):
        if not (soft or weak):
            self.assert_writable()
        if type(oldpath) is str:
            oldpath = path_string_to_tuple(oldpath)
        if type(newpath) is str:
            newpath = path_string_to_tuple(newpath)
        result = False
        marker = NOTHING
        value = self._getvalue(oldpath, soft, not (soft or weak), weak)
        if value is not marker:
            self.remove(oldpath, soft=soft, weak=weak)
            self.set(newpath, value, soft, weak)
            result = True
        return result

    def root_at(self, path):
        """
        Rebase the database hierarchy.

        @return: A L{RootedPersist} using this L{Persist} as parent.
        """
        return RootedPersist(self, path)


class RootedPersist(object):
    """Root a L{Persist}'s tree at a particular branch.

    This class shares the same interface of L{Persist} and provides a
    shortcut to access the nodes of a particular branch in a L{Persist}'s
    tree.

    The chosen branch will be viewed as the root of the tree of the
    L{RootedPersist} and all operations will be forwarded to the parent
    L{Persist} as appropriate.
    """

    def __init__(self, parent, root):
        """
        @param parent: the parent L{Persist}.
        @param root: a branch of the parent L{Persist}'s tree, that will be
            used as root of this L{RootedPersist}.
        """
        self.parent = parent
        if type(root) is str:
            self.root = path_string_to_tuple(root)
        else:
            self.root = root

    readonly = property(lambda self: self.parent.readonly)
    modified = property(lambda self: self.parent.modified)

    def assert_writable(self):
        self.parent.assert_writable()

    def has(self, path, value=NOTHING, soft=False, hard=False, weak=False):
        if type(path) is str:
            path = path_string_to_tuple(path)
        return self.parent.has(self.root + path, value, soft, hard, weak)

    def keys(self, path, soft=False, hard=False, weak=False):
        if type(path) is str:
            path = path_string_to_tuple(path)
        return self.parent.keys(self.root + path, soft, hard, weak)

    def get(self, path, default=None, soft=False, hard=False, weak=False):
        if type(path) is str:
            path = path_string_to_tuple(path)
        return self.parent.get(self.root + path, default, soft, hard, weak)

    def set(self, path, value, soft=False, weak=False):
        if type(path) is str:
            path = path_string_to_tuple(path)
        return self.parent.set(self.root + path, value, soft, weak)

    def add(self, path, value, unique=False, soft=False, weak=False):
        if type(path) is str:
            path = path_string_to_tuple(path)
        return self.parent.add(self.root + path, value, unique, soft, weak)

    def remove(self, path, value=NOTHING, soft=False, weak=False):
        if type(path) is str:
            path = path_string_to_tuple(path)
        return self.parent.remove(self.root + path, value, soft, weak)

    def move(self, oldpath, newpath, soft=False, weak=False):
        if type(oldpath) is str:
            oldpath = path_string_to_tuple(oldpath)
        if type(newpath) is str:
            newpath = path_string_to_tuple(newpath)
        return self.parent.move(self.root + oldpath, self.root + newpath,
                                soft, weak)

    def root_at(self, path):
        if type(path) is str:
            path = path_string_to_tuple(path)
        return self.parent.root_at(self.root + path)


# Split a path string on unescaped dots and on [index] elements, keeping
# the captured [index] tokens, e.g. "ab[0].cd" -> ["ab", "[0]", "", None,
# "cd"].
_splitpath = re.compile(r"(\[-?\d+\])|(?<!\\)\.").split


def path_string_to_tuple(path):
    """Convert a L{Persist} path string to a path tuple.

    Examples:

        >>> path_string_to_tuple("ab")
        ("ab",)
        >>> path_string_to_tuple("ab.cd")
        ("ab", "cd")
        >>> path_string_to_tuple("ab[0][1]")
        ("ab", 0, 1)
        >>> path_string_to_tuple("ab[0].cd[1]")
        ("ab", 0, "cd", 1)

    Raises L{PersistError} if the given path string is invalid.
    """
    if "." not in path and "[" not in path:
        return (path,)
    result = []
    tokens = _splitpath(path)
    for token in tokens:
        if token:
            if token[0] == "[" and token[-1] == "]":
                try:
                    result.append(int(token[1:-1]))
                except ValueError:
                    raise PersistError("Invalid path index: %r" % token)
            else:
                result.append(token.replace(r"\.", "."))
    return tuple(result)


def path_tuple_to_string(path):
    result = []
    for elem in path:
        if type(elem) is int:
            result[-1] += "[%d]" % elem
        else:
            result.append(str(elem).replace(".", "\."))
    return ".".join(result)


class Backend(object):
    """
    Base class for L{Persist} backends implementing hierarchical storage
    functionality.

    Each node of the hierarchy is an object of type C{dict}, C{list}
    or C{tuple}. A node can have zero or more children, each child can be
    another node or a leaf value compatible with the backend's
    serialization mechanism.

    Each child element is associated with a unique key, that can be used
    to get, set or remove the child itself from its containing node. If
    the node object is of type C{dict}, then the child keys will be the
    keys of the dictionary, otherwise if the node object is of type
    C{list} or C{tuple} the child element keys are the indexes of the
    available items, or the value of items themselves.

    The root node object is always a C{dict}.

    For example:

        >>> backend = Backend()
        >>> root = backend.new()
        >>> backend.set(root, "foo", "bar")
        'bar'
        >>> egg = backend.set(root, "egg", [1, 2, 3])
        >>> backend.set(egg, 0, 10)
        10
        >>> root
        {'foo': 'bar', 'egg': [10, 2, 3]}
    """

    def new(self):
        raise NotImplementedError

    def load(self, filepath):
        raise NotImplementedError

    def save(self, filepath, map):
        raise NotImplementedError

    def get(self, obj, elem, _marker=NOTHING):
        """Lookup a child in the given node object."""
        if type(obj) is dict:
            newobj = obj.get(elem, _marker)
        elif type(obj) in (tuple, list):
            if type(elem) is int:
                try:
                    newobj = obj[elem]
                except IndexError:
                    newobj = _marker
            elif elem in obj:
                newobj = elem
            else:
                newobj = _marker
        else:
            newobj = NotImplemented
        return newobj

    def set(self, obj, elem, value):
        """Set the value of the given child in the given node object."""
        if type(obj) is dict:
            newobj = obj[elem] = value
        elif type(obj) is list and type(elem) is int:
            lenobj = len(obj)
            if lenobj <= elem:
                obj.append(None)
                elem = lenobj
            elif elem < 0 and abs(elem) > lenobj:
                obj.insert(0, None)
                elem = 0
            newobj = obj[elem] = value
        else:
            newobj = NotImplemented
        return newobj

    def remove(self, obj, elem, isvalue):
        """Remove the given child in the given node object.

        @param isvalue: In case the node object is a C{list}, a boolean
            indicating if C{elem} is the index of the child or the value
            of the child itself.
""" result = False if type(obj) is dict: if elem in obj: del obj[elem] result = True elif type(obj) is list: if not isvalue and type(elem) is int: try: del obj[elem] result = True except IndexError: pass elif elem in obj: obj[:] = [x for x in obj if x != elem] result = True else: result = NotImplemented return result def copy(self, value): """Copy a node or a value.""" if type(value) in (dict, list): return copy.deepcopy(value) return value def empty(self, obj): """Whether the given node object has no children.""" return (not obj) def has(self, obj, elem): """Whether the given node object contains the given child element.""" contains = getattr(obj, "__contains__", None) if contains: return contains(elem) return NotImplemented def keys(self, obj): """Return the keys of the child elements of the given node object.""" keys = getattr(obj, "keys", None) if keys: return keys() elif type(obj) is list: return range(len(obj)) return NotImplemented class PickleBackend(Backend): def __init__(self): import cPickle self._pickle = cPickle def new(self): return {} def load(self, filepath): file = open(filepath) try: return self._pickle.load(file) finally: file.close() def save(self, filepath, map): file = open(filepath, "w") try: self._pickle.dump(map, file, 2) finally: file.close() class BPickleBackend(Backend): def __init__(self): from landscape.lib import bpickle self._bpickle = bpickle def new(self): return {} def load(self, filepath): file = open(filepath) try: return self._bpickle.loads(file.read()) finally: file.close() def save(self, filepath, map): file = open(filepath, "w") try: file.write(self._bpickle.dumps(map)) finally: file.close() # vim:ts=4:sw=4:et landscape-client-14.01/landscape/lib/__init__.py0000644000175000017500000000030612301414317021345 0ustar andreasandreas""" This package contains the Landscape library. Code inside this package must be generic and self-contained, depending only on code inside of the package itself or libraries outside Landscape. """ landscape-client-14.01/landscape/lib/monitor.py0000644000175000017500000001421112301414317021275 0ustar andreasandreasimport logging import time from landscape.log import format_delta, format_percent class Timer(object): """ A timer keeps track of the number of seconds passed during it's lifetime and since the last reset. """ def __init__(self, create_time=None): self._create_time = create_time or time.time self._creation_time = self._create_time() self._last_time = self._creation_time def time(self): return self._create_time() def since_start(self): return self._create_time() - self._creation_time def since_reset(self): return self._create_time() - self._last_time def reset(self): self._last_time = self._create_time() class Monitor(Timer): """ A monitor tracks the number of pings it received during it's lifetime and since the last reset. The component being monitored is responsible for calling C{ping()} everytime a monitored activity occurs. It should register a reactor event that logs statistics from this monitor every N seconds. Essentially, monitors are just statistics checkers that components can use to monitor themselves. 
""" def __init__(self, event_name, create_time=None): super(Monitor, self).__init__(create_time=create_time) self.event_name = event_name self.count = 0 self.total_count = 0 def ping(self): self.count += 1 self.total_count += 1 def reset(self): super(Monitor, self).reset() self.count = 0 def log(self): logging.info("%d %s events occurred in the last %s.", self.count, self.event_name, format_delta(self.since_reset())) self.reset() class BurstMonitor(Monitor): """ A burst monitor tracks the volume pings it receives. It goes into warn mode when too many pings are received in a short period of time. """ def __init__(self, repeat_interval, maximum_count, event_name, create_time=None): super(BurstMonitor, self).__init__(event_name, create_time=create_time) self.repeat_interval = repeat_interval self.maximum_count = maximum_count self._last_times = [] def ping(self): super(BurstMonitor, self).ping() now = self.time() self._last_times.append(now) if (self._last_times[0] - now > self.repeat_interval or len(self._last_times) > self.maximum_count+1): self._last_times.pop(0) def warn(self): if not self._last_times: return False delta = self.time() - self._last_times[0] return (delta < self.repeat_interval and len(self._last_times) >= self.maximum_count+1) class CoverageMonitor(Monitor): """ A coverage monitor tracks the volume of pings received since the last reset. It has normal and warn states that are determined by calculating the number of expected pings since the last reset. If the actual number of pings falls below the minimum required percent the monitor goes into warn mode. The component being monitored should register a reactor event that logs statistics from this monitor every N seconds. """ def __init__(self, repeat_interval, min_percent, event_name, create_time=None): super(CoverageMonitor, self).__init__(event_name, create_time=create_time) self.repeat_interval = repeat_interval self.min_percent = min_percent @property def percent(self): if not self.count and not self.expected_count: return 1.0 elapsed_time = self.since_reset() if not elapsed_time: return 1.0 return self.count / float(self.expected_count) @property def expected_count(self): return int(self.since_reset() / self.repeat_interval) def log(self): percent = 0.0 if self.percent and self.expected_count: percent = self.percent * 100 log = logging.info if self.warn(): log = logging.warning log("%d of %d expected %s events (%s) occurred in the last %s.", self.count, self.expected_count, self.event_name, format_percent(percent), format_delta(self.since_reset())) self.reset() def warn(self): if self.repeat_interval and self.min_percent: if not self.expected_count: return False if self.percent < self.min_percent: return True return False class FrequencyMonitor(Monitor): """ A frequency monitor tracks the number of pings received during a fixed period of time. It has normal and warn states; a warn state is triggered when the minimum expected pings were not received during the specified interval. The component being monitored should register a reactor event that checks the warn state of this monitor every N seconds. 
""" def __init__(self, repeat_interval, min_frequency, event_name, create_time=None): super(FrequencyMonitor, self).__init__(event_name, create_time=create_time) self.repeat_interval = repeat_interval self.min_frequency = min_frequency self._last_count = self._create_time() @property def expected_count(self): since_ping = self._create_time() - self._last_count return since_ping // self.repeat_interval def ping(self): super(FrequencyMonitor, self).ping() self._last_count = self._create_time() def log(self): if self.warn(): logging.warning("Only %d of %d minimum expected %s events " "occurred in the last %s.", self.count, self.expected_count, self.event_name, format_delta(self.repeat_interval)) self.reset() def warn(self): if self.repeat_interval and self.min_frequency: if (self._create_time() - self._last_count >= self.repeat_interval and self.count < self.min_frequency): return True return False landscape-client-14.01/landscape/lib/amp.py0000644000175000017500000005154512301414317020376 0ustar andreasandreas"""Expose the methods of a remote object over AMP. This module implements an AMP-based protocol for performing remote procedure calls in a convenient and easy way. It's conceptually similar to DBus in that it supports exposing a Python object to a remote process, with communication happening over any Twisted-supported transport, e.g. Unix domain sockets. For example let's say we have a Python process "A" that creates an instance of this class:: class Greeter(object): def hello(self, name): return "hi %s!" % name greeter = Greeter() Process A can "publish" the greeter object by defining which methods are exposed remotely and opening a Unix socket for incoming connections:: factory = MethodCallServerFactory(greeter, ["hello"]) reactor.listenUNIX("/some/socket/path", factory) Then a second Python process "B" can connect to that socket and build a "remote" greeter object, i.e. a proxy that forwards method calls to the real greeter object living in process A:: factory = MethodCallClientFactory() reactor.connectUNIX("/some/socket/path", factory) def got_remote(remote_greeter): deferred = remote_greeter.hello("Ted") deferred.addCallback(lambda result: ... # result == "hi Ted!") factory.getRemoteObject().addCallback(got_remote) Note that when invoking a method via the remote proxy, the parameters are required to be serializable with bpickle, so they can be sent over the wire. See also:: http://twistedmatrix.com/documents/current/core/howto/amp.html for more details about the Twisted AMP protocol. """ from uuid import uuid4 from twisted.internet.defer import Deferred, maybeDeferred, succeed from twisted.internet.protocol import ServerFactory, ReconnectingClientFactory from twisted.python.failure import Failure from twisted.protocols.amp import ( Argument, String, Integer, Command, AMP, MAX_VALUE_LENGTH, CommandLocator) from landscape.lib.bpickle import loads, dumps, dumps_table class MethodCallArgument(Argument): """A bpickle-compatible argument.""" def toString(self, inObject): """Serialize an argument.""" return dumps(inObject) def fromString(self, inString): """Unserialize an argument.""" return loads(inString) @classmethod def check(cls, inObject): """Check if an argument is serializable.""" return type(inObject) in dumps_table class MethodCallError(Exception): """Raised when a L{MethodCall} command fails.""" class MethodCall(Command): """Call a method on the object exposed by a L{MethodCallServerFactory}. 
    The command arguments have the following semantics:

    - C{sequence}: An integer uniquely identifying the L{MethodCall} being
      issued. The name 'sequence' is a bit misleading because it's really
      a uuid, since its values in practice are not in sequential order,
      they are just random values. The name is kept just for backward
      compatibility.

    - C{method}: The name of the method to invoke on the remote object.

    - C{arguments}: A BPickled binary tuple of the form C{(args, kwargs)},
      where C{args} are the positional arguments to be passed to the
      method and C{kwargs} the keyword ones.
    """

    arguments = [("sequence", Integer()),
                 ("method", String()),
                 ("arguments", String())]
    response = [("result", MethodCallArgument())]
    errors = {MethodCallError: "METHOD_CALL_ERROR"}


class MethodCallChunk(Command):
    """Send a chunk of L{MethodCall} containing a portion of the arguments.

    When the arguments of a L{MethodCall} are bigger than 64k, they get
    split in several L{MethodCallChunk}s that are buffered on the receiver
    side.

    The command arguments have the following semantics:

    - C{sequence}: The unique integer associated with the L{MethodCall}
      that this L{MethodCallChunk} is part of.

    - C{chunk}: A portion of the big BPickle C{arguments} string which is
      being split and buffered.
    """

    arguments = [("sequence", Integer()),
                 ("chunk", String())]
    response = [("result", Integer())]
    errors = {MethodCallError: "METHOD_CALL_ERROR"}


class MethodCallReceiver(CommandLocator):
    """Expose methods of a local object over AMP.

    @param obj: The Python object to be exposed.
    @param methods: The list of the object's methods that can be called
        remotely.
    """

    def __init__(self, obj, methods):
        CommandLocator.__init__(self)
        self._object = obj
        self._methods = methods
        self._pending_chunks = {}

    @MethodCall.responder
    def receive_method_call(self, sequence, method, arguments):
        """Call an object's method with the given arguments.

        If a connected client sends a L{MethodCall} for method C{foo_bar},
        then the actual method C{foo_bar} of the object associated with the
        protocol will be called with the given C{args} and C{kwargs} and
        its return value delivered back to the client as response to the
        command.

        @param sequence: The integer that uniquely identifies the
            L{MethodCall} being received.
        @param method: The name of the object's method to call.
        @param arguments: A bpickle'd binary tuple of (args, kwargs) to be
            passed to the method. In case this L{MethodCall} has been
            preceded by one or more L{MethodCallChunk}s, C{arguments} is
            the last chunk of data.
        """
        chunks = self._pending_chunks.pop(sequence, None)
        if chunks is not None:
            # We got some L{MethodCallChunk}s before, this is the last.
            chunks.append(arguments)
            arguments = "".join(chunks)

        args, kwargs = loads(arguments)

        if method not in self._methods:
            raise MethodCallError("Forbidden method '%s'" % method)

        method_func = getattr(self._object, method)

        def handle_result(result):
            return {"result": self._check_result(result)}

        def handle_failure(failure):
            raise MethodCallError(failure.value)

        deferred = maybeDeferred(method_func, *args, **kwargs)
        deferred.addCallback(handle_result)
        deferred.addErrback(handle_failure)
        return deferred

    @MethodCallChunk.responder
    def receive_method_call_chunk(self, sequence, chunk):
        """Receive a part of a multi-chunk L{MethodCall}.

        Add the received C{chunk} to the buffer of the L{MethodCall}
        identified by C{sequence}.
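        For example, a call whose pickled arguments were split in three
        pieces is reassembled like this (a sketch of the flow, not real
        protocol traffic)::

            # Two MethodCallChunks buffer the first two pieces:
            #   self._pending_chunks[sequence] == [piece1, piece2]
            # The closing MethodCall carries piece3, and
            # receive_method_call joins them back together:
            #   arguments == piece1 + piece2 + piece3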
""" self._pending_chunks.setdefault(sequence, []).append(chunk) return {"result": sequence} def _check_result(self, result): """Check that the C{result} we're about to return is serializable. @return: The C{result} itself if valid. @raises: L{MethodCallError} if C{result} is not serializable. """ if not MethodCallArgument.check(result): raise MethodCallError("Non-serializable result") return result class MethodCallSender(object): """Call methods on a remote object over L{AMP} and return the result. @param protocol: A connected C{AMP} protocol. @param clock: An object implementing the C{IReactorTime} interface. @ivar timeout: A timeout for remote method class, see L{send_method_call}. """ timeout = 60 _chunk_size = MAX_VALUE_LENGTH def __init__(self, protocol, clock): self._protocol = protocol self._clock = clock def _call_remote_with_timeout(self, command, **kwargs): """Send an L{AMP} command that will errback in case of a timeout. @return: A deferred resulting in the command's response (or failure) if the peer responds within C{self.timeout} seconds, or that errbacks with a L{MethodCallError} otherwise. """ deferred = Deferred() def handle_response(response): if not call.active(): # Late response for a request that has timeout, # just ignore it. return call.cancel() deferred.callback(response) def handle_timeout(): # The peer didn't respond on time, raise an error. deferred.errback(MethodCallError("timeout")) call = self._clock.callLater(self.timeout, handle_timeout) result = self._protocol.callRemote(command, **kwargs) result.addBoth(handle_response) return deferred def send_method_call(self, method, args=[], kwargs={}): """Send a L{MethodCall} command with the given arguments. If a response from the server is not received within C{self.timeout} seconds, the returned deferred will errback with a L{MethodCallError}. @param method: The name of the remote method to invoke. @param args: The positional arguments to pass to the remote method. @param kwargs: The keyword arguments to pass to the remote method. @return: A C{Deferred} firing with the return value of the method invoked on the remote object. If the remote method itself returns a deferred, we fire with the callback value of such deferred. 
""" arguments = dumps((args, kwargs)) sequence = uuid4().int # Split the given arguments in one or more chunks chunks = [arguments[i:i + self._chunk_size] for i in xrange(0, len(arguments), self._chunk_size)] result = Deferred() if len(chunks) > 1: # If we have N chunks, send the first N-1 as MethodCallChunk's for chunk in chunks[:-1]: def create_send_chunk(sequence, chunk): send_chunk = lambda x: self._protocol.callRemote( MethodCallChunk, sequence=sequence, chunk=chunk) return send_chunk result.addCallback(create_send_chunk(sequence, chunk)) def send_last_chunk(ignored): chunk = chunks[-1] return self._call_remote_with_timeout( MethodCall, sequence=sequence, method=method, arguments=chunk) result.addCallback(send_last_chunk) result.addCallback(lambda response: response["result"]) result.callback(None) return result class MethodCallServerProtocol(AMP): """Receive L{MethodCall} commands over the wire and send back results.""" def __init__(self, obj, methods): AMP.__init__(self, locator=MethodCallReceiver(obj, methods)) class MethodCallClientProtocol(AMP): """Send L{MethodCall} commands over the wire using the AMP protocol.""" factory = None def connectionMade(self): """Notify our factory that we're ready to go.""" if self.factory is not None: # Factory can be None in unit-tests self.factory.clientConnectionMade(self) class RemoteObject(object): """An object able to transparently call methods on a remote object. Any method call on a L{RemoteObject} instance will return a L{Deferred} resulting in the return value of the same method call performed on the remote object exposed by the peer. """ def __init__(self, factory): """ @param factory: The L{MethodCallClientFactory} used for connecting to the other peer. Look there if you need to tweak the behavior of this L{RemoteObject}. """ self._sender = None self._pending_requests = {} self._factory = factory self._factory.notifyOnConnect(self._handle_connect) def __getattr__(self, method): """Return a function sending a L{MethodCall} for the given C{method}. When the created function is called, it sends the an appropriate L{MethodCall} to the remote peer passing it the arguments and keyword arguments it was called with, and returning a L{Deferred} resulting in the L{MethodCall}'s response value. """ def send_method_call(*args, **kwargs): deferred = Deferred() self._send_method_call(method, args, kwargs, deferred) return deferred return send_method_call def _send_method_call(self, method, args, kwargs, deferred, call=None): """Send a L{MethodCall} command, adding callbacks to handle retries.""" result = self._sender.send_method_call(method=method, args=args, kwargs=kwargs) result.addCallback(self._handle_result, deferred, call=call) result.addErrback(self._handle_failure, method, args, kwargs, deferred, call=call) if self._factory.fake_connection is not None: # Transparently flush the connection after a send_method_call # invocation letting tests simulate a synchronous transport. # This is needed because the Twisted's AMP implementation # assume that the transport is asynchronous. self._factory.fake_connection.flush() def _handle_result(self, result, deferred, call=None): """Handles a successful C{send_method_call} result. @param response: The L{MethodCall} response. @param deferred: The deferred that was returned to the caller. @param call: If not C{None}, the scheduled timeout call associated with the given deferred. """ if call is not None: call.cancel() # This is a successful retry, cancel the timeout. 
        deferred.callback(result)

    def _handle_failure(self, failure, method, args, kwargs, deferred,
                        call=None):
        """Called when a L{MethodCall} command fails.

        If a failure is due to a connection error and if
        C{retryOnReconnect} is C{True}, we will try to perform the
        requested L{MethodCall} again as soon as a new connection becomes
        available, giving up after the specified C{timeout}, if any.

        @param failure: The L{Failure} raised by the requested
            L{MethodCall}.
        @param method: The method name associated with the failed
            L{MethodCall}.
        @param args: The positional arguments of the failed L{MethodCall}.
        @param kwargs: The keyword arguments of the failed L{MethodCall}.
        @param deferred: The deferred that was returned to the caller.
        @param call: If not C{None}, the scheduled timeout call associated
            with the given deferred.
        """
        is_method_call_error = failure.type is MethodCallError
        dont_retry = self._factory.retryOnReconnect is False

        if is_method_call_error or dont_retry:
            # This means either that the connection is working, and a
            # MethodCall protocol error occurred, or that we gave up
            # trying and raised a timeout. In any case just propagate
            # the error.
            if deferred in self._pending_requests:
                self._pending_requests.pop(deferred)
            if call:
                call.cancel()
            deferred.errback(failure)
            return

        if self._factory.retryTimeout and call is None:
            # This is the first failure for this request, let's schedule a
            # timeout call.
            timeout = Failure(MethodCallError("timeout"))
            call = self._factory.clock.callLater(self._factory.retryTimeout,
                                                 self._handle_failure,
                                                 timeout, method, args,
                                                 kwargs, deferred=deferred)

        self._pending_requests[deferred] = (method, args, kwargs, call)

    def _handle_connect(self, protocol):
        """Handles a reconnection.

        @param protocol: The newly connected protocol instance.
        """
        self._sender = MethodCallSender(protocol, self._factory.clock)
        if self._factory.retryOnReconnect:
            self._retry()

    def _retry(self):
        """Try to perform again requests that failed."""
        # We need to copy the requests list before iterating over it,
        # because if we are actually still disconnected, callRemote will
        # return a failed deferred and the _handle_failure errback will be
        # executed synchronously during the loop, modifying the requests
        # list itself.
        requests = self._pending_requests.copy()
        self._pending_requests.clear()

        while requests:
            deferred, (method, args, kwargs, call) = requests.popitem()
            self._send_method_call(method, args, kwargs, deferred, call=call)


class MethodCallServerFactory(ServerFactory):
    """Expose a Python object using L{MethodCall} commands over C{AMP}."""

    protocol = MethodCallServerProtocol

    def __init__(self, obj, methods):
        """
        @param obj: The object exposed by the L{MethodCallProtocol}s
            instances created by this factory.
        @param methods: A list of the names of the methods that remote
            peers are allowed to call on the C{object} that we publish.
        """
        self.object = obj
        self.methods = methods

    def buildProtocol(self, addr):
        protocol = self.protocol(self.object, self.methods)
        protocol.factory = self
        return protocol


class MethodCallClientFactory(ReconnectingClientFactory):
    """
    Factory for L{MethodCallClientProtocol}s exposing an object or
    connecting to L{MethodCall} servers.

    When used to connect, if the connection fails or is lost the factory
    will keep retrying to establish it.

    @ivar factor: The time factor by which the delay between two subsequent
        connection retries will increase.
    @ivar maxDelay: Maximum number of seconds between connection attempts.
    @ivar protocol: The factory used to build protocol instances.
    @ivar remote: The factory used to build remote object instances.
    @ivar retryOnReconnect: If C{True}, the remote object returned by the
        C{getRemoteObject} method will retry requests that failed, as a
        result of a lost connection, as soon as a new connection is
        available.
    @param retryTimeout: A timeout for retrying requests, if the remote
        object can't perform them again successfully within this number of
        seconds, they will errback with a L{MethodCallError}.
    """

    factor = 1.6180339887498948
    maxDelay = 30

    protocol = MethodCallClientProtocol
    remote = RemoteObject

    retryOnReconnect = False
    retryTimeout = None

    # XXX support exposing fake asynchronous connections created by tests,
    # so they can be flushed transparently and emulate a synchronous
    # behavior. See also http://twistedmatrix.com/trac/ticket/6502, once
    # that's fixed this hack can be removed.
    fake_connection = None

    def __init__(self, clock):
        """
        @param clock: The clock used by the created protocols to schedule
            notifications and timeouts.
        """
        self.clock = clock
        self.delay = self.initialDelay
        self._connects = []
        self._requests = []
        self._remote = None

    def getRemoteObject(self):
        """Get a L{RemoteObject} as soon as the connection is ready.

        @return: A C{Deferred} firing with a connected L{RemoteObject}.
        """
        if self._remote is not None:
            return succeed(self._remote)
        deferred = Deferred()
        self._requests.append(deferred)
        return deferred

    def notifyOnConnect(self, callback):
        """Invoke the given C{callback} when a connection is
        re-established."""
        self._connects.append(callback)

    def dontNotifyOnConnect(self, callback):
        """Remove the given C{callback} from listeners."""
        self._connects.remove(callback)

    def clientConnectionMade(self, protocol):
        """Called when a newly built protocol gets connected."""
        if self._remote is None:
            # This is the first time we successfully connect
            self._remote = self.remote(self)

        for callback in self._connects:
            callback(protocol)

        # In all cases fire pending requests
        self._fire_requests(self._remote)

    def clientConnectionFailed(self, connector, reason):
        """Try to connect again or errback pending requests."""
        ReconnectingClientFactory.clientConnectionFailed(self, connector,
                                                         reason)
        if self._callID is None:
            # The factory won't retry to connect, so notify that we failed
            self._fire_requests(reason)

    def buildProtocol(self, addr):
        self.resetDelay()
        protocol = ReconnectingClientFactory.buildProtocol(self, addr)
        return protocol

    def _fire_requests(self, result):
        """
        Fire all pending L{getRemoteObject} deferreds with the given
        C{result}.
        """
        requests = self._requests[:]
        self._requests = []

        for deferred in requests:
            deferred.callback(result)
landscape-client-14.01/landscape/lib/fd.py0000644000175000017500000000135712301414317020206 0ustar andreasandreas"""A utility module which has FD-related functions.

This module mostly exists for L{clean_fds}, so it can be imported without
accidentally getting a reactor or something else that might create a
critical file descriptor.
"""
import os
import resource


def clean_fds():
    """Close all non-stdio file descriptors.

    This should be called at the beginning of a program to avoid inheriting
    any unwanted file descriptors from the invoking process. Unfortunately,
    this is really common in unix!
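    A typical use is at the very top of a program's entry point, before
    any other work creates descriptors (a sketch for illustration)::

        from landscape.lib.fd import clean_fds

        def main():
            clean_fds()  # drop any fds leaked by the invoking process
            # ... set up reactor, logging, etc.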
""" rlimit_nofile = resource.getrlimit(resource.RLIMIT_NOFILE)[1] total_descriptors = min(4096, rlimit_nofile) for fd in range(3, total_descriptors): try: os.close(fd) except OSError: pass landscape-client-14.01/landscape/lib/encoding.py0000644000175000017500000000066112301414317021400 0ustar andreasandreas def encode_if_needed(value): """ A small helper to decode unicode to utf-8 if needed. """ if isinstance(value, unicode): value = value.encode("utf-8") return value def encode_dict_if_needed(values): """ A wrapper taking a dict and passing each of the values to encode_if_needed. """ for key, value in values.iteritems(): values[key] = encode_if_needed(value) return values landscape-client-14.01/landscape/lib/disk.py0000644000175000017500000001147512301414317020551 0ustar andreasandreasfrom __future__ import division import os import statvfs import re # List of filesystem types authorized when generating disk use statistics. STABLE_FILESYSTEMS = frozenset( ["ext", "ext2", "ext3", "ext4", "reiserfs", "ntfs", "msdos", "dos", "vfat", "xfs", "hpfs", "jfs", "ufs", "hfs", "hfsplus"]) EXTRACT_DEVICE = re.compile("([a-z]+)[0-9]*") def get_mount_info(mounts_file, statvfs_, filesystems_whitelist=STABLE_FILESYSTEMS): """ This is a generator that yields information about mounted filesystems. @param mounts_file: A file with information about mounted filesystems, such as C{/proc/mounts}. @param statvfs_: A function to get file status information. @param filesystems_whitelist: Optionally, a list of which filesystems to stat. @return: A C{dict} with C{device}, C{mount-point}, C{filesystem}, C{total-space} and C{free-space} keys. If the filesystem information is not available, C{None} is returned. Both C{total-space} and C{free-space} are in megabytes. """ for line in open(mounts_file): try: device, mount_point, filesystem = line.split()[:3] mount_point = mount_point.decode("string-escape") except ValueError: continue if (filesystems_whitelist is not None and filesystem not in filesystems_whitelist): continue megabytes = 1024 * 1024 try: stats = statvfs_(mount_point) except OSError: continue block_size = stats[statvfs.F_BSIZE] total_space = (stats[statvfs.F_BLOCKS] * block_size) // megabytes free_space = (stats[statvfs.F_BFREE] * block_size) // megabytes yield {"device": device, "mount-point": mount_point, "filesystem": filesystem, "total-space": total_space, "free-space": free_space} def get_filesystem_for_path(path, mounts_file, statvfs_): """ Tries to determine to which of the mounted filesystem C{path} belongs to, and then returns information about that filesystem or C{None} if it couldn't be determined. @param path: The path we want filesystem information about. @param mounts_file: A file with information about mounted filesystems, such as C{/proc/mounts}. @param statvfs_: A function to get file status information. @param filesystems_whitelist: Optionally, a list of which filesystems to stat. @return: A C{dict} with C{device}, C{mount-point}, C{filesystem}, C{total-space} and C{free-space} keys. If the filesystem information is not available, C{None} is returned. Both C{total-space} and C{free-space} are in megabytes. 
""" candidate = None path = os.path.realpath(path) path_segments = path.split("/") for info in get_mount_info(mounts_file, statvfs_): mount_segments = info["mount-point"].split("/") if path.startswith(info["mount-point"]): if ((not candidate) or path_segments[:len(mount_segments)] == mount_segments): candidate = info return candidate def is_device_removable(device): """ This function returns whether a given device is removable or not by looking at the corresponding /sys/block//removable file @param device: The filesystem path to the device, e.g. /dev/sda1 """ # Shortcut the case where the device an SD card. The kernel/udev currently # consider SD cards (mmcblk devices) to be non-removable. if os.path.basename(device).startswith("mmcblk"): return True path = _get_device_removable_file_path(device) if not path: return False contents = None try: with open(path, "r") as f: contents = f.readline() except IOError: return False if contents.strip() == "1": return True return False def _get_device_removable_file_path(device): """ Get a device's "removable" file path. This function figures out the C{/sys/block//removable} path associated with the given device. The file at that path contains either a "0" if the device is not removable, or a "1" if it is. @param device: File system path of the device. """ # The device will be a symlink if the disk is mounted by uuid or by label. if os.path.islink(device): # Paths are in the form "/dev/disk/by-uuid/" and symlink # to the device file under /dev device = os.readlink(device) # /dev/disk/by-uuid/ -> ../../sda1 [device_name] = device.split("/")[-1:] # /dev/sda1 -> sda1 matched = EXTRACT_DEVICE.match(device_name) # sda1 -> sda if not matched: return None device_name = matched.groups()[0] removable_file = os.path.join("/sys/block/", device_name, "removable") return removable_file landscape-client-14.01/landscape/lib/hashlib.py0000644000175000017500000000041112301414317021215 0ustar andreasandreas"""Provide backward compatible access to hashlib functions.""" try: _hashlib = __import__("hashlib") except ImportError: from md5 import md5 from sha import sha as sha1 else: md5 = _hashlib.md5 sha1 = _hashlib.sha1 __all__ = ["md5", "sha1"] landscape-client-14.01/landscape/sysvconfig.py0000644000175000017500000000403512301414317021235 0ustar andreasandreas"""Programmatically manage the Landscape client SysV-style init script.""" import os class ProcessError(Exception): """ Error running a process with os.system. """ class SysVConfig(object): """Configure and drive the Landscape client init script. @param filename: Path to the file holding init scripts env variables. """ def __init__(self, filename="/etc/default/landscape-client"): self._filename = filename def set_start_on_boot(self, flag): """Make the init script decide to start the client when it's run.""" current = self._parse_file() current["RUN"] = flag and 1 or 0 self._write_file(current) def restart_landscape(self): """Restart the Landscape client service.""" if os.system("/etc/init.d/landscape-client restart"): raise ProcessError("Could not restart client") def stop_landscape(self): """Stop the Landscape client service.""" if os.system("/etc/init.d/landscape-client stop"): raise ProcessError("Could not stop client") def is_configured_to_run(self): """ Return a boolean representing whether the init script will decide to actually start the client when it is run. This method should match the semantics of the checks in debian/landscape-client.init. 
""" state = self._parse_file() run_value = state.get("RUN", "0") return (not run_value[:1].isspace()) and run_value != "0" def _parse_file(self): values = {} # Only attempt to parse the file if it exists. if os.path.isfile(self._filename): for line in open(self._filename, "r"): line = line.strip() if "=" in line: key, value = line.split("=") values[key] = value return values def _write_file(self, values): file = open(self._filename, "w") for key in sorted(values.keys()): file.write("%s=%s\n" % (key, str(values[key]))) file.close() landscape-client-14.01/landscape/log.py0000644000175000017500000000315012301414317017621 0ustar andreasandreasimport inspect import logging def format_object(object): """ Returns a fully-qualified name for the specified object, such as 'landscape.log.format_object()'. """ if inspect.ismethod(object): # FIXME If the method is implemented on a base class of # object's class, the module name and function name will be # from the base class and the method's class name will be from # object's class. name = repr(object).split(" ")[2] return "%s.%s()" % (object.__module__, name) elif inspect.isfunction(object): name = repr(object).split(" ")[1] return "%s.%s()" % (object.__module__, name) return "%s.%s" % (object.__class__.__module__, object.__class__.__name__) def format_delta(seconds): if not seconds: seconds = 0.0 return "%.02fs" % float(seconds) def format_percent(percent): if not percent: percent = 0.0 return "%.02f%%" % float(percent) def rotate_logs(): """ This closes and reopens the underlying files in the logging module's root logger. If called after logrotate (or something similar) has moved the old log file out of the way, this will start writing to a new new log file... """ for handler in logging.getLogger().handlers: if isinstance(handler, logging.FileHandler): handler.acquire() try: handler.stream.close() handler.stream = open(handler.baseFilename, handler.mode) finally: handler.release() logging.info("Landscape Logs rotated") landscape-client-14.01/landscape/upgraders/0000755000175000017500000000000012301414317020463 5ustar andreasandreaslandscape-client-14.01/landscape/upgraders/package.py0000644000175000017500000000013312301414317022425 0ustar andreasandreasfrom landscape.patch import SQLiteUpgradeManager upgrade_manager = SQLiteUpgradeManager() landscape-client-14.01/landscape/upgraders/tests/0000755000175000017500000000000012301414317021625 5ustar andreasandreaslandscape-client-14.01/landscape/upgraders/tests/test_package.py0000644000175000017500000000046412301414317024635 0ustar andreasandreasfrom landscape.tests.helpers import LandscapeTest from landscape.patch import SQLiteUpgradeManager from landscape.upgraders import package class TestPackageUpgraders(LandscapeTest): def test_package_upgrade_manager(self): self.assertEqual(type(package.upgrade_manager), SQLiteUpgradeManager) landscape-client-14.01/landscape/upgraders/tests/test_broker.py0000644000175000017500000000044412301414317024524 0ustar andreasandreasfrom landscape.tests.helpers import LandscapeTest from landscape.patch import UpgradeManager from landscape.upgraders import broker class TestBrokerUpgraders(LandscapeTest): def test_broker_upgrade_manager(self): self.assertEqual(type(broker.upgrade_manager), UpgradeManager) landscape-client-14.01/landscape/upgraders/tests/test_monitor.py0000644000175000017500000000045012301414317024724 0ustar andreasandreasfrom landscape.tests.helpers import LandscapeTest from landscape.patch import UpgradeManager from landscape.upgraders import monitor class 
TestMonitorUpgraders(LandscapeTest): def test_monitor_upgrade_manager(self): self.assertEqual(type(monitor.upgrade_manager), UpgradeManager) landscape-client-14.01/landscape/upgraders/tests/__init__.py0000644000175000017500000000000012301414317023724 0ustar andreasandreaslandscape-client-14.01/landscape/upgraders/__init__.py0000644000175000017500000000035212301414317022574 0ustar andreasandreasfrom landscape.upgraders import broker, monitor, package UPGRADE_MANAGERS = { # these should not be hardcoded "broker": broker.upgrade_manager, "monitor": monitor.upgrade_manager, "package": package.upgrade_manager} landscape-client-14.01/landscape/upgraders/broker.py0000644000175000017500000000011712301414317022320 0ustar andreasandreasfrom landscape.patch import UpgradeManager upgrade_manager = UpgradeManager() landscape-client-14.01/landscape/upgraders/monitor.py0000644000175000017500000000012012301414317022515 0ustar andreasandreasfrom landscape.patch import UpgradeManager upgrade_manager = UpgradeManager() landscape-client-14.01/landscape/patch.py0000644000175000017500000000715712301414317020152 0ustar andreasandreasimport logging class UpgraderConflict(Exception): """Two upgraders with the same version have been registered.""" class UpgradeManagerBase(object): """A simple upgrade system.""" def __init__(self): self._upgraders = {} def register_upgrader(self, version, function): """ @param version: The version number that this upgrader is upgrading the database to. This defines the order that upgraders are run. @param function: The function to call when applying upgraders. It must take a single object, the database that is being upgraded. """ if version in self._upgraders: raise UpgraderConflict("%s is already registered as %s; " "not adding %s" % ( version, self._upgraders[version], function)) self._upgraders[version] = function def get_version(self): """ Get the 'current' version of any database that this UpgradeManager will be applied to. """ keys = self._upgraders.keys() if keys: return max(keys) return 0 def upgrader(self, version): """ A decorator for specifying that a function is an upgrader for this upgrade manager. @param version: The version number that the function will be upgrading to. """ def inner(function): self.register_upgrader(version, function) return function return inner class UpgradeManager(UpgradeManagerBase): def apply(self, persist): """Bring the database up-to-date. @param persist: The database to upgrade. It will be passed to all upgrade functions. """ if not persist.has("system-version"): persist.set("system-version", 0) for version, upgrader in sorted(self._upgraders.items()): if version > persist.get("system-version"): persist.set("system-version", version) upgrader(persist) logging.info("Successfully applied patch %s" % version) def initialize(self, persist): """ Mark the database as being up-to-date; use this when initializing a new database. 
""" persist.set("system-version", self.get_version()) class SQLiteUpgradeManager(UpgradeManagerBase): """An upgrade manager backed by sqlite.""" def get_database_versions(self, cursor): cursor.execute("SELECT version FROM patch") result = cursor.fetchall() return set([row[0] for row in result]) def get_database_version(self, cursor): cursor.execute("SELECT MAX(version) FROM patch") version = cursor.fetchone()[0] if version: return version return 0 def apply(self, cursor): """Bring the database up-to-date.""" versions = self.get_database_versions(cursor) for version, upgrader in sorted(self._upgraders.items()): if version not in versions: self.apply_one(version, cursor) def apply_one(self, version, cursor): upgrader = self._upgraders[version] upgrader(cursor) cursor.execute("INSERT INTO patch VALUES (?)", (version,)) def initialize(self, cursor): """ Mark the database as being up-to-date; use this when initializing a new SQLite database. """ cursor.execute("CREATE TABLE patch (version INTEGER)") for version, upgrader in sorted(self._upgraders.items()): cursor.execute("INSERT INTO patch VALUES (?)", (version,)) landscape-client-14.01/landscape/reactor.py0000644000175000017500000003624712301414317020514 0ustar andreasandreas""" Extend the regular Twisted reactor with event-handling features. """ import time import sys import logging import bisect from twisted.python.failure import Failure from twisted.internet.error import ConnectError from twisted.internet.threads import deferToThread from landscape.log import format_object class InvalidID(Exception): """Raised when an invalid ID is used with reactor.cancel_call().""" class CallHookError(Exception): """Raised when hooking on a reactor incorrectly.""" class EventID(object): """Unique identifier for an event handler. @param event_type: Name of the event type handled by the handler. @param pair: Binary tuple C{(handler, priority)} holding the handler function and its priority. """ def __init__(self, event_type, pair): self._event_type = event_type self._pair = pair class EventHandlingReactorMixin(object): """Fire events identified by strings and register handlers for them. Note that event handlers are executed synchronously when the C{fire} method is called, so unit-tests can generally exercise events without needing to run the real Twisted reactor (except of course if the event handlers themselves contain asynchronous calls that need the Twisted reactor running). """ def __init__(self): super(EventHandlingReactorMixin, self).__init__() self._event_handlers = {} def call_on(self, event_type, handler, priority=0): """Register an event handler. The handler will be invoked every time an event of the given type is fired (there's no need to re-register the handler after the event is fired). @param event_type: The name of the event type to handle. @param handler: The function handling the given event type. @param priority: The priority of the given handler function. @return: The L{EventID} of the registered handler. """ pair = (handler, priority) handlers = self._event_handlers.setdefault(event_type, []) handlers.append(pair) handlers.sort(key=lambda pair: pair[1]) return EventID(event_type, pair) def fire(self, event_type, *args, **kwargs): """Fire an event of a given type. Call all handlers registered for the given C{event_type}, in order of priority. @param event_type: The name of the event type to fire. @param args: Positional arguments to pass to the registered handlers. @param kwargs: Keyword arguments to pass to the registered handlers. 
""" logging.debug("Started firing %s.", event_type) results = [] # Make a copy of the handlers that are registered at this point in # time, so we have a stable list in case handlers are cancelled # dynamically by executing the handlers themselves. handlers = list(self._event_handlers.get(event_type, ())) for handler, priority in handlers: try: logging.debug("Calling %s for %s with priority %d.", format_object(handler), event_type, priority) results.append(handler(*args, **kwargs)) except KeyboardInterrupt: logging.exception("Keyboard interrupt while running event " "handler %s for event type %r with " "args %r %r.", format_object(handler), event_type, args, kwargs) self.stop() raise except: logging.exception("Error running event handler %s for " "event type %r with args %r %r.", format_object(handler), event_type, args, kwargs) logging.debug("Finished firing %s.", event_type) return results def cancel_call(self, id): """Unregister an event handler. @param id: the L{EventID} of the handler to unregister. """ if type(id) is EventID: self._event_handlers[id._event_type].remove(id._pair) else: raise InvalidID("EventID instance expected, received %r" % id) class ReactorID(object): def __init__(self, timeout): self._timeout = timeout class LandscapeReactor(EventHandlingReactorMixin): """Wrap and add functionalities to the Twisted reactor. This is essentially a facade around the L{twisted.internet.reactor} and will delegate to it for mostly everything except event handling features which are implemented using L{EventHandlingReactorMixin}. """ def __init__(self): from twisted.internet import reactor from twisted.internet.task import LoopingCall self._LoopingCall = LoopingCall self._reactor = reactor self._cleanup() self.callFromThread = reactor.callFromThread super(LandscapeReactor, self).__init__() def time(self): """Get current time. @see L{time.time} """ return time.time() def call_later(self, *args, **kwargs): """Call a function later. Simply call C{callLater(*args, **kwargs)} and return its result. @see: L{twisted.internet.interfaces.IReactorTime.callLater}. """ return self._reactor.callLater(*args, **kwargs) def call_every(self, seconds, f, *args, **kwargs): """Call a function repeatedly. Create a new L{twisted.internet.task.LoopingCall} object and start it. @return: the created C{LoopingCall} object. """ lc = self._LoopingCall(f, *args, **kwargs) lc.start(seconds, now=False) return lc def cancel_call(self, id): """Cancel a scheduled function or event handler. @param id: The function call or handler to remove. It can be an L{EventID}, a L{LoopingCall} or a C{IDelayedCall}, as returned by L{call_on}, L{call_every} and L{call_later} respectively. """ if isinstance(id, EventID): return EventHandlingReactorMixin.cancel_call(self, id) if isinstance(id, self._LoopingCall): return id.stop() if id.active(): id.cancel() def call_when_running(self, f): """Schedule a function to be called when the reactor starts running.""" self._reactor.callWhenRunning(f) def call_in_main(self, f, *args, **kwargs): """Cause a function to be executed by the reactor thread. @param f: The callable object to execute. @param args: The arguments to call it with. @param kwargs: The keyword arguments to call it with. @see: L{twisted.internet.interfaces.IReactorThreads.callFromThread} """ self._reactor.callFromThread(f, *args, **kwargs) def call_in_thread(self, callback, errback, f, *args, **kwargs): """ Execute a callable object in a new separate thread. 
@param callback: A function to call in case C{f} was successful, it will be passed the return value of C{f}. @param errback: A function to call in case C{f} raised an exception, it will be passed a C{(type, value, traceback)} tuple giving information about the raised exception (see L{sys.exc_info}). @note: Both C{callback} and C{errback} will be executed in the parent thread. """ def on_success(result): if callback: return callback(result) def on_failure(failure): exc_info = (failure.type, failure.value, failure.tb) if errback: errback(*exc_info) else: logging.error(exc_info[1], exc_info=exc_info) deferred = deferToThread(f, *args, **kwargs) deferred.addCallback(on_success) deferred.addErrback(on_failure) def listen_unix(self, socket, factory): """Start listening on a Unix socket.""" return self._reactor.listenUNIX(socket, factory, wantPID=True) def connect_unix(self, socket, factory): """Connect to a Unix socket.""" return self._reactor.connectUNIX(socket, factory) def run(self): """Start the reactor, a C{"run"} event will be fired.""" self.fire("run") self._reactor.run() self.fire("stop") def stop(self): """Stop the reactor, a C{"stop"} event will be fired.""" self._reactor.stop() self._cleanup() def _cleanup(self): # Since the reactor is global, we should clean it up when we # initialize one of our wrappers. for call in self._reactor.getDelayedCalls(): if call.active(): call.cancel() class FakeReactorID(object): def __init__(self, data): self.active = True self._data = data class FakeReactor(EventHandlingReactorMixin): """A fake reactor with the same API as L{LandscapeReactor}. This reactor emulates the asynchronous interface of L{LandscapeReactor}, but implements it in a synchronous way, for easier unit-testing. Note that the C{listen_unix} method is *not* emulated, but rather inherited blindly from L{UnixReactorMixin}, this means that there's no way to control it in a synchronous way (see the docstring of the mixin). A better approach would be to fake the AMP transport (i.e. fake the twisted abstractions around Unix sockets), and implement a fake version of C{listen_unix}, but this hasn't been done yet. """ # XXX probably this shouldn't be a class attribute, but we need client-side # FakeReactor instances to be aware of listening sockets created by # server-side FakeReactor instances. _socket_paths = {} def __init__(self): super(FakeReactor, self).__init__() self._current_time = 0 self._calls = [] self.hosts = {} self._threaded_callbacks = [] # XXX we need a reference to the Twisted reactor as well because # some tests use it from twisted.internet import reactor self._reactor = reactor def time(self): return float(self._current_time) def call_later(self, seconds, f, *args, **kwargs): scheduled_time = self._current_time + seconds call = (scheduled_time, f, args, kwargs) bisect.insort_left(self._calls, call) return FakeReactorID(call) def call_every(self, seconds, f, *args, **kwargs): def fake(): # update the call so that cancellation will continue # working with the same ID. And do it *before* the call # because the call might cancel it! call._data = self.call_later(seconds, fake)._data try: f(*args, **kwargs) except: if call.active: self.cancel_call(call) raise call = self.call_later(seconds, fake) return call def cancel_call(self, id): if type(id) is FakeReactorID: if id._data in self._calls: self._calls.remove(id._data) id.active = False else: super(FakeReactor, self).cancel_call(id) def call_when_running(self, f): # Just schedule a call that will be kicked by the run() method.
self.call_later(0, f) def call_in_main(self, f, *args, **kwargs): """Schedule a function for execution in the main thread.""" self._threaded_callbacks.append(lambda: f(*args, **kwargs)) def call_in_thread(self, callback, errback, f, *args, **kwargs): """Emulate L{LandscapeReactor.call_in_thread} without spawning threads. Note that running threaded callbacks here doesn't reflect reality, since they're usually run while the main reactor loop is active. At the same time, this is convenient as it means we don't need to run the real Twisted reactor to test actions performed on completion of specific events (e.g. L{MessageExchange.exchange} uses call_in_thread to run the HTTP request in a separate thread, because we use libcurl which is blocking). IOW, it's easier to test things synchronously. """ self._in_thread(callback, errback, f, args, kwargs) self._run_threaded_callbacks() def listen_unix(self, socket_path, factory): class FakePort(object): def stopListening(oself): self._socket_paths.pop(socket_path) self._socket_paths[socket_path] = factory return FakePort() def connect_unix(self, path, factory): server = self._socket_paths.get(path) from landscape.lib.tests.test_amp import FakeConnector if server: connector = FakeConnector(factory, server) connector.connect() else: connector = object() # Fake connector failure = Failure(ConnectError("No such file or directory")) factory.clientConnectionFailed(connector, failure) return connector def run(self): """Continuously advance this reactor until reactor.stop() is called.""" self.fire("run") self._running = True while self._running: self.advance(self._calls[0][0]) self.fire("stop") def stop(self): self._running = False def advance(self, seconds): """Advance this reactor C{seconds} into the future. This method is not part of the L{LandscapeReactor} API and is specific to L{FakeReactor}. It's meant to be used only in unit tests for advancing time and triggering the relevant scheduled calls (see also C{call_later} and C{call_every}). """ while (self._calls and self._calls[0][0] <= self._current_time + seconds): call = self._calls.pop(0) # If we find a call within the time we're advancing, # before calling it, let's advance the time *just* to # when that call is expecting to be run, so that if it # schedules any calls itself they will be relative to # the correct time. seconds -= call[0] - self._current_time self._current_time = call[0] try: call[1](*call[2], **call[3]) except Exception, e: logging.exception(e) self._current_time += seconds def _in_thread(self, callback, errback, f, args, kwargs): try: result = f(*args, **kwargs) except Exception, e: exc_info = sys.exc_info() if errback is None: self.call_in_main(logging.error, e, exc_info=exc_info) else: self.call_in_main(errback, *exc_info) else: if callback: self.call_in_main(callback, result) def _run_threaded_callbacks(self): while self._threaded_callbacks: try: self._threaded_callbacks.pop(0)() except Exception, e: logging.exception(e) def _hook_threaded_callbacks(self): id = self.call_every(0.5, self._run_threaded_callbacks) self._run_threaded_callbacks_id = id def _unhook_threaded_callbacks(self): self.cancel_call(self._run_threaded_callbacks_id)
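For illustration, here is a small, hypothetical test-style sketch (not part of the original module) showing the synchronous testing workflow this class enables, driving both scheduled calls and events:

reactor = FakeReactor()
fired = []
reactor.call_later(5, fired.append, "later")
reactor.call_on("ping", lambda: fired.append("ping"))

reactor.advance(4)    # the delayed call is not due yet
assert fired == []
reactor.advance(1)    # now it runs
reactor.fire("ping")  # event handlers run synchronously
assert fired == ["later", "ping"]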
landscape-client-14.01/landscape/broker/0000755000175000017500000000000012301414317017753 5ustar andreasandreaslandscape-client-14.01/landscape/broker/exchange.py0000644000175000017500000007774112301414317022117 0ustar andreasandreas"""Manage outgoing and incoming messages when communicating with the server. The protocol to communicate between the client and the server has been designed to be very robust so that messages are not lost. In addition it is (vaguely) symmetric, as the client and server need to send messages both ways. Client->Server Payload ====================== All message payloads are bpickled with L{landscape.lib.bpickle.dumps}. Client to server payloads are C{dict}s of the form:: {'server-api': SERVER_API_VERSION, 'client-api': CLIENT_API_VERSION, 'sequence': SEQUENCE_NUMBER, 'accepted-types': SERVER_ACCEPTED_TYPES_DIGEST, 'messages': MESSAGES, 'total-messages': TOTAL_COUNT_OF_PENDING_MESSAGES, 'next-expected-sequence': EXPECTED_SEQUENCE_NUMBER, 'client-accepted-types': CLIENT_ACCEPTED_TYPES (optional)} The values have the following semantics: - C{SERVER_API_VERSION}: The API version that is required on the server in order to process the messages in this payload (the schema and semantics of message types are usually different for different API versions). - C{CLIENT_API_VERSION}: The API version of the client, hinting the server about the schema and semantics of the message types accepted by the client (see below). - C{SEQUENCE_NUMBER}: A monotonically increasing nonnegative integer. The meaning of this is described below. - C{SERVER_ACCEPTED_TYPES_DIGEST}: A hash of the message types that the client thinks are currently accepted by the server. The server can use it to know whether to send the client a new up-to-date list of accepted message types. - C{MESSAGES}: A python list of messages, described below. - C{TOTAL_COUNT_OF_PENDING_MESSAGES}: The total number of messages in the client outgoing queue. This includes the number of messages being sent in this payload, plus any other messages still pending and not included here. - C{EXPECTED_SEQUENCE_NUMBER}: The sequence number which the client expects the next message sent from the server to have. - C{CLIENT_ACCEPTED_TYPES}: Optionally, a list of message types that the client accepts. The server is supposed to send the client only messages of these types. It will be included in the payload only if the hash that the server sends us is out-of-date. This behavior is symmetric with respect to the C{SERVER_ACCEPTED_TYPES_DIGEST} field described above. Server->Client Payload ====================== The payloads that the server sends to not-yet-registered clients (i.e. clients that don't provide a secure ID associated with a computer) are C{dict}s of the form:: {'server-uuid': SERVER_UUID, 'messages': MESSAGES} where: - C{SERVER_UUID}: A string identifying the particular Landscape server the client is talking to. - C{MESSAGES}: A python list of messages, described below. Additionally, payloads to registered clients will include these fields:: {'next-expected-sequence': EXPECTED_SEQUENCE_NUMBER, 'client-accepted-types-hash': CLIENT_ACCEPTED_TYPES_DIGEST} where: - C{EXPECTED_SEQUENCE_NUMBER}: The sequence number which the server expects the next message sent from the client to have. - C{CLIENT_ACCEPTED_TYPES_DIGEST}: A hash of the message types that the server thinks are currently accepted by the client. The client can use it to know whether to send to the server an up-to-date list of the message types it now accepts (see CLIENT_ACCEPTED_TYPES in the client->server payload). Individual Messages =================== A message is a C{dict} with required and optional keys. Messages are packed into Python lists and set as the value of the 'messages' key in the payload.
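For illustration only, a complete client->server payload carrying a single message might look like this (all values hypothetical)::

    {'server-api': '3.2',
     'client-api': '3.3',
     'sequence': 12,
     'accepted-types': MD5_DIGEST_OF_SERVER_ACCEPTED_TYPES,
     'messages': [{'type': 'test', 'timestamp': 1380000000}],
     'total-messages': 1,
     'next-expected-sequence': 39}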
The C{dict} of a single message is of the form:: {'type': MESSAGE_TYPE, ...} where: - C{MESSAGE_TYPE}: A simple string, which lets the server decide what handler to dispatch the message to, also considering the SERVER_API_VERSION value. - C{...}: Other entries specific to the type of message. Message Sequencing ================== A message numbering system is built in to the protocol to ensure robustness of client/server communication. The way this works is not totally symmetrical, as the client must connect to the server via HTTP, but the order in which things happen over the course of many connections remains the same (see also L{landscape.broker.store} for more concrete examples): - Receiver tells Sender which sequence number it expects the next batch of messages to start with. - Sender gives some messages to Receiver, specifying the sequence number of the first message. If the expected and actual sequence numbers are out of synch, Sender resynchronizes in a certain way. The client and server must play the part of *both* of these roles on every interaction, but it simplifies things to talk about them in terms of a single role at a time. When the client connects to the server, it does the following things acting in the role of Sender (which is by far its more burdened role): - Send a payload containing messages and a sequence number. The sequence number should be the same number that the server gave as next-expected-sequence in the prior connection, or 0 if there was no previous connection. - Get back a next-expected-sequence from the server. If that value is not len(messages) + previous-next-expected, then resynchronize. It does the following when acting as Receiver: - Send a payload containing a next-expected-sequence, which should be the sequence number of the first message that the server responds with. This value should be previous-next-expected + len(previous_messages). - Receive some messages from the server, and process them immediately. When the server is acting as Sender, it does the following: - Wait for a payload with next-expected-sequence from the client. - Perhaps resynchronize if next-expected-sequence is unexpected. - Respond with a payload of messages to the client. No sequence identifier is given for this payload of messages, because it would be redundant with data that has already passed over the wire (received from the client) during the very same TCP connection. When the server is acting as a Receiver, it does the following: - Wait for a payload with a sequence identifier and a load of messages. - Respond with a next-expected-sequence. There are two interesting exceptional cases which must be handled with resynchronization: 1. Messages received with sequence numbers less than the next expected sequence number should be discarded, and further messages starting at the expected sequence numbers should be processed. 2. If the sequence number is higher than what the receiver expected, then no messages are processed and the receiver responds with the same {'next-expected-sequence': N}, so that the sender can resynchronize itself. This implies that the receiver must record the sequence number of the last successfully processed message, in order for it to respond to the sender with that number. In addition, the sender must save outbound messages even after they have been delivered over the transport, until the sender receives a next-expected-sequence higher than the outbound message. The details of this logic are described in L{landscape.broker.store}.
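A compact, hypothetical illustration of the second case: if the receiver still expects message 3 but the sender starts a batch at sequence 4, the receiver processes nothing and answers {'next-expected-sequence': 3}; the sender then rewinds and redelivers from message 3, which is why outbound messages are kept around until a higher next-expected-sequence acknowledges them.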
Exchange Sequence ================= Diagram:: 1. BrokerService --> MessageExchange : Start 2. MessageExchange --> MessageExchange : Schedule exchange 3. [event] <-- MessageExchange : Fire "pre-exchange" 4. [optional] : Do registration (See L{landscape.broker.registration}) : sequence 5. MessageExchange --> MessageStore : Request pending : messages 6. MessageExchange <-- MessageStore : return( Messages ) 7. MessageExchange --> HTTPTransport : Exchange 8. HTTPTransport --> {Server}LandscapeMessageSystem : HTTP POST 9. [Scope: Server] | | 9.1 LandscapeMessageSystem --> ComputerMessageAPI : run | | 9.2 ComputerMessageAPI --> FunctionHandler : handle | | 9.3 FunctionHandler --> Callable : call | ( See also server code at: | - C{canonical.landscape.message.handlers} | - C{canonical.message.handler.FunctionHandler} ) | | | 9.4 [If: the callable raises ConsistencyError] | | | | 9.4.1 ComputerMessageAPI --> Computer : request | | : Resynchronize | | | | 9.4.2 Computer --> Computer : Create | | : ResynchronizeRequest | | : activity | | | --[End If] | | 9.5 ComputerMessageAPI --> Computer : get deliverable | : activities | | 9.6 ComputerMessageAPI <-- Computer : return activities | | 9.7 [Loop over activities] | | | | 9.7.1 ComputerMessageAPI --> Activity : deliver | | | | 9.7.2 Activity --> MessageStore : add activity message | | | --[End Loop] | | 9.8 ComputerMessageAPI --> MessageStore : get pending messages | | 9.9 ComputerMessageAPI <-- MessageStore : return messages | | 9.10 LandscapeMessageSystem <-- ComputerMessageAPI : return payload | : (See below) | -- [End Scope] 10. HTTPTransport <-- {Server}LandscapeMessageSystem : HTTP response : with payload 11. MessageExchange <-- HTTPTransport : response 12. [If: server says it expects a very old message] | | 12.1 [event] <-- MessageExchange : event | (See L{landscape.broker.server}) : "resynchronize- | : clients" | -- [End if] 13. [Loop: over messages in payload] | | 13.1 [event] <-- MessageExchange : event | : message (message) | | 13.2 [Switch: on message type] | | | |- 13.2.1 [Case: message type is "accepted-types"] | | | | | | 13.2.1.1 MessageExchange -> MessageStore | | | : set accepted types | | | | | | 13.2.1.2 MessageExchange -> MessageExchange | | | : schedule urgent | | | : exchange | | --[End Case] | | | |- 13.2.2 [Case: message type is "resynchronize"] | | | | | | 13.2.2.1 [event] <- MessageExchange | | | (See L{landscape.broker.server}) | | | : event | | | : "resynchronize- | | | : clients" | | | | | | 13.2.2.2 MessageExchange -> MessageStore | | | : add "resynchronize" | | | : message | | | | | | 13.2.2.3 MessageExchange -> MessageExchange | | | : schedule urgent | | | : exchange | | | | | --[End Case] | | | |- 13.2.3 [Case: message type is "set-intervals"] | | | | | | 13.2.3.1 MessageExchange -> BrokerConfiguration | | | : set exchange | | | : interval | | | | | --[End Case] | | | -- [End Switch] | -- [End Loop] 14. Schedule exchange """ import time import logging from landscape.lib.hashlib import md5 from twisted.internet.defer import Deferred, succeed from landscape.lib.message import got_next_expected, ANCIENT from landscape.log import format_delta from landscape import SERVER_API, CLIENT_API class MessageExchange(object): """Schedule and handle message exchanges with the server. The L{MessageExchange} is the place where messages are sent to go out to the Landscape server. It accumulates messages in its L{MessageStore} and periodically delivers them to the server. 
It is also the place where messages coming from the server are handled. For each message type, the L{MessageExchange} supports setting a handler that will be invoked when a message of that type is received. An exchange is performed with an HTTP POST request, whose body contains outgoing messages and whose response contains incoming messages. """ def __init__(self, reactor, store, transport, registration_info, exchange_store, config, max_messages=100): """ @param reactor: The L{LandscapeReactor} used to fire events in response to messages received from the server. @param store: The L{MessageStore} used to queue outgoing messages. @param transport: The L{HTTPTransport} used to deliver messages. @param registration_info: The L{Identity} storing our secure ID. @param config: The L{BrokerConfiguration} with the `exchange_interval` and `urgent_exchange_interval` parameters, respectively holding the time interval between subsequent exchanges of non-urgent messages, and the time interval between subsequent exchanges of urgent messages. """ self._reactor = reactor self._message_store = store self._transport = transport self._registration_info = registration_info self._config = config self._exchange_interval = config.exchange_interval self._urgent_exchange_interval = config.urgent_exchange_interval self._max_messages = max_messages self._notification_id = None self._exchange_id = None self._exchanging = False self._urgent_exchange = False self._client_accepted_types = set() self._client_accepted_types_hash = None self._message_handlers = {} self._exchange_store = exchange_store self._stopped = False self.register_message("accepted-types", self._handle_accepted_types) self.register_message("resynchronize", self._handle_resynchronize) self.register_message("set-intervals", self._handle_set_intervals) reactor.call_on("resynchronize-clients", self._resynchronize) def _message_is_obsolete(self, message): """Returns C{True} if message is obsolete. A message is considered obsolete if the secure ID changed since it was received. """ if 'operation-id' not in message: return False operation_id = message['operation-id'] context = self._exchange_store.get_message_context(operation_id) if context is None: logging.warning( "No message context for message with operation-id: %s" % operation_id) return False # Compare the current secure ID with the one that was in effect when # the request message was received. result = self._registration_info.secure_id != context.secure_id context.remove() return result def send(self, message, urgent=False): """Include a message to be sent in an exchange. If urgent is True, an exchange with the server will be scheduled urgently. @param message: Same as in L{MessageStore.add}. """ if self._message_is_obsolete(message): logging.info( "Response message with operation-id %s was discarded " "because the client's secure ID has changed in the meantime" % message.get('operation-id')) return None if "timestamp" not in message: message["timestamp"] = int(self._reactor.time()) message_id = self._message_store.add(message) if urgent: self.schedule_exchange(urgent=True) return message_id def start(self): """Start scheduling exchanges.
The first one will be urgent.""" self.schedule_exchange(urgent=True) def stop(self): """Stop scheduling exchanges.""" if self._exchange_id is not None: # Cancel the next scheduled exchange self._reactor.cancel_call(self._exchange_id) self._exchange_id = None if self._notification_id is not None: # Cancel the next scheduled notification of an impending exchange self._reactor.cancel_call(self._notification_id) self._notification_id = None self._stopped = True def _handle_accepted_types(self, message): """ When the server updates us about the types of message it accepts, update our message store. If this makes existing held messages available for sending, urgently exchange messages. If new types are made available or old types are dropped a C{("message-type-acceptance-changed", type, bool)} reactor event will be fired. """ old_types = set(self._message_store.get_accepted_types()) new_types = set(message["types"]) diff = get_accepted_types_diff(old_types, new_types) self._message_store.set_accepted_types(new_types) logging.info("Accepted types changed: %s", diff) if self._message_store.get_pending_messages(1): self.schedule_exchange(urgent=True) for type in old_types - new_types: self._reactor.fire("message-type-acceptance-changed", type, False) for type in new_types - old_types: self._reactor.fire("message-type-acceptance-changed", type, True) def _handle_resynchronize(self, message): opid = message["operation-id"] scopes = message.get("scopes") self.send({"type": "resynchronize", "operation-id": opid}) self._reactor.fire("resynchronize-clients", scopes=scopes) def _resynchronize(self, scopes=None): # When re-synchronisation occurs we don't want any previous messages # being sent to the server, dropping the existing session_ids means # that messages sent with those IDs will be dropped by the broker. self._message_store.drop_session_ids(scopes) self.schedule_exchange(urgent=True) def _handle_set_intervals(self, message): if "exchange" in message: self._config.exchange_interval = message["exchange"] logging.info("Exchange interval set to %d seconds." % self._config.exchange_interval) if "urgent-exchange" in message: self._config.urgent_exchange_interval = message["urgent-exchange"] logging.info("Urgent exchange interval set to %d seconds." % self._config.urgent_exchange_interval) self._config.write() def exchange(self): """Send pending messages to the server and process responses. A C{pre-exchange} reactor event will be emitted just before the actual exchange takes place. An C{exchange-done} or C{exchange-failed} reactor event will be emitted after a successful or failed exchange. @return: A L{Deferred} that is fired when exchange has completed. """ if self._exchanging: return succeed(None) self._exchanging = True self._reactor.fire("pre-exchange") payload = self._make_payload() start_time = time.time() if self._urgent_exchange: logging.info("Starting urgent message exchange with %s." % self._transport.get_url()) else: logging.info("Starting message exchange with %s." 
% self._transport.get_url()) deferred = Deferred() def exchange_completed(): self.schedule_exchange(force=True) self._reactor.fire("exchange-done") logging.info("Message exchange completed in %s.", format_delta(time.time() - start_time)) deferred.callback(None) def handle_result(result): self._exchanging = False if result: if self._urgent_exchange: logging.info("Switching to normal exchange mode.") self._urgent_exchange = False self._handle_result(payload, result) self._message_store.record_success(int(self._reactor.time())) else: self._reactor.fire("exchange-failed") logging.info("Message exchange failed.") exchange_completed() def handle_failure(failure_type, failure_value, failure_tb): self._exchanging = False self._reactor.fire("exchange-failed") self._message_store.record_failure(int(self._reactor.time())) logging.info("Message exchange failed.") exchange_completed() self._reactor.call_in_thread(handle_result, handle_failure, self._transport.exchange, payload, self._registration_info.secure_id, self._get_exchange_token(), payload.get("server-api")) return deferred def is_urgent(self): """Return a bool showing whether there is an urgent exchange scheduled. """ return self._urgent_exchange def schedule_exchange(self, urgent=False, force=False): """Schedule an exchange to happen. The exchange will occur after some time based on whether C{urgent} is True. An C{impending-exchange} reactor event will be emitted approximately 10 seconds before the exchange is started. @param urgent: If true, ensure an exchange happens within the urgent interval. This will reschedule the exchange if necessary. If another urgent exchange is already scheduled, nothing happens. @param force: If true, an exchange will necessarily be scheduled, even if it was already scheduled before. """ if self._stopped: return # The 'not self._exchanging' check below is currently untested. # It's a bit tricky to test as it is preventing rehooking 'exchange' # while there's a background thread doing the exchange itself. if (not self._exchanging and (force or self._exchange_id is None or urgent and not self._urgent_exchange)): if urgent: self._urgent_exchange = True if self._exchange_id: self._reactor.cancel_call(self._exchange_id) if self._urgent_exchange: interval = self._config.urgent_exchange_interval else: interval = self._config.exchange_interval if self._notification_id is not None: self._reactor.cancel_call(self._notification_id) notification_interval = interval - 10 self._notification_id = self._reactor.call_later( notification_interval, self._notify_impending_exchange) self._exchange_id = self._reactor.call_later( interval, self.exchange) def _get_exchange_token(self): """Get the token given us by the server at the last exchange. It will be C{None} if we are not fully registered yet or if something bad happened during the last exchange and we could not get the token that the server had given us. """ exchange_token = self._message_store.get_exchange_token() # Before starting the exchange set the saved token to None. This will # prevent us from locking ourselves out if the exchange fails or if we # crash badly, while the server has saved a new token that we couldn't # receive or persist (this works because if the token is None the # server will be forgiving and will authenticate us based only on the # secure ID we provide). 
self._message_store.set_exchange_token(None) self._message_store.commit() return exchange_token def _notify_impending_exchange(self): self._reactor.fire("impending-exchange") def _make_payload(self): """Return a dict representing the complete exchange payload. The payload will contain all pending messages eligible for delivery, up to a maximum of C{max_messages} as passed to the L{__init__} method. """ store = self._message_store accepted_types_digest = self._hash_types(store.get_accepted_types()) messages = store.get_pending_messages(self._max_messages) total_messages = store.count_pending_messages() if messages: # Each message is tagged with the API that the client was # using at the time the message got added to the store. The # logic below will make sure that all messages which are added # to the payload being built will have the same api, and any # other messages will be postponed to the next exchange. server_api = messages[0].get("api") for i, message in enumerate(messages): if message.get("api") != server_api: break else: i = None if i is not None: del messages[i:] # DEPRECATED Remove this once API 2.0 is gone: if server_api is None: # The per-message API logic was introduced on API 2.1, so a # missing API must be 2.0. server_api = "2.0" else: server_api = SERVER_API payload = {"server-api": server_api, "client-api": CLIENT_API, "sequence": store.get_sequence(), "accepted-types": accepted_types_digest, "messages": messages, "total-messages": total_messages, "next-expected-sequence": store.get_server_sequence()} accepted_client_types = self.get_client_accepted_message_types() accepted_client_types_hash = self._hash_types(accepted_client_types) if accepted_client_types_hash != self._client_accepted_types_hash: payload["client-accepted-types"] = accepted_client_types return payload def _hash_types(self, types): accepted_types_str = ";".join(types) return md5(accepted_types_str).digest() def _handle_result(self, payload, result): """Handle a response from the server. Called by L{exchange} after a batch of messages has been successfully delivered to the server. If the C{server_uuid} changed, a C{"server-uuid-changed"} event will be fired. Call L{handle_message} for each message in C{result}. @param payload: The payload that was sent to the server. @param result: The response got in reply to the C{payload}. """ message_store = self._message_store self._client_accepted_types_hash = result.get( "client-accepted-types-hash") next_expected = result.get("next-expected-sequence") old_sequence = message_store.get_sequence() if next_expected is None: # If the server doesn't specify anything for the next-expected # value, just assume that it processed all messages that we sent # fine. next_expected = message_store.get_sequence() next_expected += len(payload["messages"]) message_store_state = got_next_expected(message_store, next_expected) if message_store_state == ANCIENT: # The server has probably lost some data we sent it. The # slate has been wiped clean (by got_next_expected), now # let's fire an event to tell all the plugins that they # ought to generate new messages so the server gets some # up-to-date data. logging.info("Server asked for ancient data: resynchronizing all " "state with the server.") self.send({"type": "resynchronize"}) self._reactor.fire("resynchronize-clients") # Save the exchange token that the server has sent us. We will provide # it at the next exchange to prove that we're still the same client. # See also landscape.broker.transport. 
message_store.set_exchange_token(result.get("next-exchange-token")) old_uuid = message_store.get_server_uuid() new_uuid = result.get("server-uuid") if new_uuid != old_uuid: logging.info("Server UUID changed (old=%s, new=%s)." % (old_uuid, new_uuid)) self._reactor.fire("server-uuid-changed", old_uuid, new_uuid) message_store.set_server_uuid(new_uuid) message_store.commit() sequence = message_store.get_server_sequence() for message in result.get("messages", ()): self.handle_message(message) sequence += 1 message_store.set_server_sequence(sequence) message_store.commit() if message_store.get_pending_messages(1): logging.info("Pending messages remain after the last exchange.") # Either the server asked us for old messages, or we # otherwise have more messages even after transferring # what we could. if next_expected != old_sequence: self.schedule_exchange(urgent=True) def register_message(self, type, handler): """Register a handler for the given message type. The C{handler} callable will be executed when a message of type C{type} has been received from the server. Multiple handlers for the same type will be called in the order they were registered. """ self._message_handlers.setdefault(type, []).append(handler) self._client_accepted_types.add(type) def handle_message(self, message): """ Handle a message received from the server. Any message handlers registered with L{register_message} will be called. """ if 'operation-id' in message: # This is a message that requires a response. Store the secure ID # so we can check for obsolete results later. self._exchange_store.add_message_context( message['operation-id'], self._registration_info.secure_id, message['type']) self._reactor.fire("message", message) # This has plan interference! but whatever. if message["type"] in self._message_handlers: for handler in self._message_handlers[message["type"]]: handler(message) def register_client_accepted_message_type(self, type): # stringify the type because it's a dbus.String. It should work # anyway, but this is just for sanity and less confusing logs. self._client_accepted_types.add(str(type)) def get_client_accepted_message_types(self): return sorted(self._client_accepted_types) def get_accepted_types_diff(old_types, new_types): old_types = set(old_types) new_types = set(new_types) added_types = new_types - old_types stable_types = old_types & new_types removed_types = old_types - new_types diff = [] diff.extend(["+%s" % type for type in added_types]) diff.extend(["%s" % type for type in stable_types]) diff.extend(["-%s" % type for type in removed_types]) return " ".join(diff)
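A tiny, hypothetical example of the diff format produced by the helper above (ordering within each group depends on set iteration order):

# "register" is newly accepted, "test" is stable, "ping" was dropped.
print get_accepted_types_diff(["test", "ping"], ["test", "register"])
# -> e.g. "+register test -ping"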
landscape-client-14.01/landscape/broker/ping.py0000644000175000017500000001341112301414317021262 0ustar andreasandreas""" Implementation of a lightweight exchange-triggering mechanism via small HTTP requests asking if we should do a full exchange. Ping Sequence ============= Diagram:: 1. BrokerService --> Pinger : Start 2. [Loop forever] | | 2.1 Pinger --> PingClient : Schedule Ping | | 2.2 PingClient --> {Server} WebPing : Ping | | 2.3 PingClient <-- {Server} WebPing : return(messages waiting? | : [Boolean]) | | 2.4 Pinger <-- PingClient : return(messages waiting? | [Boolean]) | | 2.5 [If: messages waiting == True ] | | | | 2.5.1 Pinger --> MessageExchange : Schedule urgent exchange | | | --[End If] | | 2.6 [Wait: for ping interval to expire] | --[End Loop] """ import urllib from logging import info from twisted.python.failure import Failure from twisted.internet import defer from landscape.lib.bpickle import loads from landscape.lib.fetch import fetch from landscape.lib.log import log_failure class PingClient(object): """An HTTP client which knows how to talk to the ping server.""" def __init__(self, reactor, get_page=None): if get_page is None: get_page = fetch self._reactor = reactor self.get_page = get_page def ping(self, url, insecure_id): """Ask the question: are there messages for this computer ID? @param url: The URL of the ping server to hit. @param insecure_id: This client's insecure ID; if C{None}, no HTTP request will be performed and the result will be C{False}. @return: A deferred resulting in True if there are messages and False otherwise. """ if insecure_id is not None: headers = {"Content-Type": "application/x-www-form-urlencoded"} data = urllib.urlencode({"insecure_id": insecure_id}) page_deferred = defer.Deferred() def errback(type, value, tb): page_deferred.errback(Failure(value, type, tb)) self._reactor.call_in_thread(page_deferred.callback, errback, self.get_page, url, post=True, data=data, headers=headers) page_deferred.addCallback(self._got_result) return page_deferred return defer.succeed(False) def _got_result(self, webtext): """ Given a response that came from a ping server, return True if the response indicates that there are messages waiting for this computer, False otherwise. """ if loads(webtext) == {"messages": True}: return True
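Here is a minimal, hypothetical sketch (not part of the original module) exercising PingClient against a canned server response; it relies on FakeReactor running call_in_thread synchronously, so the deferred has already fired by the time ping() returns:

from landscape.lib.bpickle import dumps
from landscape.reactor import FakeReactor

def _fake_get_page(url, post=None, data=None, headers=None):
    # Canned ping-server reply meaning "messages are waiting".
    return dumps({"messages": True})

client = PingClient(FakeReactor(), get_page=_fake_get_page)
results = []
client.ping("http://ping.example.com/", 123).addCallback(results.append)
assert results == [True]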
" "Scheduling an urgent exchange.") self._exchanger.schedule_exchange(urgent=True) def _got_error(self, failure): log_failure(failure, "Error contacting ping server at %s" % (self._ping_client.url,)) def _schedule(self): """Schedule a new ping using the current ping interval.""" self._call_id = self._reactor.call_later(self._config.ping_interval, self.ping) def _handle_set_intervals(self, message): if message["type"] == "set-intervals" and "ping" in message: self._config.ping_interval = message["ping"] self._config.write() info("Ping interval set to %d seconds." % self._config.ping_interval) if self._call_id is not None: self._reactor.cancel_call(self._call_id) self._schedule() def stop(self): """Stop pinging the message server.""" if self._call_id is not None: self._reactor.cancel_call(self._call_id) self._call_id = None class FakePinger(object): def __init__(self, *args, **kwargs): pass def start(self): pass landscape-client-14.01/landscape/broker/service.py0000644000175000017500000000753712301414317022001 0ustar andreasandreas"""Deployment code for the monitor.""" import os from landscape.lib.fetch import fetch_async from landscape.service import LandscapeService, run_landscape_service from landscape.amp import ComponentPublisher from landscape.broker.registration import RegistrationHandler, Identity from landscape.broker.config import BrokerConfiguration from landscape.broker.transport import HTTPTransport from landscape.broker.exchange import MessageExchange from landscape.broker.exchangestore import ExchangeStore from landscape.broker.ping import Pinger from landscape.broker.store import get_default_message_store from landscape.broker.server import BrokerServer class BrokerService(LandscapeService): """The core C{Service} of the Landscape Broker C{Application}. The Landscape broker service handles all the communication between the client and server. When started it creates and runs all necessary components to exchange messages with the Landscape server. @cvar service_name: C{broker} @ivar persist_filename: Path to broker-specific persistent data. @ivar persist: A L{Persist} object saving and loading data from C{self.persist_filename}. @ivar message_store: A L{MessageStore} used by the C{exchanger} to queue outgoing messages. @ivar transport: An L{HTTPTransport} used by the C{exchanger} to deliver messages. @ivar identity: The L{Identity} of the Landscape client the broker runs on. @ivar exchanger: The L{MessageExchange} exchanges messages with the server. @ivar pinger: The L{Pinger} checks if the server has new messages for us. @ivar registration: The L{RegistrationHandler} performs the initial registration. @param config: A L{BrokerConfiguration}. 
""" transport_factory = HTTPTransport pinger_factory = Pinger service_name = BrokerServer.name def __init__(self, config): self.persist_filename = os.path.join( config.data_path, "%s.bpickle" % (self.service_name,)) super(BrokerService, self).__init__(config) self.transport = self.transport_factory( self.reactor, config.url, config.ssl_public_key) self.message_store = get_default_message_store( self.persist, config.message_store_path) self.identity = Identity(self.config, self.persist) exchange_store = ExchangeStore(self.config.exchange_store_path) self.exchanger = MessageExchange( self.reactor, self.message_store, self.transport, self.identity, exchange_store, config) self.pinger = self.pinger_factory( self.reactor, self.identity, self.exchanger, config) self.registration = RegistrationHandler( config, self.identity, self.reactor, self.exchanger, self.pinger, self.message_store, fetch_async) self.broker = BrokerServer(self.config, self.reactor, self.exchanger, self.registration, self.message_store, self.pinger) self.publisher = ComponentPublisher(self.broker, self.reactor, self.config) def startService(self): """Start the broker. Create a L{BrokerServer} listening on C{broker_socket_path} for clients connecting with the L{BrokerServerConnector}, and start the L{MessageExchange} and L{Pinger} services. """ super(BrokerService, self).startService() self.publisher.start() self.exchanger.start() self.pinger.start() def stopService(self): """Stop the broker.""" self.publisher.stop() self.exchanger.stop() self.pinger.stop() super(BrokerService, self).stopService() def run(args): """Run the application, given some command line arguments.""" run_landscape_service(BrokerConfiguration, BrokerService, args) landscape-client-14.01/landscape/broker/store.py0000644000175000017500000004605012301414317021466 0ustar andreasandreas"""Message storage. The sequencing system we use in the message store may be quite confusing if you haven't looked at it in the last 10 minutes. For that reason, let's review the mechanics here. Our goal is to implement a reasonably robust system for delivering messages from us to our peer. The system should be smart enough to recover if the peer happens to lose messages that we have already sent, provided that these messages are not too old (we'll see below what 'too old' means). Messages added to the store are identified by increasing natural numbers, the first message added is identified by 0, the second by 1, and so on. We call "sequence" the number identifying the next message that we want to send. For example, if the store has been added ten messages (that we represent with uppercase letters) and we want start sending the first of them, our store would like like:: sequence: 0 messages: A, B, C, D, E, F, G, H, I, J ^ The "^" marker is what we call "pending offset" and is the displacement of the message we want to send next from the first message we have in the store. Let's say we now send to our peer a batch of 3 sequential messages. In the payload we include the body of the messages being sent and the sequence, which identifies the first message of the batch. In this case the payload would look like (pseudo-code):: (sequence: 0, messages: A, B, C) If everything works fine on the other end, our peer replies with a payload that would like:: (next-expected-sequence: 4) meaning that the peer has received all the three messages that we sent, and so the next message it expects to receive is the one identified by the number 4. 
At this point we update both our pending offset and our sequence values, and the store now looks like:: sequence: 3 messages: A, B, C, D, E, F, G, H, I, J ^ Great, now let's pretend that we send another batch, this time with five messages:: (sequence: 3, messages: D, E, F, G, H) Our peer receives them fine, responding with a payload looking like:: (next-expected-sequence: 8) meaning that it received all the eight messages we sent so far and it's waiting for the ninth. This is the second successful batch that we've sent in a row, so we can be reasonably confident that at least the messages in the first batch are not really needed anymore. We delete them and we update our sequence and pending offset accordingly:: sequence: 8 messages: D, E, F, G, H, I, J ^ Note that we still want to keep around the messages we sent in the very last batch, just in case. Indeed we now try to send a third batch with the last two messages that we have, but our peer surprisingly replies with this payload:: (next-expected-sequence: 5) Ouch! This means that something bad happened and our peer has somehow lost not only the two messages that we sent in the last batch, but also the last three messages of the former batch :( Luckily we've kept enough old messages around that we can try to send them again, we update our sequence and pending offset and the store looks like:: sequence: 5 messages: D, E, F, G, H, I, J ^ We can now start again sending messages using the same strategy. Note however that in the worst case scenario we could receive from our peer a next-expected-sequence value which is so old as to be outside our buffer of already-sent messages. In that case there is no way we can recover the lost messages, and we'll just send the oldest one that we have. See L{MessageStore} for details about how messages are stored on the file system and L{landscape.lib.message.got_next_expected} to check how the strategy for updating the pending offset and the sequence is implemented. """ import itertools import logging import os import uuid from landscape.lib import bpickle from landscape.lib.fs import create_file from landscape import SERVER_API HELD = "h" BROKEN = "b" class MessageStore(object): """A message store which stores its messages in a file system hierarchy. Beside the "sequence" and the "pending offset" values described in the module docstring above, the L{MessageStore} also stores what we call "server sequence", which is the next message number expected by the *client* itself (because we are in turn the peer of a specular message system running in the server, which tries to deliver messages to us). The server sequence is entirely unrelated to the stored messages, but is incremented when successfully receiving messages from the server, in the very same way described above but with the roles inverted. @param persist: a L{Persist} used to save state parameters like the accepted message types, sequence, server uuid etc. @param directory: base of the file system hierarchy """ api = SERVER_API def __init__(self, persist, directory, directory_size=1000): self._directory = directory self._directory_size = directory_size self._schemas = {} self._original_persist = persist self._persist = persist.root_at("message-store") message_dir = self._message_dir() if not os.path.isdir(message_dir): os.makedirs(message_dir) def commit(self): """Persist metadata to disk.""" self._original_persist.save() def set_accepted_types(self, types): """Specify the types of messages that the server will expect from us.
If messages are added to the store which are not currently accepted, they will be saved but ignored until their type is accepted. """ assert type(types) in (tuple, list, set) self._persist.set("accepted-types", sorted(set(types))) self._reprocess_holding() def get_accepted_types(self): """Get a list of all accepted message types.""" return self._persist.get("accepted-types", ()) def accepts(self, type): """Return bool indicating if C{type} is an accepted message type.""" return type in self.get_accepted_types() def get_sequence(self): """Get the current sequence. @return: The sequence number of the message that the server expects us to send on the next exchange. """ return self._persist.get("sequence", 0) def set_sequence(self, number): """Set the current sequence. Set the sequence number of the message that the server expects us to send on the next exchange. """ self._persist.set("sequence", number) def get_server_sequence(self): """Get the current server sequence. @return: the sequence number of the message that we will ask the server to send to us on the next exchange. """ return self._persist.get("server_sequence", 0) def set_server_sequence(self, number): """Set the current server sequence. Set the sequence number of the message that we will ask the server to send to us on the next exchange. """ self._persist.set("server_sequence", number) def get_server_uuid(self): """Return the currently set server UUID.""" return self._persist.get("server_uuid") def set_server_uuid(self, uuid): """Change the known UUID from the server we're communicating to.""" self._persist.set("server_uuid", uuid) def get_exchange_token(self): """Get the authentication token to use for the next exchange.""" return self._persist.get("exchange_token") def set_exchange_token(self, token): """Set the authentication token to use for the next exchange.""" self._persist.set("exchange_token", token) def get_pending_offset(self): """Get the current pending offset.""" return self._persist.get("pending_offset", 0) def set_pending_offset(self, val): """Set the current pending offset. Set the offset into the message pool to consider assigned to the current sequence number as returned by l{get_sequence}. 
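For instance, with ten messages in the store and a pending offset of 3, the first three messages are considered delivered and the next batch starts at the fourth one (an illustrative sketch)::

    store.set_pending_offset(3)
    batch = store.get_pending_messages(2)  # the fourth and fifth messages
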
""" self._persist.set("pending_offset", val) def add_pending_offset(self, val): """Increment the current pending offset by C{val}.""" self.set_pending_offset(self.get_pending_offset() + val) def count_pending_messages(self): """Return the number of pending messages.""" return sum(1 for x in self._walk_pending_messages()) def get_pending_messages(self, max=None): """Get any pending messages that aren't being held, up to max.""" accepted_types = self.get_accepted_types() messages = [] for filename in self._walk_pending_messages(): if max is not None and len(messages) >= max: break data = self._get_content(self._message_dir(filename)) try: message = bpickle.loads(data) except ValueError, e: logging.exception(e) self._add_flags(filename, BROKEN) else: if message["type"] not in accepted_types: self._add_flags(filename, HELD) else: messages.append(message) return messages def delete_old_messages(self): """Delete messages which are unlikely to be needed in the future.""" for fn in itertools.islice(self._walk_messages(exclude=HELD + BROKEN), self.get_pending_offset()): os.unlink(fn) containing_dir = os.path.split(fn)[0] if not os.listdir(containing_dir): os.rmdir(containing_dir) def delete_all_messages(self): """Remove ALL stored messages.""" self.set_pending_offset(0) for filename in self._walk_messages(): os.unlink(filename) def add_schema(self, schema): """Add a schema to be applied to messages of the given type. The schema must be an instance of L{landscape.schema.Message}. """ self._schemas[schema.type] = schema def is_pending(self, message_id): """Return bool indicating if C{message_id} still hasn't been delivered. @param message_id: Identifier returned by the L{add()} method. """ i = 0 pending_offset = self.get_pending_offset() for filename in self._walk_messages(exclude=BROKEN): flags = self._get_flags(filename) if ((HELD in flags or i >= pending_offset) and os.stat(filename).st_ino == message_id): return True if BROKEN not in flags and HELD not in flags: i += 1 return False def record_success(self, timestamp): """Record a successful exchange.""" self._persist.remove("first-failure-time") self._persist.remove("blackhole-messages") def record_failure(self, timestamp): """ Record a failed exchange, if all exchanges for the past week have failed then blackhole any future ones and request a full re-sync. """ if not self._persist.has("first-failure-time"): self._persist.set("first-failure-time", timestamp) continued_failure_time = timestamp - self._persist.get( "first-failure-time") if self._persist.get("blackhole-messages"): # Already added the resync message return if continued_failure_time > (60 * 60 * 24 * 7): # reject all messages after a week of not exchanging self.add({"type": "resynchronize"}) self._persist.set("blackhole-messages", True) logging.warning( "Unable to succesfully communicate with Landscape server " "for more than a week. Waiting for resync.") def add(self, message): """Queue a message for delivery. @param message: a C{dict} with a C{type} key and other keys conforming to the L{Message} schema for that specific message type. @return: message_id, which is an identifier for the added message or C{None} if the message was rejected. 
""" assert "type" in message if self._persist.get("blackhole-messages"): logging.debug("Dropped message, awaiting resync.") return message = self._schemas[message["type"]].coerce(message) if "api" not in message: message["api"] = self.api message_data = bpickle.dumps(message) filename = self._get_next_message_filename() temp_path = filename + ".tmp" create_file(temp_path, message_data) os.rename(temp_path, filename) if not self.accepts(message["type"]): filename = self._set_flags(filename, HELD) # For now we use the inode as the message id, as it will work # correctly even faced with holding/unholding. It will break # if the store is copied over for some reason, but this shouldn't # present an issue given the current uses. In the future we # should have a nice transactional storage (e.g. sqlite) which # will offer a more strong primary key. message_id = os.stat(filename).st_ino return message_id def _get_next_message_filename(self): message_dirs = self._get_sorted_filenames() if message_dirs: newest_dir = message_dirs[-1] else: os.makedirs(self._message_dir("0")) newest_dir = "0" message_filenames = self._get_sorted_filenames(newest_dir) if not message_filenames: filename = self._message_dir(newest_dir, "0") elif len(message_filenames) < self._directory_size: filename = str(int(message_filenames[-1].split("_")[0]) + 1) filename = self._message_dir(newest_dir, filename) else: newest_dir = self._message_dir(str(int(newest_dir) + 1)) os.makedirs(newest_dir) filename = os.path.join(newest_dir, "0") return filename def _walk_pending_messages(self): """Walk the files which are definitely pending.""" pending_offset = self.get_pending_offset() for i, filename in enumerate( self._walk_messages(exclude=HELD + BROKEN)): if i >= pending_offset: yield filename def _walk_messages(self, exclude=None): if exclude: exclude = set(exclude) message_dirs = self._get_sorted_filenames() for message_dir in message_dirs: for filename in self._get_sorted_filenames(message_dir): flags = set(self._get_flags(filename)) if (not exclude or not exclude & flags): yield self._message_dir(message_dir, filename) def _get_sorted_filenames(self, dir=""): message_files = [x for x in os.listdir(self._message_dir(dir)) if not x.endswith(".tmp")] message_files.sort(key=lambda x: int(x.split("_")[0])) return message_files def _message_dir(self, *args): return os.path.join(self._directory, *args) def _get_content(self, filename): file = open(filename) try: return file.read() finally: file.close() def _reprocess_holding(self): """ Unhold accepted messages left behind, and hold unaccepted pending messages. 
""" offset = 0 pending_offset = self.get_pending_offset() accepted_types = self.get_accepted_types() for old_filename in self._walk_messages(): flags = self._get_flags(old_filename) try: message = bpickle.loads(self._get_content(old_filename)) except ValueError, e: logging.exception(e) if HELD not in flags: offset += 1 else: accepted = message["type"] in accepted_types if HELD in flags: if accepted: new_filename = self._get_next_message_filename() os.rename(old_filename, new_filename) self._set_flags(new_filename, set(flags) - set(HELD)) else: if not accepted and offset >= pending_offset: self._set_flags(old_filename, set(flags) | set(HELD)) offset += 1 def _get_flags(self, path): basename = os.path.basename(path) if "_" in basename: return basename.split("_")[1] return "" def _set_flags(self, path, flags): dirname, basename = os.path.split(path) new_path = os.path.join(dirname, basename.split("_")[0]) if flags: new_path += "_" + "".join(sorted(set(flags))) os.rename(path, new_path) return new_path def _add_flags(self, path, flags): self._set_flags(path, self._get_flags(path) + flags) def get_session_id(self, scope=None): """Generate a unique session identifier, persist it and return it. See also L{landscape.broker.server.BrokerServer.get_session_id} for more information on what this is used for. @param scope: A string identifying the scope of interest of requesting object. Currently this is unused but it has been implemented in preparation for a fix for bug #300278 so that we don't have to change the persisted structure later. When that fix is in place this will allow us to re-synchronise only certain types of information, limited by scope. """ session_ids = self._persist.get("session-ids", {}) for session_id, stored_scope in session_ids.iteritems(): # This loop should be relatively short as it's intent is to limit # session-ids to one per scope. The or condition here is not # strictly necessary, but we *should* do "is" comparisons when we # can (so says PEP 8). if scope is stored_scope or scope == stored_scope: return session_id session_id = str(uuid.uuid4()) session_ids[session_id] = scope self._persist.set("session-ids", session_ids) return session_id def is_valid_session_id(self, session_id): """ Returns L{True} if the provided L{session_id} is known by this L{MessageStore}. """ return session_id in self._persist.get("session-ids", {}) def drop_session_ids(self, scopes=None): """Drop all session ids.""" new_session_ids = {} if scopes: session_ids = self._persist.get("session-ids", {}) for session_id, session_scope in session_ids.iteritems(): if session_scope not in scopes: new_session_ids[session_id] = session_scope self._persist.set("session-ids", new_session_ids) def get_default_message_store(*args, **kwargs): """ Get a L{MessageStore} object with all Landscape message schemas added. """ from landscape. message_schemas import message_schemas store = MessageStore(*args, **kwargs) for schema in message_schemas.values(): store.add_schema(schema) return store landscape-client-14.01/landscape/broker/server.py0000644000175000017500000003415312301414317021641 0ustar andreasandreas"""Bridge client side plugins to the C{MessageExchange}. The C{BrokerServer} provides C{BrokerClient}s with a mechanism to send messages to the server and, likewise, triggers those plugins to take action when a exchange is impending or resynchronisaton is required. 
Each C{BrokerClient} has to be registered using the L{BrokerServer.register_client} method, after which two-way communication is possible between the C{BrokerServer} and the C{BrokerClient}. Resynchronisation Sequence ========================== See the L{landscape.broker.exchange} sequence diagram for the origin of the "resynchronize-clients" event. Diagram:: 1. [event 1] ---> BrokerServer : Event : "resynchronize-clients" 2. [event 2] <--- BrokerServer : Broadcast event : "resynchronize" 3. [optional: various L{BrokerClientPlugin}s respond to the "resynchronize" event to reset themselves and start reporting afresh.] (See: L{landscape.monitor.packagemonitor.PackageMonitor} L{landscape.monitor.plugin.MonitorPlugin} L{landscape.manager.keystonetoken.KeystoneToken} L{landscape.monitor.activeprocessinfo.ActiveProcessInfo} ) 4. [event 1] ---> MessageExchange : Event (NOTE: this is the same "resynchronize-clients" event as in step 1; it is handled by both BrokerServer and MessageExchange. See MessageExchange._resynchronize ) 5. MessageExchange ---> MessageExchange : Schedule urgent : exchange """ import logging from twisted.internet.defer import Deferred from landscape.lib.twisted_util import gather_results from landscape.amp import remote from landscape.manager.manager import FAILED def event(method): """Turns a L{BrokerServer} method into an event broadcaster. When the decorated method is called, an event is fired on all connected clients. The event will have the same name as the method being called, except that any underscore in the method name will be replaced with a dash. """ event_type = method.__name__.replace("_", "-") def broadcast_event(self, *args, **kwargs): fired = [] for client in self.get_clients(): fired.append(client.fire_event(event_type, *args, **kwargs)) return gather_results(fired) return broadcast_event class BrokerServer(object): """ A broker server capable of handling messages from plugins connected using the L{BrokerProtocol}. @param config: The L{BrokerConfiguration} used by the broker. @param reactor: The L{LandscapeReactor} driving the broker's events. @param exchange: The L{MessageExchange} to send messages with. @param registration: The L{RegistrationHandler}. @param message_store: The broker's L{MessageStore}. @param pinger: The L{Pinger} used to check for new messages on the server. """ name = "broker" def __init__(self, config, reactor, exchange, registration, message_store, pinger): from landscape.broker.amp import get_component_registry self.connectors_registry = get_component_registry() self._config = config self._reactor = reactor self._exchanger = exchange self._registration = registration self._message_store = message_store self._registered_clients = {} self._connectors = {} self._pinger = pinger reactor.call_on("message", self.broadcast_message) reactor.call_on("impending-exchange", self.impending_exchange) reactor.call_on("message-type-acceptance-changed", self.message_type_acceptance_changed) reactor.call_on("server-uuid-changed", self.server_uuid_changed) reactor.call_on("package-data-changed", self.package_data_changed) reactor.call_on("resynchronize-clients", self.resynchronize) @remote def ping(self): """Return C{True}.""" return True @remote def get_session_id(self, scope=None): """Get a unique session ID to be used when sending messages. Anything that wants to send a message to the server via the broker is required to first acquire a session ID with this method. Such session IDs must be passed to L{send_message} whenever sending a message. 
The broker keeps track of the session IDs that it hands out and will drop them when a re-synchronisation event occurs. Further messages sent using expired IDs will be silently discarded. For example each L{BrokerClientPlugin} calls this method to get a session ID and use it when sending messages, until the plugin gets notified of a re-synchronisation event and then asks for a new one. This eliminates issues with out-of-date messages being delivered to the server after a re-synchronisation request. For example when the client re-registers and gets a new computer ID we don't want to deliver messages containing references to activity IDs of the old computer (e.g. a message with the result of a "change-packages" activity delivered before re-registering). See also #328005 and #1158822. """ return self._message_store.get_session_id(scope=scope) @remote def register_client(self, name): """Register a broker client called C{name}. Various broker clients interact with the broker server, such as the monitor for example, using the L{BrokerServerConnector} for performing remote method calls on the L{BrokerServer}. They establish connectivity with the broker by connecting and registering themselves, the L{BrokerServer} will in turn connect to them in order to be able to perform remote method calls like broadcasting events and messages. @param name: The name of the client, such a C{monitor} or C{manager}. """ connector_class = self.connectors_registry.get(name) connector = connector_class(self._reactor, self._config) def register(remote_client): self._registered_clients[name] = remote_client self._connectors[remote_client] = connector connected = connector.connect() return connected.addCallback(register) def get_clients(self): """Get L{RemoteClient} instances for registered clients.""" return self._registered_clients.values() def get_client(self, name): """Return the client with the given C{name} or C{None}.""" return self._registered_clients.get(name) def get_connectors(self): """Get connectors for registered clients. @see L{RemoteLandscapeComponentCreator}. """ return self._connectors.values() def get_connector(self, name): """Return the connector for the given C{name} or C{None}.""" return self._connectors.get(self.get_client(name)) @remote def send_message(self, message, session_id, urgent=False): """Queue C{message} for delivery to the server at the next exchange. @param message: The message C{dict} to send to the server. It must have a C{type} key and be compatible with C{landscape.lib.bpickle}. @param session_id: A session ID. You should acquire this with C{get_session_id} before attempting to send a message. @param urgent: If C{True}, exchange urgently, otherwise exchange during the next regularly scheduled exchange. @return: The message identifier created when queuing C{message}. 
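An illustrative sketch of the expected call pattern from a broker client, using the deferred-based style of the AMP proxies (the names and the C{"test"} message type here are assumptions for illustration, not part of this method)::

    def queue_test_message(remote_broker):
        deferred = remote_broker.get_session_id()
        deferred.addCallback(
            lambda session_id: remote_broker.send_message(
                {"type": "test", "data": "hello"}, session_id, urgent=True))
        return deferred
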
""" if session_id is None: raise RuntimeError( "Session ID must be set before attempting to send a message") if self._message_store.is_valid_session_id(session_id): return self._exchanger.send(message, urgent=urgent) @remote def is_message_pending(self, message_id): """Indicate if a message with given C{message_id} is pending.""" return self._message_store.is_pending(message_id) @remote def stop_clients(self): """Tell all the clients to exit.""" results = [] # FIXME: check whether the client are still alive for client in self.get_clients(): results.append(client.exit()) result = gather_results(results, consume_errors=True) return result.addCallback(lambda ignored: None) @remote def reload_configuration(self): """Reload the configuration file, and stop all clients.""" self._config.reload() # Now we'll kill off everything else so that they can be restarted and # notice configuration changes. return self.stop_clients() @remote def register(self): """Attempt to register with the Landscape server. @see: L{RegistrationHandler.register} """ return self._registration.register() @remote def get_accepted_message_types(self): """Return the message types accepted by the Landscape server.""" return self._message_store.get_accepted_types() @remote def get_server_uuid(self): """Return the uuid of the Landscape server we're pointing at.""" return self._message_store.get_server_uuid() @remote def register_client_accepted_message_type(self, type): """Register a new message type which can be accepted by this client. @param type: The message type to accept. """ self._exchanger.register_client_accepted_message_type(type) @remote def fire_event(self, event_type): """Fire an event in the broker reactor.""" self._reactor.fire(event_type) @remote def exit(self): """Request a graceful exit from the broker server. Before this method returns, all broker clients will be notified of the server broker's intention of exiting, so that they have the chance to stop whatever they're doing in a graceful way, and then exit themselves. This method will only return a result when all plugins returned their own results. """ clients_stopped = self.stop_clients() def schedule_reactor_stop(ignored): # Stop the reactor with a short delay to give us a chance to reply # to the caller when this method is invoked over AMP (typically # by the watchdog, see landscape.watchdog.Watchdog.request_exit). # # Note that stopping the reactor will cause the Twisted machinery # to invoke BrokerService.stopService, which in turn will stop the # exchanger/pinger and cleanly close all AMP sockets. self._reactor.call_later(1, lambda: self._reactor.stop()) return clients_stopped.addBoth(schedule_reactor_stop) @event def resynchronize(self): """Broadcast a C{resynchronize} event to the clients.""" @event def impending_exchange(self): """Broadcast an C{impending-exchange} event to the clients.""" @remote def listen_events(self, event_types): """ Return a C{Deferred} that fires when the first event occurs among the given ones. 
""" deferred = Deferred() calls = [] def get_handler(event_type): def handler(): for call in calls: self._reactor.cancel_call(call) deferred.callback(event_type) return handler for event_type in event_types: call = self._reactor.call_on(event_type, get_handler(event_type)) calls.append(call) return deferred @event def broker_reconnect(self): """Broadcast a C{broker-reconnect} event to the clients.""" @event def server_uuid_changed(self, old_uuid, new_uuid): """Broadcast a C{server-uuid-changed} event to the clients.""" @event def message_type_acceptance_changed(self, type, accepted): pass @event def package_data_changed(self): """Fire a package-data-changed event in the reactor of each client.""" def broadcast_message(self, message): """Call the C{message} method of all the registered plugins. @see: L{register_plugin}. """ results = [] for client in self.get_clients(): results.append(client.message(message)) result = gather_results(results) return result.addCallback(self._message_delivered, message) def _message_delivered(self, results, message): """ If the message wasn't handled, and it's an operation request (i.e. it has an operation-id), then respond with a failing operation result indicating as such. """ opid = message.get("operation-id") if (True not in results and opid is not None and message["type"] != "resynchronize"): mtype = message["type"] logging.error("Nobody handled the %s message." % (mtype,)) result_text = """\ Landscape client failed to handle this request (%s) because the plugin which should handle it isn't available. This could mean that the plugin has been intentionally disabled, or that the client isn't running properly, or you may be running an older version of the client that doesn't support this feature. """ % (mtype,) response = { "type": "operation-result", "status": FAILED, "result-text": result_text, "operation-id": opid} self._exchanger.send(response, urgent=True) @remote def stop_exchanger(self): """ Stop exchaging messages with the message server. Eventually, it is required by the plugin that no more message exchanges are performed. For example, when a reboot process in running, the client stops accepting new messages so that no client action is running while the machine is rebooting. Also, some activities should be explicitly require that no more messages are exchanged so some level of serialization in the client could be achieved. """ self._exchanger.stop() self._pinger.stop() landscape-client-14.01/landscape/broker/registration.py0000644000175000017500000004636212301414317023052 0ustar andreasandreas""" Handle client registration against the server. When the service is started for the first time it connects to the server as a new client without providing any identification credentials, and the server replies with the available registration mechanisms. At this point the machinery in this module will notice that we have no identification credentials yet and that the server accepts registration messages, so it will craft an appropriate one and send it out. 
""" import time import logging import socket from twisted.internet.defer import Deferred from landscape.message_schemas import juju_data from landscape.lib.bpickle import loads from landscape.lib.log import log_failure from landscape.lib.juju import get_juju_info from landscape.lib.fetch import fetch, FetchError from landscape.lib.tag import is_valid_tag_list from landscape.lib.network import get_fqdn from landscape.lib.vm_info import get_vm_info, get_container_info EC2_HOST = "169.254.169.254" EC2_API = "http://%s/latest" % (EC2_HOST,) class InvalidCredentialsError(Exception): """ Raised when an invalid account title and/or registration key is used with L{RegistrationManager.register}. """ def persist_property(name): def get(self): return self._persist.get(name) def set(self, value): self._persist.set(name, value) return property(get, set) def config_property(name): def get(self): return getattr(self._config, name) return property(get) class Identity(object): """Maintains details about the identity of this Landscape client. @ivar secure_id: A server-provided ID for secure message exchange. @ivar insecure_id: Non-secure server-provided ID, mainly used with the ping server. @ivar computer_title: See L{BrokerConfiguration}. @ivar account_name: See L{BrokerConfiguration}. @ivar registration_password: See L{BrokerConfiguration}. @ivar tags: See L{BrokerConfiguration} @param config: A L{BrokerConfiguration} object, used to set the C{computer_title}, C{account_name} and C{registration_password} instance variables. """ secure_id = persist_property("secure-id") insecure_id = persist_property("insecure-id") computer_title = config_property("computer_title") account_name = config_property("account_name") registration_key = config_property("registration_key") tags = config_property("tags") access_group = config_property("access_group") def __init__(self, config, persist): self._config = config self._persist = persist.root_at("registration") class RegistrationHandler(object): """ An object from which registration can be requested of the server, and which will handle forced ID changes from the server. L{register} should be used to perform initial registration. """ def __init__(self, config, identity, reactor, exchange, pinger, message_store, fetch_async=None): self._config = config self._identity = identity self._reactor = reactor self._exchange = exchange self._pinger = pinger self._message_store = message_store self._reactor.call_on("run", self._fetch_ec2_data) self._reactor.call_on("run", self._get_juju_data) self._reactor.call_on("pre-exchange", self._handle_pre_exchange) self._reactor.call_on("exchange-done", self._handle_exchange_done) self._exchange.register_message("set-id", self._handle_set_id) self._exchange.register_message("unknown-id", self._handle_unknown_id) self._exchange.register_message("registration", self._handle_registration) self._should_register = None self._fetch_async = fetch_async self._otp = None self._ec2_data = None self._juju_data = None def should_register(self): id = self._identity if id.secure_id: # We already have a secure ID, no need to register return False if self._config.cloud: return self._message_store.accepts("register-cloud-vm") elif self._config.provisioning_otp: return self._message_store.accepts("register-provisioned-machine") return bool(id.computer_title and id.account_name and self._message_store.accepts("register")) def register(self): """ Attempt to register with the Landscape server. 
@return: A L{Deferred} which will either be fired with None if registration was successful or will fail with an L{InvalidCredentialsError} if not. """ self._identity.secure_id = None self._identity.insecure_id = None result = RegistrationResponse(self._reactor).deferred self._exchange.exchange() return result def _get_juju_data(self): """Load Juju information.""" juju_info = get_juju_info(self._config) if juju_info is None: return None self._juju_data = dict( (key, juju_info[key]) for key in juju_data) def _get_data(self, path, accumulate): """ Get data at C{path} on the EC2 API endpoint, and add the result to the C{accumulate} list. """ return self._fetch_async(EC2_API + path).addCallback(accumulate.append) def _fetch_ec2_data(self): """Retrieve available EC2 information, if in a EC2 compatible cloud.""" id = self._identity if self._config.cloud and not id.secure_id: # Fetch data from the EC2 API, to be used later in the registration # process # We ignore errors from user-data because it's common for the # URL to return a 404 when the data is unavailable. ec2_data = [] deferred = self._fetch_async(EC2_API + "/user-data").addErrback( log_failure).addCallback(ec2_data.append) paths = [ "/meta-data/instance-id", "/meta-data/reservation-id", "/meta-data/local-hostname", "/meta-data/public-hostname", "/meta-data/ami-launch-index", "/meta-data/ami-id", "/meta-data/local-ipv4", "/meta-data/public-ipv4"] # We're not using a DeferredList here because we want to keep the # number of connections to the backend minimal. See lp:567515. for path in paths: deferred.addCallback( lambda ignore, path=path: self._get_data(path, ec2_data)) # Special case the ramdisk retrieval, because it may not be present deferred.addCallback( lambda ignore: self._fetch_async( EC2_API + "/meta-data/ramdisk-id").addErrback(log_failure)) deferred.addCallback(ec2_data.append) # And same for kernel deferred.addCallback( lambda ignore: self._fetch_async( EC2_API + "/meta-data/kernel-id").addErrback(log_failure)) deferred.addCallback(ec2_data.append) def record_data(ignore): """Record the instance data returned by the EC2 API.""" (raw_user_data, instance_key, reservation_key, local_hostname, public_hostname, launch_index, ami_key, local_ip, public_ip, ramdisk_key, kernel_key) = ec2_data self._ec2_data = { "instance_key": instance_key, "reservation_key": reservation_key, "local_hostname": local_hostname, "public_hostname": public_hostname, "launch_index": launch_index, "kernel_key": kernel_key, "ramdisk_key": ramdisk_key, "image_key": ami_key, "public_ipv4": public_ip, "local_ipv4": local_ip} for k, v in self._ec2_data.items(): if v is None and k in ("ramdisk_key", "kernel_key"): continue self._ec2_data[k] = v.decode("utf-8") self._ec2_data["launch_index"] = int( self._ec2_data["launch_index"]) if self._config.otp: self._otp = self._config.otp return instance_data = _extract_ec2_instance_data( raw_user_data, int(launch_index)) if instance_data is not None: self._otp = instance_data["otp"] exchange_url = instance_data["exchange-url"] ping_url = instance_data["ping-url"] self._exchange._transport.set_url(exchange_url) self._config.url = exchange_url self._config.ping_url = ping_url if "ssl-ca-certificate" in instance_data: from landscape.configuration import \ store_public_key_data public_key_file = store_public_key_data( self._config, instance_data["ssl-ca-certificate"]) self._config.ssl_public_key = public_key_file self._exchange._transport._pubkey = public_key_file self._config.write() def log_error(error): log_failure(error, 
msg="Got error while fetching meta-data: %r" % (error.value,)) deferred.addCallback(record_data) deferred.addErrback(log_error) def _handle_exchange_done(self): """Registered handler for the C{"exchange-done"} event. If we are not registered yet, schedule another message exchange. The first exchange made us accept the message type "register", so the next "pre-exchange" event will make L{_handle_pre_exchange} queue a registration message for delivery. """ if self.should_register() and not self._should_register: self._exchange.exchange() def _handle_pre_exchange(self): """ An exchange is about to happen. If we don't have a secure id already set, and we have the needed information available, queue a registration message with the server. A computer can fall into several categories: - a "cloud VM" - a "normal" computer - a "provisionned machine". Furthermore, Cloud VMs can be registered with either a One Time Password (OTP), or with a normal registration password. """ registration_failed = False # The point of storing this flag is that if we should *not* register # now, and then after the exchange we *should*, we schedule an urgent # exchange again. Without this flag we would just spin trying to # connect to the server when something is clearly preventing the # registration. self._should_register = self.should_register() if not self._should_register: return # These are just to shorten the code. identity = self._identity account_name = identity.account_name tags = identity.tags group = identity.access_group registration_key = identity.registration_key self._message_store.delete_all_messages() if not is_valid_tag_list(tags): tags = None logging.error("Invalid tags provided for cloud registration.") message = {"type": None, # either "register" or "register-cloud-vm" "otp": None, "hostname": get_fqdn(), "account_name": identity.account_name, "registration_password": None, "tags": tags, "vm-info": get_vm_info()} if group: message["access_group"] = group if self._config.cloud and self._ec2_data is not None: # This is the "cloud VM" case. message["type"] = "register-cloud-vm" message.update(self._ec2_data) if self._otp: logging.info("Queueing message to register with OTP") message["otp"] = self._otp elif account_name: with_tags = "and tags %s " % tags if tags else "" with_group = "in access group '%s' " % group if group else "" logging.info( u"Queueing message to register with account %r %s%s" u"as an EC2 instance." % ( account_name, with_group, with_tags)) message["registration_password"] = registration_key else: registration_failed = True elif account_name: # The computer is a normal computer, possibly a container. with_word = "with" if bool(registration_key) else "without" with_tags = "and tags %s " % tags if tags else "" with_group = "in access group '%s' " % group if group else "" logging.info(u"Queueing message to register with account %r %s%s" "%s a password." % (account_name, with_group, with_tags, with_word)) message["type"] = "register" message["computer_title"] = identity.computer_title message["registration_password"] = identity.registration_key message["container-info"] = get_container_info() if self._juju_data is not None: message["juju-info"] = self._juju_data elif self._config.provisioning_otp: # This is a newly provisionned machine. 
# In this case message is overwritten because it's much simpler logging.info(u"Queueing message to register with OTP as a" u" newly provisioned machine.") message = {"type": "register-provisioned-machine", "otp": self._config.provisioning_otp} else: registration_failed = True if registration_failed: self._reactor.fire("registration-failed") else: self._exchange.send(message) def _handle_set_id(self, message): """Registered handler for the C{"set-id"} event. Record and start using the secure and insecure IDs from the given message. Fire C{"registration-done"} and C{"resynchronize-clients"}. """ id = self._identity id.secure_id = message.get("id") id.insecure_id = message.get("insecure-id") logging.info("Using new secure-id ending with %s for account %s.", id.secure_id[-10:], id.account_name) logging.debug("Using new secure-id: %s", id.secure_id) self._reactor.fire("registration-done") self._reactor.fire("resynchronize-clients") def _handle_registration(self, message): if message["info"] == "unknown-account": self._reactor.fire("registration-failed") def _handle_unknown_id(self, message): id = self._identity clone = message.get("clone-of") if clone is None: logging.info("Client has unknown secure-id for account %s." % id.account_name) else: logging.info("Client is clone of computer %s" % clone) # Set a new computer title so when a registration request will be # made, the pending computer UI will indicate that this is a clone # of another computer. There's no need to persist the changes since # a new registration will be requested immediately. if clone == self._config.computer_title: title = "%s (clone)" % self._config.computer_title else: title = "%s (clone of %s)" % (self._config.computer_title, clone) self._config.computer_title = title id.secure_id = None id.insecure_id = None class RegistrationResponse(object): """A helper for dealing with the response of a single registration request. @ivar deferred: The L{Deferred} that will be fired as per L{RegistrationHandler.register}. """ def __init__(self, reactor): self._reactor = reactor self._done_id = reactor.call_on("registration-done", self._done) self._failed_id = reactor.call_on("registration-failed", self._failed) self.deferred = Deferred() def _cancel_calls(self): self._reactor.cancel_call(self._done_id) self._reactor.cancel_call(self._failed_id) def _done(self): self.deferred.callback(None) self._cancel_calls() def _failed(self): self.deferred.errback(InvalidCredentialsError()) self._cancel_calls() def _extract_ec2_instance_data(raw_user_data, launch_index): """ Given the raw string of EC2 User Data, parse it and return the dict of instance data for this particular instance. If the data can't be parsed, a debug message will be logged and None will be returned. """ try: user_data = loads(raw_user_data) except ValueError: logging.debug("Got invalid user-data %r" % (raw_user_data,)) return if not isinstance(user_data, dict): logging.debug("user-data %r is not a dict" % (user_data,)) return for key in "otps", "exchange-url", "ping-url": if key not in user_data: logging.debug("user-data %r doesn't have key %r." 
% (user_data, key)) return if len(user_data["otps"]) <= launch_index: logging.debug("user-data %r doesn't have OTP for launch index %d" % (user_data, launch_index)) return instance_data = {"otp": user_data["otps"][launch_index], "exchange-url": user_data["exchange-url"], "ping-url": user_data["ping-url"]} if "ssl-ca-certificate" in user_data: instance_data["ssl-ca-certificate"] = user_data["ssl-ca-certificate"] return instance_data def _wait_for_network(): """ Keep trying to connect to the EC2 metadata server until it becomes accessible or until five minutes pass. This is necessary because the networking init script on Ubuntu is asynchronous; the network may not actually be up by the time the landscape-client init script is invoked. """ timeout = 5 * 60 port = 80 start = time.time() while True: s = socket.socket() try: s.connect((EC2_HOST, port)) s.close() return except socket.error: time.sleep(1) if time.time() - start > timeout: break def is_cloud_managed(fetch=fetch): """ Return C{True} if the machine has been started by Landscape, i.e. if we can find the expected data inside the EC2 user-data field. """ _wait_for_network() try: raw_user_data = fetch(EC2_API + "/user-data", connect_timeout=5) launch_index = fetch(EC2_API + "/meta-data/ami-launch-index", connect_timeout=5) except FetchError: return False instance_data = _extract_ec2_instance_data( raw_user_data, int(launch_index)) return instance_data is not None landscape-client-14.01/landscape/broker/config.py0000644000175000017500000001031512301414317021572 0ustar andreasandreas"""Configuration class for the broker.""" import os from landscape.deployment import Configuration class BrokerConfiguration(Configuration): """Specialized configuration for the Landscape Broker. @cvar required_options: C{["url"]} """ def __init__(self): super(BrokerConfiguration, self).__init__() self._original_http_proxy = os.environ.get("http_proxy") self._original_https_proxy = os.environ.get("https_proxy") @property def exchange_store_path(self): return os.path.join(self.data_path, "exchange.database") def make_parser(self): """Parser factory for broker-specific options. 
@return: An L{OptionParser} preset for all the options from L{Configuration.make_parser} plus: - C{account_name} - C{registration_key} - C{computer_title} - C{exchange_interval} (C{15*60}) - C{urgent_exchange_interval} (C{1*60}) - C{http_proxy} - C{https_proxy} - C{cloud} - C{otp} - C{provisioning_otp} """ parser = super(BrokerConfiguration, self).make_parser() parser.add_option("-a", "--account-name", metavar="NAME", help="The account this computer belongs to.") parser.add_option("-p", "--registration-key", metavar="KEY", help="The account-wide key used for " "registering clients.") parser.add_option("-t", "--computer-title", metavar="TITLE", help="The title of this computer") parser.add_option("--exchange-interval", default=15 * 60, type="int", metavar="INTERVAL", help="The number of seconds between server " "exchanges.") parser.add_option("--urgent-exchange-interval", default=1 * 60, type="int", metavar="INTERVAL", help="The number of seconds between urgent server " "exchanges.") parser.add_option("--ping-interval", default=30, type="int", metavar="INTERVAL", help="The number of seconds between pings.") parser.add_option("--http-proxy", metavar="URL", help="The URL of the HTTP proxy, if one is needed.") parser.add_option("--https-proxy", metavar="URL", help="The URL of the HTTPS proxy, if one is needed.") parser.add_option("--cloud", action="store_true", help="Set this if your computer is in an EC2 cloud.") parser.add_option("--otp", default="", help="The OTP to use in cloud configuration.") parser.add_option("--access-group", default="", help="Suggested access group for this computer.") parser.add_option("--tags", help="Comma separated list of tag names to be sent " "to the server.") parser.add_option("--provisioning-otp", default="", help="The OTP to use for a provisioned machine.") return parser @property def message_store_path(self): """Get the path to the message store.""" return os.path.join(self.data_path, "messages") def load(self, args, accept_nonexistent_config=False): """ Load options from command line arguments and a config file. Load the configuration with L{Configuration.load}, and then set C{http_proxy} and C{https_proxy} environment variables based on that config data. """ super(BrokerConfiguration, self).load( args, accept_nonexistent_config=accept_nonexistent_config) if self.http_proxy: os.environ["http_proxy"] = self.http_proxy elif self._original_http_proxy: os.environ["http_proxy"] = self._original_http_proxy if self.https_proxy: os.environ["https_proxy"] = self.https_proxy elif self._original_https_proxy: os.environ["https_proxy"] = self._original_https_proxy landscape-client-14.01/landscape/broker/exchangestore.py0000644000175000017500000000724012301414317023167 0ustar andreasandreas"""Provide access to the persistent data used by the L{MessageExchange}.""" import time try: import sqlite3 except ImportError: from pysqlite2 import dbapi2 as sqlite3 from landscape.lib.store import with_cursor class MessageContext(object): """Stores a context for incoming messages that require a response. The context consists of - the "operation-id" value - the secure ID that was in effect when the message was received - the message type - the time when the message was received This data will be used to detect secure ID changes between the time at which the request message came in and the completion of the request. If the secure ID did change the result message is obolete and will not be sent to the server. @param db: the sqlite database handle. 
@param id: the database key value for this instance. """ def __init__(self, db, operation_id, secure_id, message_type, timestamp): self._db = db self.operation_id = operation_id self.secure_id = secure_id self.message_type = message_type self.timestamp = timestamp @with_cursor def remove(self, cursor): cursor.execute( "DELETE FROM message_context WHERE operation_id=?", (self.operation_id,)) class ExchangeStore(object): """Message meta data required by the L{MessageExchange}. The implementation uses a SQLite database as backend, with a single table called "message_context", whose schema is defined in L{ensure_exchange_schema}. @param filename: The name of the file that contains the sqlite database. """ _db = None def __init__(self, filename): self._filename = filename def _ensure_schema(self): ensure_exchange_schema(self._db) @with_cursor def add_message_context( self, cursor, operation_id, secure_id, message_type): """Add a L{MessageContext} with the given data.""" params = (operation_id, secure_id, message_type, time.time()) cursor.execute( "INSERT INTO message_context " " (operation_id, secure_id, message_type, timestamp) " " VALUES (?,?,?,?)", params) return MessageContext(self._db, *params) @with_cursor def get_message_context(self, cursor, operation_id): """The L{MessageContext} for the given C{operation_id} or C{None}.""" cursor.execute( "SELECT operation_id, secure_id, message_type, timestamp " "FROM message_context WHERE operation_id=?", (operation_id,)) row = cursor.fetchone() if row: return MessageContext(self._db, *row) else: return None @with_cursor def all_operation_ids(self, cursor): """Return all operation IDs currently stored in C{message_context}.""" cursor.execute("SELECT operation_id FROM message_context") result = cursor.fetchall() return [row[0] for row in result] def ensure_exchange_schema(db): """Create all tables needed by a L{ExchangeStore}. @param db: A connection to a SQLite database. """ cursor = db.cursor() try: cursor.execute( "CREATE TABLE message_context" " (id INTEGER PRIMARY KEY, timestamp TIMESTAMP, " " secure_id TEXT NOT NULL, operation_id INTEGER NOT NULL, " " message_type text NOT NULL)") cursor.execute( "CREATE UNIQUE INDEX msgctx_operationid_idx ON " "message_context(operation_id)") except (sqlite3.OperationalError, sqlite3.DatabaseError): cursor.close() db.rollback() else: cursor.close() db.commit() landscape-client-14.01/landscape/broker/tests/0000755000175000017500000000000012301414317021115 5ustar andreasandreaslandscape-client-14.01/landscape/broker/tests/helpers.py0000644000175000017500000002350512301414317023136 0ustar andreasandreas"""Test helpers for wiring together the various components in the broker stack. Each test helper sets up a particular component in the stack of the broker dependencies. The lowest-level component is a L{BrokerConfiguration} instance, the highest-level ones are a full L{BrokerServer} exposed over AMP and connected to remote test L{BrokerClient}. 
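A typical test case just declares the helpers it needs. A minimal sketch (assuming the C{LandscapeTest} base class, which wires C{helpers} up, and C{Message} from L{landscape.schema})::

    class ExchangeSanityTest(LandscapeTest):

        helpers = [ExchangeHelper]

        def test_queued_message_is_exchanged(self):
            self.mstore.set_accepted_types(["test"])
            self.mstore.add_schema(Message("test", {}))
            self.exchanger.send({"type": "test"})
            self.exchanger.exchange()
            self.assertEqual(1, len(self.transport.payloads))
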
""" import os from landscape.lib.fetch import fetch_async from landscape.lib.persist import Persist from landscape.watchdog import bootstrap_list from landscape.reactor import FakeReactor from landscape.amp import ComponentPublisher from landscape.broker.transport import FakeTransport from landscape.broker.exchange import MessageExchange from landscape.broker.exchangestore import ExchangeStore from landscape.broker.store import get_default_message_store from landscape.broker.registration import Identity, RegistrationHandler from landscape.broker.ping import Pinger from landscape.broker.config import BrokerConfiguration from landscape.broker.server import BrokerServer from landscape.broker.amp import RemoteBrokerConnector from landscape.broker.client import BrokerClient class BrokerConfigurationHelper(object): """Setup a L{BrokerConfiguration} instance with some test config values. The following attributes will be set on your test case: - C{config}: A sample L{BrokerConfiguration}. - C{config_filename}: The name of the configuration file that was used to generate the above C{config}. """ def set_up(self, test_case): data_path = test_case.makeDir() log_dir = test_case.makeDir() test_case.config_filename = os.path.join(test_case.makeDir(), "client.conf") open(test_case.config_filename, "w").write( "[client]\n" "url = http://localhost:91919\n" "computer_title = Some Computer\n" "account_name = some_account\n" "ping_url = http://localhost:91910\n" "data_path = %s\n" "log_dir = %s\n" % (data_path, log_dir)) bootstrap_list.bootstrap(data_path=data_path, log_dir=log_dir) test_case.config = BrokerConfiguration() test_case.config.load(["-c", test_case.config_filename]) def tear_down(self, test_case): pass class ExchangeHelper(BrokerConfigurationHelper): """Setup a L{MessageExchange} instance along with its dependencies. This helper uses the sample broker configuration provided by the L{BrokerConfigurationHelper} to create all the components needed by a L{MessageExchange}. The following attributes will be set on your test case: - C{exchanger}: A L{MessageExchange} using a L{FakeReactor} and a L{FakeTransport}. - C{reactor}: The L{FakeReactor} used by the C{exchager}. - C{transport}: The L{FakeTransport} used by the C{exchanger}. - C{identity}: The L{Identity} used by the C{exchanger} and based on the sample configuration. - C{mstore}: The L{MessageStore} used by the C{exchanger} and based on the sample configuration. - C{persist}: The L{Persist} object used by C{mstore} and C{identity}. - C{persit_filename}: Path to the file holding the C{persist} data. """ def set_up(self, test_case): super(ExchangeHelper, self).set_up(test_case) test_case.persist_filename = test_case.makePersistFile() test_case.persist = Persist(filename=test_case.persist_filename) test_case.mstore = get_default_message_store( test_case.persist, test_case.config.message_store_path) test_case.identity = Identity(test_case.config, test_case.persist) test_case.transport = FakeTransport(None, test_case.config.url, test_case.config.ssl_public_key) test_case.reactor = FakeReactor() test_case.exchange_store = ExchangeStore( test_case.config.exchange_store_path) test_case.exchanger = MessageExchange( test_case.reactor, test_case.mstore, test_case.transport, test_case.identity, test_case.exchange_store, test_case.config) class RegistrationHelper(ExchangeHelper): """Setup a L{RegistrationHandler} instance along with its dependencies. This helper adds a L{RegistrationHandler} instance to L{ExchangeHelper}. 
If the test case has C{cloud} class attribute, the L{RegistrationHandler} will be configured for a cloud registration. The following attributes will be set in your test case: - C{handler}: A L{RegistrationHandler}. - C{fetch_func}: The C{fetch_async} function used by the C{handler}, it can be customised by test cases. """ def set_up(self, test_case): super(RegistrationHelper, self).set_up(test_case) test_case.pinger = Pinger(test_case.reactor, test_case.identity, test_case.exchanger, test_case.config) def fetch_func(*args, **kwargs): return test_case.fetch_func(*args, **kwargs) test_case.fetch_func = fetch_async test_case.config.cloud = getattr(test_case, "cloud", False) if hasattr(test_case, "juju_contents"): test_case.makeFile( test_case.juju_contents, path=test_case.config.juju_filename) test_case.handler = RegistrationHandler( test_case.config, test_case.identity, test_case.reactor, test_case.exchanger, test_case.pinger, test_case.mstore, fetch_async=fetch_func) class BrokerServerHelper(RegistrationHelper): """Setup a L{BrokerServer} instance. This helper adds a L{BrokerServer} to the L{RegistrationHelper}. The following attributes will be set in your test case: - C{broker}: A L{BrokerServer}. """ def set_up(self, test_case): super(BrokerServerHelper, self).set_up(test_case) test_case.broker = BrokerServer(test_case.config, test_case.reactor, test_case.exchanger, test_case.handler, test_case.mstore, test_case.pinger) class RemoteBrokerHelper(BrokerServerHelper): """Setup a connected L{RemoteBroker}. This helper extends L{BrokerServerHelper}.by adding a L{RemoteBroker} which exposes the L{BrokerServer} instance remotely via our AMP-based machinery. IMPORTANT: note that the connection is created using a *real* Unix socket, calling L{FakeReactor.call_unix} which in turn defers to the *real* Twisted reactor. This means that all calls to the L{RemoteBroker} instance will be truly asynchronous and tests will need to return deferreds in order to let the reactor run. See also:: http://twistedmatrix.com/documents/current/core/howto/testing.html and the "Leave the Reactor as you found it" paragraph to understand how to write tests interacting with the reactor. The following attributes will be set in your test case: - C{remote}: A C{RemoteObject} connected to the broker server. """ def set_up(self, test_case): super(RemoteBrokerHelper, self).set_up(test_case) self._publisher = ComponentPublisher(test_case.broker, test_case.reactor, test_case.config) self._connector = RemoteBrokerConnector(test_case.reactor, test_case.config) self._publisher.start() deferred = self._connector.connect() test_case.remote = test_case.successResultOf(deferred) def tear_down(self, test_case): self._connector.disconnect() self._publisher.stop() super(RemoteBrokerHelper, self).tear_down(test_case) class BrokerClientHelper(RemoteBrokerHelper): """Setup a connected L{BrokerClient}. This helper adds a L{BrokerClient} connected to a L{BrokerServerHelper} via its C{broker} attribute, which is the L{RemoteBroker} instance setup by the L{RemoteBrokerHelper}. The following attributes will be set in your test case: - C{client}: A connected L{BrokerClient}. - C{client_reactor}: The L{FakeReactor} used by the client. Note that this needs to be different from the C{reactor} attribute, which is the L{FakeReactor} used by the L{BrokerServer}, so tests can emulate events firing in different processes. 
""" def set_up(self, test_case): super(BrokerClientHelper, self).set_up(test_case) # The client needs its own reactor to avoid infinite loops # when the broker broadcasts and event test_case.client_reactor = FakeReactor() test_case.client = BrokerClient(test_case.client_reactor) test_case.client.broker = test_case.remote class RemoteClientHelper(BrokerClientHelper): """Setup a connected and registered L{RemoteClient}. This helper extends L{BrokerClientHelper} by registering the test L{BrokerClient} against the L{BrokerServer} which will then be able to talk to it via our AMP-based machinery. . The following attributes will be set in your test case: - C{remote_client}: A C{RemoteClient} connected to a registered client. """ def set_up(self, test_case): super(RemoteClientHelper, self).set_up(test_case) self._client_publisher = ComponentPublisher(test_case.client, test_case.reactor, test_case.config) self._client_publisher.start() test_case.remote.register_client("client") test_case.remote_client = test_case.broker.get_client("client") self._client_connector = test_case.broker.get_connector("client") def tear_down(self, test_case): self._client_connector.disconnect() self._client_publisher.stop() super(RemoteClientHelper, self).tear_down(test_case) landscape-client-14.01/landscape/broker/tests/test_transport.py0000644000175000017500000001241312301414317024563 0ustar andreasandreasimport os from landscape import VERSION from landscape.broker.transport import HTTPTransport from landscape.lib.fetch import PyCurlError from landscape.lib import bpickle from landscape.tests.helpers import LandscapeTest, LogKeeperHelper from twisted.web import server, resource from twisted.internet import reactor from twisted.internet.ssl import DefaultOpenSSLContextFactory from twisted.internet.threads import deferToThread def sibpath(path): return os.path.join(os.path.dirname(__file__), path) PRIVKEY = sibpath("private.ssl") PUBKEY = sibpath("public.ssl") BADPRIVKEY = sibpath("badprivate.ssl") BADPUBKEY = sibpath("badpublic.ssl") class DataCollectingResource(resource.Resource): request = content = None def getChild(self, request, name): return self def render(self, request): self.request = request self.content = request.content.read() return bpickle.dumps("Great.") class HTTPTransportTest(LandscapeTest): helpers = [LogKeeperHelper] def setUp(self): super(HTTPTransportTest, self).setUp() self.ports = [] def tearDown(self): super(HTTPTransportTest, self).tearDown() for port in self.ports: port.stopListening() def test_get_url(self): url = "http://example/ooga" transport = HTTPTransport(None, url) self.assertEqual(transport.get_url(), url) def test_set_url(self): transport = HTTPTransport(None, "http://example/ooga") transport.set_url("http://example/message-system") self.assertEqual(transport.get_url(), "http://example/message-system") def test_request_data(self): """ When a request is sent with HTTPTransport.exchange, it should include the (optional) computer ID, a user agent, and the message API version as HTTP headers, and the payload as a bpickled request body. 
""" r = DataCollectingResource() port = reactor.listenTCP(0, server.Site(r), interface="127.0.0.1") self.ports.append(port) transport = HTTPTransport( None, "http://localhost:%d/" % (port.getHost().port,)) result = deferToThread(transport.exchange, "HI", computer_id="34", exchange_token="abcd-efgh", message_api="X.Y") def got_result(ignored): self.assertEqual(r.request.received_headers["x-computer-id"], "34") self.assertEqual(r.request.received_headers["x-exchange-token"], "abcd-efgh") self.assertEqual(r.request.received_headers["user-agent"], "landscape-client/%s" % (VERSION,)) self.assertEqual(r.request.received_headers["x-message-api"], "X.Y") self.assertEqual(bpickle.loads(r.content), "HI") result.addCallback(got_result) return result def test_ssl_verification_positive(self): """ The client transport should complete an upload of messages to a host which provides SSL data which can be verified by the public key specified. """ r = DataCollectingResource() context_factory = DefaultOpenSSLContextFactory(PRIVKEY, PUBKEY) port = reactor.listenSSL(0, server.Site(r), context_factory, interface="127.0.0.1") self.ports.append(port) transport = HTTPTransport( None, "https://localhost:%d/" % (port.getHost().port,), PUBKEY) result = deferToThread(transport.exchange, "HI", computer_id="34", message_api="X.Y") def got_result(ignored): self.assertEqual(r.request.received_headers["x-computer-id"], "34") self.assertEqual(r.request.received_headers["user-agent"], "landscape-client/%s" % (VERSION,)) self.assertEqual(r.request.received_headers["x-message-api"], "X.Y") self.assertEqual(bpickle.loads(r.content), "HI") result.addCallback(got_result) return result def test_ssl_verification_negative(self): """ If the SSL server provides a key which is not verified by the specified public key, then the client should immediately end the connection without uploading any message data. 
""" self.log_helper.ignore_errors(PyCurlError) r = DataCollectingResource() context_factory = DefaultOpenSSLContextFactory( BADPRIVKEY, BADPUBKEY) port = reactor.listenSSL(0, server.Site(r), context_factory, interface="127.0.0.1") self.ports.append(port) transport = HTTPTransport(None, "https://localhost:%d/" % (port.getHost().port,), pubkey=PUBKEY) result = deferToThread(transport.exchange, "HI", computer_id="34", message_api="X.Y") def got_result(ignored): self.assertIs(r.request, None) self.assertIs(r.content, None) self.assertTrue("server certificate verification failed" in self.logfile.getvalue()) result.addErrback(got_result) return result landscape-client-14.01/landscape/broker/tests/test_store.py0000644000175000017500000006067012301414317023673 0ustar andreasandreasimport tempfile import shutil import os from landscape.lib.persist import Persist from landscape.broker.store import MessageStore from landscape.schema import InvalidError, Message, Int, Bytes, Unicode from landscape.tests.helpers import LandscapeTest from landscape.tests.mocker import ANY from landscape import SERVER_API class MessageStoreTest(LandscapeTest): def setUp(self): super(MessageStoreTest, self).setUp() self.temp_dir = tempfile.mkdtemp() self.persist_filename = tempfile.mktemp() self.store = self.create_store() def create_store(self): persist = Persist(filename=self.persist_filename) store = MessageStore(persist, self.temp_dir, 20) store.set_accepted_types(["empty", "data", "resynchronize"]) store.add_schema(Message("empty", {})) store.add_schema(Message("empty2", {})) store.add_schema(Message("data", {"data": Bytes()})) store.add_schema(Message("unaccepted", {"data": Bytes()})) store.add_schema(Message("resynchronize", {})) return store def tearDown(self): super(MessageStoreTest, self).tearDown() shutil.rmtree(self.temp_dir) if os.path.isfile(self.persist_filename): os.unlink(self.persist_filename) def test_get_set_sequence(self): self.assertEqual(self.store.get_sequence(), 0) self.store.set_sequence(3) self.assertEqual(self.store.get_sequence(), 3) # Ensure it's actually saved. self.store.commit() store = self.create_store() self.assertEqual(store.get_sequence(), 3) def test_get_set_server_sequence(self): self.assertEqual(self.store.get_server_sequence(), 0) self.store.set_server_sequence(3) self.assertEqual(self.store.get_server_sequence(), 3) # Ensure it's actually saved. self.store.commit() store = self.create_store() self.assertEqual(store.get_server_sequence(), 3) def test_get_set_server_uuid(self): self.assertEqual(self.store.get_server_uuid(), None) self.store.set_server_uuid("abcd-efgh") self.assertEqual(self.store.get_server_uuid(), "abcd-efgh") # Ensure it's actually saved. self.store.commit() store = self.create_store() self.assertEqual(store.get_server_uuid(), "abcd-efgh") def test_get_set_exchange_token(self): """ The next-exchange-token value can be persisted and retrieved. """ self.assertEqual(self.store.get_exchange_token(), None) self.store.set_exchange_token("abcd-efgh") self.assertEqual(self.store.get_exchange_token(), "abcd-efgh") # Ensure it's actually saved. 
self.store.commit() store = self.create_store() self.assertEqual(store.get_exchange_token(), "abcd-efgh") def test_get_pending_offset(self): self.assertEqual(self.store.get_pending_offset(), 0) self.store.set_pending_offset(3) self.assertEqual(self.store.get_pending_offset(), 3) def test_add_pending_offset(self): self.assertEqual(self.store.get_pending_offset(), 0) self.store.add_pending_offset(3) self.assertEqual(self.store.get_pending_offset(), 3) self.store.add_pending_offset(3) self.assertEqual(self.store.get_pending_offset(), 6) self.store.add_pending_offset(-3) self.assertEqual(self.store.get_pending_offset(), 3) def test_no_pending_messages(self): self.assertEqual(self.store.get_pending_messages(1), []) def test_delete_no_messages(self): self.store.delete_old_messages() self.assertEqual(0, self.store.count_pending_messages()) def test_delete_old_messages_does_not_delete_held(self): """ Deleting old messages should avoid deleting held messages. """ self.store.add({"type": "unaccepted", "data": "blah"}) self.store.add({"type": "empty"}) self.store.set_pending_offset(1) self.store.delete_old_messages() self.store.set_accepted_types(["empty", "unaccepted"]) self.store.set_pending_offset(0) messages = self.store.get_pending_messages() self.assertEqual(len(messages), 1) self.assertEqual(messages[0]["type"], "unaccepted") def test_delete_all_messages(self): """Resetting the message store means removing *ALL* messages.""" self.store.set_accepted_types(["empty"]) self.store.add({"type": "unaccepted", "data": "blah"}) self.store.add({"type": "empty"}) self.store.add({"type": "unaccepted", "data": "blah"}) self.store.add({"type": "empty"}) self.store.set_pending_offset(2) self.store.delete_all_messages() self.store.set_accepted_types(["empty", "unaccepted"]) self.assertEqual(self.store.get_pending_offset(), 0) self.assertEqual(self.store.get_pending_messages(), []) def test_one_message(self): self.store.add(dict(type="data", data="A thing")) messages = self.store.get_pending_messages(200) self.assertMessages(messages, [{"type": "data", "data": "A thing", "api": SERVER_API}]) def test_max_pending(self): for i in range(10): self.store.add(dict(type="data", data=str(i))) il = [m["data"] for m in self.store.get_pending_messages(5)] self.assertEqual(il, map(str, [0, 1, 2, 3, 4])) def test_offset(self): self.store.set_pending_offset(5) for i in range(15): self.store.add(dict(type="data", data=str(i))) il = [m["data"] for m in self.store.get_pending_messages(5)] self.assertEqual(il, map(str, [5, 6, 7, 8, 9])) def test_exercise_multi_dir(self): for i in range(35): self.store.add(dict(type="data", data=str(i))) il = [m["data"] for m in self.store.get_pending_messages(50)] self.assertEqual(il, map(str, range(35))) def test_wb_clean_up_empty_directories(self): for i in range(60): self.store.add(dict(type="data", data=str(i))) il = [m["data"] for m in self.store.get_pending_messages(60)] self.assertEqual(il, map(str, range(60))) self.assertEqual(set(os.listdir(self.temp_dir)), set(["0", "1", "2"])) self.store.set_pending_offset(60) self.store.delete_old_messages() self.assertEqual(os.listdir(self.temp_dir), []) def test_unaccepted(self): for i in range(10): self.store.add(dict(type=["data", "unaccepted"][i % 2], data=str(i))) il = [m["data"] for m in self.store.get_pending_messages(20)] self.assertEqual(il, map(str, [0, 2, 4, 6, 8])) def test_unaccepted_with_offset(self): for i in range(10): self.store.add(dict(type=["data", "unaccepted"][i % 2], data=str(i))) self.store.set_pending_offset(2) il = 
[m["data"] for m in self.store.get_pending_messages(20)] self.assertEqual(il, map(str, [4, 6, 8])) def test_unaccepted_reaccepted(self): for i in range(10): self.store.add(dict(type=["data", "unaccepted"][i % 2], data=str(i))) self.store.set_pending_offset(2) il = [m["data"] for m in self.store.get_pending_messages(2)] self.store.set_accepted_types(["data", "unaccepted"]) il = [m["data"] for m in self.store.get_pending_messages(20)] self.assertEqual(il, map(str, [4, 6, 8, 1, 3, 5, 7, 9])) def test_accepted_unaccepted(self): for i in range(10): self.store.add(dict(type=["data", "unaccepted"][i % 2], data=str(i))) # Setting pending offset here means that the first two # messages, even though becoming unaccepted now, were already # accepted before, so they shouldn't be marked for hold. self.store.set_pending_offset(2) self.store.set_accepted_types(["unaccepted"]) il = [m["data"] for m in self.store.get_pending_messages(20)] self.assertEqual(il, map(str, [1, 3, 5, 7, 9])) self.store.set_accepted_types(["data", "unaccepted"]) il = [m["data"] for m in self.store.get_pending_messages(20)] self.assertEqual(il, map(str, [1, 3, 5, 7, 9, 4, 6, 8])) def test_accepted_unaccepted_old(self): for i in range(10): self.store.add(dict(type=["data", "unaccepted"][i % 2], data=str(i))) self.store.set_pending_offset(2) self.store.set_accepted_types(["unaccepted"]) il = [m["data"] for m in self.store.get_pending_messages(20)] self.assertEqual(il, map(str, [1, 3, 5, 7, 9])) # Now, if the server asks us to go back and process # previously accepted messages that are now unaccepted, # they should be put on hold. self.store.set_pending_offset(0) il = [m["data"] for m in self.store.get_pending_messages(20)] self.assertEqual(il, map(str, [1, 3, 5, 7, 9])) # When the server starts accepting them again, these old # messages will also be delivered. self.store.set_accepted_types(["data", "unaccepted"]) il = [m["data"] for m in self.store.get_pending_messages(20)] self.assertEqual(il, map(str, [1, 3, 5, 7, 9, 0, 2, 4, 6, 8])) def test_wb_handle_broken_messages(self): self.log_helper.ignore_errors(ValueError) self.store.add({"type": "empty"}) self.store.add({"type": "empty2"}) filename = os.path.join(self.temp_dir, "0", "0") self.assertTrue(os.path.isfile(filename)) with open(filename, "w") as fh: fh.write("bpickle will break reading this") self.assertEqual(self.store.get_pending_messages(), []) # FIXME This is an unfortunate assertion because it relies on # a message generated by external code. As it turns out, this # message is different between Python 2.4 and 2.5. The # snippet checked here is the largest common chunk between # Python 2.4 and 2.5. It might be worth making the message # store call an event handler when it encounters a broken # message and hooking on that for this assertion instead of # relying on this fragile check. self.assertIn("invalid literal for int()", self.logfile.getvalue()) self.logfile.seek(0) self.logfile.truncate() # Unholding will also load the message. 
self.store.set_accepted_types([]) self.store.set_accepted_types(["empty", "empty2"]) self.assertIn("invalid literal for int()", self.logfile.getvalue()) def test_wb_delete_messages_with_broken(self): self.log_helper.ignore_errors(ValueError) self.store.add({"type": "data", "data": "1"}) self.store.add({"type": "data", "data": "2"}) filename = os.path.join(self.temp_dir, "0", "0") self.assertTrue(os.path.isfile(filename)) with open(filename, "w") as fh: fh.write("bpickle will break reading this") messages = self.store.get_pending_messages() self.assertEqual(messages, [{"type": "data", "data": "2", "api": SERVER_API}]) self.store.set_pending_offset(len(messages)) messages = self.store.get_pending_messages() self.store.delete_old_messages() self.assertEqual(messages, []) self.assertIn("ValueError", self.logfile.getvalue()) def test_atomic_message_writing(self): """ If the machine gets unplugged halfway through writing a file, the message should not be half-written. """ self.store.add_schema(Message("data", {"data": Int()})) self.store.add({"type": "data", "data": 1}) # We simulate it by creating a fake file which raises halfway through # writing a file. replaced_file_factory = self.mocker.replace("__builtin__.open", passthrough=False) replaced_file = replaced_file_factory(ANY, "w") replaced_file.write(ANY) self.mocker.throw(IOError("Sorry, pal!")) self.mocker.replay() # This kind of ensures that raising an exception is somewhat # similar to unplugging the power -- i.e., we're not relying # on special exception-handling in the file-writing code. self.assertRaises(IOError, self.store.add, {"type": "data", "data": 2}) self.mocker.verify() self.mocker.reset() self.assertEqual(self.store.get_pending_messages(), [{"type": "data", "data": 1, "api": SERVER_API}]) def test_api_attribute(self): self.assertEqual(self.store.api, SERVER_API) new_api = "New API version!" self.store.api = new_api self.assertEqual(self.store.api, new_api) def test_default_api_on_messages(self): self.store.add({"type": "empty"}) self.assertEqual(self.store.get_pending_messages(), [{"type": "empty", "api": SERVER_API}]) def test_custom_api_on_store(self): self.store.api = "X.Y" self.store.add({"type": "empty"}) self.assertEqual(self.store.get_pending_messages(), [{"type": "empty", "api": "X.Y"}]) def test_custom_api_on_messages(self): self.store.add({"type": "empty", "api": "X.Y"}) self.assertEqual(self.store.get_pending_messages(), [{"type": "empty", "api": "X.Y"}]) def test_coercion(self): """ When adding a message to the message store, it should be coerced according to the message schema for the type of the message. """ self.assertRaises(InvalidError, self.store.add, {"type": "data", "data": 3}) def test_coercion_ignores_custom_api(self): """ If a custom 'api' key is specified in the message, it should not be considered during schema verification. """ self.store.add({"type": "empty", "api": "whatever"}) def test_message_is_actually_coerced(self): """ The message that eventually gets sent should be the result of the coercion.
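# --- Illustrative sketch: one common way to get the atomicity that
# test_atomic_message_writing() demands (not necessarily the store's own
# code) ---
import os
import tempfile

def atomic_write(path, data):
    # Write to a temporary file in the same directory, then rename() it over
    # the target: rename() is atomic on POSIX, so a crash mid-write can never
    # leave a half-written message behind.
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path))
    try:
        os.write(fd, data)
    finally:
        os.close(fd)
    os.rename(tmp, path)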
""" self.store.add_schema(Message("data", {"data": Unicode()})) self.store.add({"type": "data", "data": u"\N{HIRAGANA LETTER A}".encode("utf-8"), "api": "whatever"}) self.assertEqual(self.store.get_pending_messages(), [{"type": "data", "api": "whatever", "data": u"\N{HIRAGANA LETTER A}"}]) def test_count_pending_messages(self): """It is possible to get the total number of pending messages.""" self.assertEqual(self.store.count_pending_messages(), 0) self.store.add({"type": "empty"}) self.assertEqual(self.store.count_pending_messages(), 1) self.store.add({"type": "data", "data": "yay"}) self.assertEqual(self.store.count_pending_messages(), 2) def test_commit(self): """ The Message Store can be told to save its persistent data to disk on demand. """ filename = self.makeFile() store = MessageStore(Persist(filename=filename), self.temp_dir) store.set_accepted_types(["foo", "bar"]) self.assertFalse(os.path.exists(filename)) store.commit() self.assertTrue(os.path.exists(filename)) store = MessageStore(Persist(filename=filename), self.temp_dir) self.assertEqual(set(store.get_accepted_types()), set(["foo", "bar"])) def test_is_pending_pre_and_post_message_delivery(self): self.log_helper.ignore_errors(ValueError) # We add a couple of messages held and broken, and also a few normal # messages before and after, just to increase the chances of breaking # due to picking the pending offset incorrectly. self.store.set_accepted_types(["empty"]) # For the same reason we break the first message. self.store.add({"type": "empty"}) filename = os.path.join(self.temp_dir, "0", "0") self.assertTrue(os.path.isfile(filename)) with open(filename, "w") as fh: fh.write("bpickle will break reading this") # And hold the second one. self.store.add({"type": "data", "data": "A thing"}) self.store.add({"type": "empty"}) self.store.add({"type": "empty"}) id = self.store.add({"type": "empty"}) self.store.add({"type": "empty"}) self.store.add({"type": "empty"}) # Broken messages will be processed here. self.assertTrue(len(self.store.get_pending_messages()), 5) self.assertTrue(self.store.is_pending(id)) self.store.add_pending_offset(2) self.assertTrue(self.store.is_pending(id)) self.store.add_pending_offset(1) self.assertFalse(self.store.is_pending(id)) def test_is_pending_with_held_message(self): self.store.set_accepted_types(["empty"]) id = self.store.add({"type": "data", "data": "A thing"}) # Add another normal message and increment the pending offset # to make the held message stay "behind" in the queue. self.store.add({"type": "empty"}) self.store.add_pending_offset(1) self.assertTrue(self.store.is_pending(id)) def test_is_pending_with_broken_message(self): """When a message breaks we consider it to be no longer there.""" self.log_helper.ignore_errors(ValueError) id = self.store.add({"type": "empty"}) filename = os.path.join(self.temp_dir, "0", "0") self.assertTrue(os.path.isfile(filename)) with open(filename, "w") as fh: fh.write("bpickle will break reading this") self.assertEqual(self.store.get_pending_messages(), []) self.assertFalse(self.store.is_pending(id)) def test_get_session_id_returns_the_same_id_for_the_same_scope(self): """We get the same id returned from get_session_id when we used the same scope. """ global_session_id1 = self.store.get_session_id() global_session_id2 = self.store.get_session_id() self.assertEqual(global_session_id1, global_session_id2) def test_get_session_id_unique_for_each_scope(self): """We get a unique session id for differing scopes. 
""" session_id1 = self.store.get_session_id() session_id2 = self.store.get_session_id(scope="other") self.assertNotEqual(session_id1, session_id2) def test_get_session_id_assigns_global_scope_when_none_is_provided(self): """Test that get_session_id puts session ids in global scope by default. """ session_id = self.store.get_session_id() persisted_ids = self.store._persist.get('session-ids') scope = persisted_ids[session_id] self.assertIs(None, scope) def test_get_session_id_with_scope(self): """Test that we can generate a session id within a limited scope.""" session_id = self.store.get_session_id(scope="hwinfo") persisted_ids = self.store._persist.get('session-ids') scope = persisted_ids[session_id] self.assertEqual("hwinfo", scope) def test_persisted_session_ids_are_valid(self): """ Test that generated session ids are persisted in the message store and can be validated with C{is_valid_session_id}. """ session_id = self.store.get_session_id() self.assertTrue(self.store.is_valid_session_id(session_id)) def test_unknown_session_ids_are_not_valid(self): """ If the provided session id is not in the persisted list of session ids then it can not be validated with C{is_valid_session_id}. """ session_id = "I've got a lovely bunch of coconuts" self.assertFalse(self.store.is_valid_session_id(session_id)) def test_drop_session_ids(self): """ Session ids can be dropped on demand. """ session_id = self.store.get_session_id() self.store.drop_session_ids() self.assertFalse(self.store.is_valid_session_id(session_id)) def test_drop_session_ids_drops_all_scopes_with_no_scopes_parameter(self): """When C{drop_session_ids} is called with no scopes then all session_ids are dropped. """ session_id1 = self.store.get_session_id() session_id2 = self.store.get_session_id(scope="hwinfo") self.store.drop_session_ids() self.assertFalse(self.store.is_valid_session_id(session_id1)) self.assertFalse(self.store.is_valid_session_id(session_id2)) def test_drop_session_ids_with_scope_drops_only_that_scope(self): """Calling C{drop_session_ids} with a scope only deletes session_ids within that scope.""" global_session_id = self.store.get_session_id() hwinfo_session_id = self.store.get_session_id(scope="hwinfo") package_session_id = self.store.get_session_id(scope="package") self.store.drop_session_ids(scopes=["hwinfo"]) self.assertTrue(self.store.is_valid_session_id(global_session_id)) self.assertFalse(self.store.is_valid_session_id(hwinfo_session_id)) self.assertTrue(self.store.is_valid_session_id(package_session_id)) def test_drop_multiple_scopes(self): """ If we pass multiple scopes into C{drop_session_ids} then those scopes are all dropped but no other are. 
""" global_session_id = self.store.get_session_id() disk_session_id = self.store.get_session_id(scope="disk") hwinfo_session_id = self.store.get_session_id(scope="hwinfo") package_session_id = self.store.get_session_id(scope="package") self.store.drop_session_ids(scopes=["hwinfo", "disk"]) self.assertTrue(self.store.is_valid_session_id(global_session_id)) self.assertFalse(self.store.is_valid_session_id(disk_session_id)) self.assertFalse(self.store.is_valid_session_id(hwinfo_session_id)) self.assertTrue(self.store.is_valid_session_id(package_session_id)) def test_record_failure_sets_first_failure_time(self): """first-failure-time recorded when calling record_failure().""" self.store.record_failure(123) self.assertEqual( 123, self.store._persist.get("first-failure-time")) def test_messages_rejected_if_failure_older_than_one_week(self): """Messages stop accumulating after one week of not being sent.""" self.store.record_failure(0) self.store.record_failure(7 * 24 * 60 * 60) self.assertIsNot(None, self.store.add({"type": "empty"})) self.store.record_failure((7 * 24 * 60 * 60) + 1) self.assertIs(None, self.store.add({"type": "empty"})) self.assertIn("WARNING: Unable to succesfully communicate with " "Landscape server for more than a week. Waiting for " "resync.", self.logfile.getvalue()) # Resync message and the first one we added right on the week boundary self.assertEqual(2, len(self.store.get_pending_messages())) def test_no_new_messages_after_discarded_following_one_week(self): """ After one week of not being sent, no new messages are queued. """ self.store.record_failure(0) self.store.add({"type": "empty"}) self.store.record_failure((7 * 24 * 60 * 60) + 1) self.store.add({"type": "empty"}) self.assertIs(None, self.store.add({"type": "empty"})) self.assertIn("DEBUG: Dropped message, awaiting resync.", self.logfile.getvalue()) def test_after_clearing_blackhole_messages_are_accepted_again(self): """After a successful exchange, messages are accepted again.""" self.store.record_failure(0) self.store.add({"type": "empty"}) self.store.record_failure((7 * 24 * 60 * 60) + 1) self.store.add({"type": "empty"}) self.assertIs(None, self.store.add({"type": "empty"})) self.store.record_success((7 * 24 * 60 * 60) + 2) self.assertIsNot(None, self.store.add({"type": "empty"})) def test_resync_requested_after_one_week_of_failures(self): """After a week of failures, a resync is requested.""" self.store.record_failure(0) self.store.add({"type": "empty"}) self.store.record_failure((7 * 24 * 60 * 60) + 1) [empty, message] = self.store.get_pending_messages() self.assertEqual("resynchronize", message["type"]) landscape-client-14.01/landscape/broker/tests/test_registration.py0000644000175000017500000014105712301414317025250 0ustar andreasandreasimport json import os import logging import pycurl import socket from twisted.internet.defer import succeed, fail from landscape.broker.registration import ( InvalidCredentialsError, RegistrationHandler, is_cloud_managed, EC2_HOST, EC2_API, Identity) from landscape.broker.config import BrokerConfiguration from landscape.tests.helpers import LandscapeTest from landscape.broker.tests.helpers import ( BrokerConfigurationHelper, RegistrationHelper) from landscape.lib.bpickle import dumps from landscape.lib.fetch import HTTPCodeError, FetchError from landscape.lib.persist import Persist from landscape.lib.vm_info import get_vm_info from landscape.configuration import print_text class IdentityTest(LandscapeTest): helpers = [BrokerConfigurationHelper] def setUp(self): 
super(IdentityTest, self).setUp() self.persist = Persist(filename=self.makePersistFile()) self.identity = Identity(self.config, self.persist) def check_persist_property(self, attr, persist_name): value = "VALUE" self.assertEqual(getattr(self.identity, attr), None, "%r attribute should default to None, not %r" % (attr, getattr(self.identity, attr))) setattr(self.identity, attr, value) self.assertEqual(getattr(self.identity, attr), value, "%r attribute should be %r, not %r" % (attr, value, getattr(self.identity, attr))) self.assertEqual( self.persist.get(persist_name), value, "%r not set to %r in persist" % (persist_name, value)) def check_config_property(self, attr): value = "VALUE" setattr(self.config, attr, value) self.assertEqual(getattr(self.identity, attr), value, "%r attribute should be %r, not %r" % (attr, value, getattr(self.identity, attr))) def test_secure_id(self): self.check_persist_property("secure_id", "registration.secure-id") def test_insecure_id(self): self.check_persist_property("insecure_id", "registration.insecure-id") def test_computer_title(self): self.check_config_property("computer_title") def test_account_name(self): self.check_config_property("account_name") def test_registration_key(self): self.check_config_property("registration_key") def test_client_tags(self): self.check_config_property("tags") def test_access_group(self): self.check_config_property("access_group") class RegistrationHandlerTestBase(LandscapeTest): helpers = [RegistrationHelper] def setUp(self): super(RegistrationHandlerTestBase, self).setUp() logging.getLogger().setLevel(logging.INFO) self.hostname = "ooga.local" self.addCleanup(setattr, socket, "getfqdn", socket.getfqdn) socket.getfqdn = lambda: self.hostname class RegistrationHandlerTest(RegistrationHandlerTestBase): def test_server_initiated_id_changing(self): """ The server must be able to ask a client to change its secure and insecure ids even if no requests were sent. """ self.exchanger.handle_message( {"type": "set-id", "id": "abc", "insecure-id": "def"}) self.assertEqual(self.identity.secure_id, "abc") self.assertEqual(self.identity.insecure_id, "def") def test_registration_done_event(self): """ When new ids are received from the server, a "registration-done" event is fired. """ reactor_mock = self.mocker.patch(self.reactor) reactor_mock.fire("registration-done") self.mocker.replay() self.exchanger.handle_message( {"type": "set-id", "id": "abc", "insecure-id": "def"}) def test_unknown_id(self): self.identity.secure_id = "old_id" self.identity.insecure_id = "old_id" self.mstore.set_accepted_types(["register"]) self.exchanger.handle_message({"type": "unknown-id"}) self.assertEqual(self.identity.secure_id, None) self.assertEqual(self.identity.insecure_id, None) def test_unknown_id_with_clone(self): """ If the server reports that we are a clone of another computer, then our computer's title is set accordingly.
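# --- Illustrative sketch of how the persist-backed attributes checked by
# check_persist_property() can be implemented (a sketch, not necessarily the
# actual Identity code; persist_property() is a hypothetical helper) ---
def persist_property(name):
    def get(self):
        return self._persist.get(name)
    def set(self, value):
        self._persist.set(name, value)
    return property(get, set)

class IdentityLike(object):
    def __init__(self, persist):
        self._persist = persist
    secure_id = persist_property("registration.secure-id")
    insecure_id = persist_property("registration.insecure-id")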
""" self.config.computer_title = "Wu" self.mstore.set_accepted_types(["register"]) self.exchanger.handle_message({"type": "unknown-id", "clone-of": "Wu"}) self.assertEqual("Wu (clone)", self.config.computer_title) self.assertIn("Client is clone of computer Wu", self.logfile.getvalue()) def test_should_register(self): self.mstore.set_accepted_types(["register"]) self.config.computer_title = "Computer Title" self.config.account_name = "account_name" self.assertTrue(self.handler.should_register()) def test_should_register_with_existing_id(self): self.mstore.set_accepted_types(["register"]) self.identity.secure_id = "secure" self.config.computer_title = "Computer Title" self.config.account_name = "account_name" self.assertFalse(self.handler.should_register()) def test_should_register_without_computer_title(self): self.mstore.set_accepted_types(["register"]) self.config.computer_title = None self.assertFalse(self.handler.should_register()) def test_should_register_without_account_name(self): self.mstore.set_accepted_types(["register"]) self.config.account_name = None self.assertFalse(self.handler.should_register()) def test_should_register_with_unaccepted_message(self): self.assertFalse(self.handler.should_register()) def test_queue_message_on_exchange(self): """ When a computer_title and account_name are available, no secure_id is set, and an exchange is about to happen, queue a registration message. """ self.mstore.set_accepted_types(["register"]) self.config.computer_title = "Computer Title" self.config.account_name = "account_name" self.reactor.fire("pre-exchange") messages = self.mstore.get_pending_messages() self.assertEqual(1, len(messages)) self.assertEqual("register", messages[0]["type"]) self.assertEqual(self.logfile.getvalue().strip(), "INFO: Queueing message to register with account " "'account_name' without a password.") def test_queue_message_on_exchange_with_vm_info(self): """ When a computer_title and account_name are available, no secure_id is set, and an exchange is about to happen, queue a registration message with VM information. """ get_vm_info_mock = self.mocker.replace(get_vm_info) get_vm_info_mock() self.mocker.result("vmware") self.mocker.replay() self.mstore.set_accepted_types(["register"]) self.config.computer_title = "Computer Title" self.config.account_name = "account_name" self.reactor.fire("pre-exchange") messages = self.mstore.get_pending_messages() self.assertEqual("vmware", messages[0]["vm-info"]) self.assertEqual(self.logfile.getvalue().strip(), "INFO: Queueing message to register with account " "'account_name' without a password.") def test_queue_message_on_exchange_with_lxc_container(self): """ If the client is running in an LXC container, the information is included in the registration message. 
""" get_container_info_mock = self.mocker.replace( "landscape.lib.vm_info.get_container_info") get_container_info_mock() self.mocker.result("lxc") self.mocker.replay() self.mstore.set_accepted_types(["register"]) self.config.computer_title = "Computer Title" self.config.account_name = "account_name" self.reactor.fire("pre-exchange") messages = self.mstore.get_pending_messages() self.assertEqual("lxc", messages[0]["container-info"]) def test_queue_message_on_exchange_with_password(self): """If a registration password is available, we pass it on!""" self.mstore.set_accepted_types(["register"]) self.config.computer_title = "Computer Title" self.config.account_name = "account_name" self.config.registration_key = "SEKRET" self.reactor.fire("pre-exchange") messages = self.mstore.get_pending_messages() password = messages[0]["registration_password"] self.assertEqual("SEKRET", password) self.assertEqual(self.logfile.getvalue().strip(), "INFO: Queueing message to register with account " "'account_name' with a password.") def test_queue_message_on_exchange_with_tags(self): """ If the admin has defined tags for this computer, we send them to the server. """ self.mstore.set_accepted_types(["register"]) self.config.computer_title = "Computer Title" self.config.account_name = "account_name" self.config.registration_key = "SEKRET" self.config.tags = u"computer,tag" self.reactor.fire("pre-exchange") messages = self.mstore.get_pending_messages() self.assertEqual("computer,tag", messages[0]["tags"]) self.assertEqual(self.logfile.getvalue().strip(), "INFO: Queueing message to register with account " "'account_name' and tags computer,tag " "with a password.") def test_queue_message_on_exchange_with_invalid_tags(self): """ If the admin has defined tags for this computer, but they are not valid, we drop them, and report an error. """ self.log_helper.ignore_errors("Invalid tags provided for cloud " "registration") self.mstore.set_accepted_types(["register"]) self.config.computer_title = "Computer Title" self.config.account_name = "account_name" self.config.registration_key = "SEKRET" self.config.tags = u"" self.reactor.fire("pre-exchange") messages = self.mstore.get_pending_messages() self.assertIs(None, messages[0]["tags"]) self.assertEqual(self.logfile.getvalue().strip(), "ERROR: Invalid tags provided for cloud " "registration.\n " "INFO: Queueing message to register with account " "'account_name' with a password.") def test_queue_message_on_exchange_with_unicode_tags(self): """ If the admin has defined tags for this computer, we send them to the server. """ self.mstore.set_accepted_types(["register"]) self.config.computer_title = "Computer Title" self.config.account_name = "account_name" self.config.registration_key = "SEKRET" self.config.tags = u"prova\N{LATIN SMALL LETTER J WITH CIRCUMFLEX}o" self.reactor.fire("pre-exchange") messages = self.mstore.get_pending_messages() expected = u"prova\N{LATIN SMALL LETTER J WITH CIRCUMFLEX}o" self.assertEqual(expected, messages[0]["tags"]) self.assertEqual(self.logfile.getvalue().strip(), "INFO: Queueing message to register with account " "'account_name' and tags prova\xc4\xb5o " "with a password.") def test_queue_message_on_exchange_with_access_group(self): """ If the admin has defined an access_group for this computer, we send it to the server. 
""" self.mstore.set_accepted_types(["register"]) self.config.account_name = "account_name" self.config.access_group = u"dinosaurs" self.config.tags = u"server,london" self.reactor.fire("pre-exchange") messages = self.mstore.get_pending_messages() self.assertEqual("dinosaurs", messages[0]["access_group"]) self.assertEqual(self.logfile.getvalue().strip(), "INFO: Queueing message to register with account " "'account_name' in access group 'dinosaurs' and " "tags server,london without a password.") def test_queue_message_on_exchange_with_empty_access_group(self): """ If the access_group is "", then the outgoing message does not define an "access_group" key. """ self.mstore.set_accepted_types(["register"]) self.config.access_group = u"" self.reactor.fire("pre-exchange") messages = self.mstore.get_pending_messages() # Make sure the key does not appear in the outgoing message. self.assertNotIn("access_group", messages[0]) def test_queue_message_on_exchange_with_none_access_group(self): """ If the access_group is None, then the outgoing message does not define an "access_group" key. """ self.mstore.set_accepted_types(["register"]) self.config.access_group = None self.reactor.fire("pre-exchange") messages = self.mstore.get_pending_messages() # Make sure the key does not appear in the outgoing message. self.assertNotIn("access_group", messages[0]) def test_queueing_registration_message_resets_message_store(self): """ When a registration message is queued, the store is reset entirely, since everything else that was queued is meaningless now that we're trying to register again. """ self.mstore.set_accepted_types(["register", "test"]) self.mstore.add({"type": "test"}) self.config.computer_title = "Computer Title" self.config.account_name = "account_name" self.reactor.fire("pre-exchange") messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 1) self.assertEqual(messages[0]["type"], "register") def test_no_message_when_should_register_is_false(self): """If we already have a secure id, do not queue a register message. """ handler_mock = self.mocker.patch(self.handler) handler_mock.should_register() self.mocker.result(False) self.mstore.set_accepted_types(["register"]) self.config.computer_title = "Computer Title" self.config.account_name = "account_name" # If we didn't fake it, it'd work. We do that to ensure that # all the needed data is in place, and that this method is # really what decides if a message is sent or not. This way # we can test it individually. self.assertTrue(self.handler.should_register()) # Now let's see. self.mocker.replay() self.reactor.fire("pre-exchange") self.assertMessages(self.mstore.get_pending_messages(), []) def test_registration_failed_event(self): """ The deferred returned by a registration request should fail with L{InvalidCredentialsError} if the server responds with a failure message. """ reactor_mock = self.mocker.patch(self.reactor) reactor_mock.fire("registration-failed") self.mocker.replay() self.exchanger.handle_message( {"type": "registration", "info": "unknown-account"}) def test_registration_failed_event_not_fired_when_uncertain(self): """ If the data in the registration message isn't what we expect, the event isn't fired. 
""" reactor_mock = self.mocker.patch(self.reactor) reactor_mock.fire("registration-failed") self.mocker.count(0) self.mocker.replay() self.exchanger.handle_message( {"type": "registration", "info": "blah-blah"}) def test_register_resets_ids(self): self.identity.secure_id = "foo" self.identity.insecure_id = "bar" self.handler.register() self.assertEqual(self.identity.secure_id, None) self.assertEqual(self.identity.insecure_id, None) def test_register_calls_urgent_exchange(self): exchanger_mock = self.mocker.patch(self.exchanger) exchanger_mock.exchange() self.mocker.passthrough() self.mocker.replay() self.handler.register() def test_register_deferred_called_on_done(self): # We don't want informational messages. self.logger.setLevel(logging.WARNING) calls = [0] d = self.handler.register() def add_call(result): self.assertEqual(result, None) calls[0] += 1 d.addCallback(add_call) # This should somehow callback the deferred. self.exchanger.handle_message( {"type": "set-id", "id": "abc", "insecure-id": "def"}) self.assertEqual(calls, [1]) # Doing it again to ensure that the deferred isn't called twice. self.exchanger.handle_message( {"type": "set-id", "id": "abc", "insecure-id": "def"}) self.assertEqual(calls, [1]) self.assertEqual(self.logfile.getvalue(), "") def test_resynchronize_fired_when_registration_done(self): """ When we call C{register} this should trigger a "resynchronize-clients" event with global scope. """ results = [] def append(scopes=None): results.append(scopes) self.reactor.call_on("resynchronize-clients", append) self.handler.register() # This should somehow callback the deferred. self.exchanger.handle_message( {"type": "set-id", "id": "abc", "insecure-id": "def"}) self.assertEqual(results, [None]) def test_register_deferred_called_on_failed(self): # We don't want informational messages. self.logger.setLevel(logging.WARNING) calls = [0] d = self.handler.register() def add_call(failure): exception = failure.value self.assertTrue(isinstance(exception, InvalidCredentialsError)) calls[0] += 1 d.addErrback(add_call) # This should somehow callback the deferred. self.exchanger.handle_message( {"type": "registration", "info": "unknown-account"}) self.assertEqual(calls, [1]) # Doing it again to ensure that the deferred isn't called twice. 
self.exchanger.handle_message( {"type": "registration", "info": "unknown-account"}) self.assertEqual(calls, [1]) self.assertEqual(self.logfile.getvalue(), "") def test_exchange_done_calls_exchange(self): exchanger_mock = self.mocker.patch(self.exchanger) exchanger_mock.exchange() self.mocker.passthrough() self.mocker.replay() self.mstore.set_accepted_types(["register"]) self.config.computer_title = "Computer Title" self.config.account_name = "account_name" self.reactor.fire("exchange-done") def test_exchange_done_wont_call_exchange_when_just_tried(self): exchanger_mock = self.mocker.patch(self.exchanger) exchanger_mock.exchange() self.mocker.count(0) self.mocker.replay() self.mstore.set_accepted_types(["register"]) self.config.computer_title = "Computer Title" self.config.account_name = "account_name" self.reactor.fire("pre-exchange") self.reactor.fire("exchange-done") def test_default_hostname(self): self.mstore.set_accepted_types(["register"]) self.config.computer_title = "Computer Title" self.config.account_name = "account_name" self.config.registration_key = "SEKRET" self.reactor.fire("pre-exchange") messages = self.mstore.get_pending_messages() self.assertEqual(socket.getfqdn(), messages[0]["hostname"]) class JujuRegistrationHandlerTest(RegistrationHandlerTestBase): juju_contents = json.dumps({"environment-uuid": "DEAD-BEEF", "unit-name": "service/0", "api-addresses": "10.0.3.1:17070", "private-address": "127.0.0.1"}) def test_juju_information_added_when_present(self): """ When Juju information is found in $data_dir/juju-info.json, key parts of it are sent in the registration message. """ self.mstore.set_accepted_types(["register"]) self.config.account_name = "account_name" self.reactor.fire("run") self.reactor.fire("pre-exchange") messages = self.mstore.get_pending_messages() expected = {"environment-uuid": "DEAD-BEEF", "api-addresses": ["10.0.3.1:17070"], "unit-name": "service/0"} self.assertEqual(expected, messages[0]["juju-info"]) class CloudRegistrationHandlerTest(RegistrationHandlerTestBase): cloud = True def setUp(self): super(CloudRegistrationHandlerTest, self).setUp() self.query_results = {} def fetch_stub(url): value = self.query_results[url] if isinstance(value, Exception): return fail(value) else: return succeed(value) self.fetch_func = fetch_stub def get_user_data(self, otps=None, exchange_url="https://example.com/message-system", ping_url="http://example.com/ping", ssl_ca_certificate=None): if otps is None: otps = ["otp1"] user_data = {"otps": otps, "exchange-url": exchange_url, "ping-url": ping_url} if ssl_ca_certificate is not None: user_data["ssl-ca-certificate"] = ssl_ca_certificate return user_data def prepare_query_results( self, user_data=None, instance_key="key1", launch_index=0, local_hostname="ooga.local", public_hostname="ooga.amazon.com", reservation_key=u"res1", ramdisk_key=u"ram1", kernel_key=u"kernel1", image_key=u"image1", local_ip="10.0.0.1", public_ip="10.0.0.2", ssl_ca_certificate=None): if user_data is None: user_data = self.get_user_data( ssl_ca_certificate=ssl_ca_certificate) if not isinstance(user_data, Exception): user_data = dumps(user_data) api_base = "http://169.254.169.254/latest" self.query_results.clear() for url_suffix, value in [ ("/user-data", user_data), ("/meta-data/instance-id", instance_key), ("/meta-data/reservation-id", reservation_key), ("/meta-data/local-hostname", local_hostname), ("/meta-data/public-hostname", public_hostname), ("/meta-data/ami-launch-index", str(launch_index)), ("/meta-data/kernel-id", kernel_key), 
("/meta-data/ramdisk-id", ramdisk_key), ("/meta-data/ami-id", image_key), ("/meta-data/local-ipv4", local_ip), ("/meta-data/public-ipv4", public_ip), ]: self.query_results[api_base + url_suffix] = value def prepare_cloud_registration(self, account_name=None, registration_key=None, tags=None, access_group=None): # Set things up so that the client thinks it should register self.mstore.set_accepted_types(list(self.mstore.get_accepted_types()) + ["register-cloud-vm"]) self.config.account_name = account_name self.config.registration_key = registration_key self.config.computer_title = None self.config.tags = tags self.config.access_group = access_group self.identity.secure_id = None self.assertTrue(self.handler.should_register()) def get_expected_cloud_message(self, **kwargs): """ Return the message which is expected from a similar call to L{get_registration_handler_for_cloud}. """ message = dict(type="register-cloud-vm", otp="otp1", hostname="ooga.local", local_hostname="ooga.local", public_hostname="ooga.amazon.com", instance_key=u"key1", reservation_key=u"res1", ramdisk_key=u"ram1", kernel_key=u"kernel1", launch_index=0, image_key=u"image1", account_name=None, registration_password=None, local_ipv4=u"10.0.0.1", public_ipv4=u"10.0.0.2", tags=None) # The get_vm_info() needs to be deferred to the else. If vm-info is # not specified in kwargs, get_vm_info() will typically be mocked. if "vm_info" in kwargs: message["vm-info"] = kwargs.pop("vm_info") else: message["vm-info"] = get_vm_info() message.update(kwargs) return message def test_cloud_registration(self): """ When the 'cloud' configuration variable is set, cloud registration is done instead of normal password-based registration. This means: - "Launch Data" is fetched from the EC2 Launch Data URL. This contains a one-time password that is used during registration. - A different "register-cloud-vm" message is sent to the server instead of "register", containing the OTP. This message is handled by immediately accepting the computer, instead of going through the pending computer stage. """ get_vm_info_mock = self.mocker.replace(get_vm_info) get_vm_info_mock() self.mocker.result("xen") self.mocker.replay() self.prepare_query_results() self.prepare_cloud_registration(tags=u"server,london") # metadata is fetched and stored at reactor startup: self.reactor.fire("run") # And the metadata returned determines the URLs that are used self.assertEqual(self.transport.get_url(), "https://example.com/message-system") self.assertEqual(self.pinger.get_url(), "http://example.com/ping") # Lets make sure those values were written back to the config file new_config = BrokerConfiguration() new_config.load_configuration_file(self.config_filename) self.assertEqual(new_config.url, "https://example.com/message-system") self.assertEqual(new_config.ping_url, "http://example.com/ping") # Okay! Exchange should cause the registration to happen. self.exchanger.exchange() # This *should* be asynchronous, but I think a billion tests are # written like this self.assertEqual(len(self.transport.payloads), 1) self.assertMessages( self.transport.payloads[0]["messages"], [self.get_expected_cloud_message(tags=u"server,london", vm_info="xen")]) def test_cloud_registration_with_access_group(self): """ If the access_group field is presnet in the configuration, the access_group field is present in the outgoing message for a VM registration, and a notice appears in the logs. 
""" self.prepare_query_results() self.prepare_cloud_registration(access_group=u"dinosaurs", tags=u"server,london") self.reactor.fire("run") self.exchanger.exchange() self.assertEqual(len(self.transport.payloads), 1) self.assertMessages( self.transport.payloads[0]["messages"], [self.get_expected_cloud_message( access_group=u"dinosaurs", tags=u"server,london")]) def test_cloud_registration_with_otp(self): """ If the OTP is present in the configuration, it's used to trigger the registration instead of using the user data. """ self.config.otp = "otp1" self.prepare_query_results(user_data=None) self.prepare_cloud_registration() # metadata is fetched and stored at reactor startup: self.reactor.fire("run") # Okay! Exchange should cause the registration to happen. self.exchanger.exchange() # This *should* be asynchronous, but I think a billion tests are # written like this self.assertEqual(len(self.transport.payloads), 1) self.assertMessages( self.transport.payloads[0]["messages"], [self.get_expected_cloud_message()]) def test_cloud_registration_with_invalid_tags(self): """ Invalid tags in the configuration should result in the tags not being sent to the server, and this fact logged. """ self.log_helper.ignore_errors("Invalid tags provided for cloud " "registration") self.prepare_query_results() self.prepare_cloud_registration(tags=u",hardy") # metadata is fetched and stored at reactor startup: self.reactor.fire("run") self.exchanger.exchange() self.assertEqual(len(self.transport.payloads), 1) self.assertMessages(self.transport.payloads[0]["messages"], [self.get_expected_cloud_message(tags=None)]) self.assertEqual(self.logfile.getvalue().strip()[:-7], "ERROR: Invalid tags provided for cloud " "registration.\n " "INFO: Queueing message to register with OTP\n " "INFO: Starting message exchange with " "https://example.com/message-system.\n " "INFO: Message exchange completed in") def test_cloud_registration_with_ssl_ca_certificate(self): """ If we have an SSL certificate CA included in the user-data, this should be written out, and the configuration updated to reflect this. """ key_filename = os.path.join(self.config.data_path, "%s.ssl_public_key" % os.path.basename(self.config_filename)) print_text_mock = self.mocker.replace(print_text) print_text_mock("Writing SSL CA certificate to %s..." 
% key_filename) self.mocker.replay() self.prepare_query_results(ssl_ca_certificate=u"1234567890") self.prepare_cloud_registration(tags=u"server,london") # metadata is fetched and stored at reactor startup: self.reactor.fire("run") # And the metadata returned determines the URLs that are used self.assertEqual("https://example.com/message-system", self.transport.get_url()) self.assertEqual(key_filename, self.transport._pubkey) self.assertEqual("http://example.com/ping", self.pinger.get_url()) # Let's make sure those values were written back to the config file new_config = BrokerConfiguration() new_config.load_configuration_file(self.config_filename) self.assertEqual("https://example.com/message-system", new_config.url) self.assertEqual("http://example.com/ping", new_config.ping_url) self.assertEqual(key_filename, new_config.ssl_public_key) self.assertEqual("1234567890", open(key_filename, "r").read()) def test_wrong_user_data(self): self.prepare_query_results(user_data="other stuff, not a bpickle") self.prepare_cloud_registration() # Mock registration-failed call reactor_mock = self.mocker.patch(self.reactor) reactor_mock.fire("registration-failed") self.mocker.replay() self.reactor.fire("run") self.exchanger.exchange() def test_wrong_object_type_in_user_data(self): self.prepare_query_results(user_data=True) self.prepare_cloud_registration() # Mock registration-failed call reactor_mock = self.mocker.patch(self.reactor) reactor_mock.fire("registration-failed") self.mocker.replay() self.reactor.fire("run") self.exchanger.exchange() def test_user_data_with_not_enough_elements(self): """ If the AMI launch index isn't represented in the list of OTPs in the user data then BOOM. """ self.prepare_query_results(launch_index=1) self.prepare_cloud_registration() # Mock registration-failed call reactor_mock = self.mocker.patch(self.reactor) reactor_mock.fire("registration-failed") self.mocker.replay() self.reactor.fire("run") self.exchanger.exchange() def test_user_data_bpickle_without_otp(self): self.prepare_query_results(user_data={"foo": "bar"}) self.prepare_cloud_registration() # Mock registration-failed call reactor_mock = self.mocker.patch(self.reactor) reactor_mock.fire("registration-failed") self.mocker.replay() self.reactor.fire("run") self.exchanger.exchange() def test_no_otp_fallback_to_account(self): self.prepare_query_results(user_data="other stuff, not a bpickle", instance_key=u"key1") self.prepare_cloud_registration(account_name=u"onward", registration_key=u"password", tags=u"london,server") self.reactor.fire("run") self.exchanger.exchange() self.assertEqual(len(self.transport.payloads), 1) self.assertMessages(self.transport.payloads[0]["messages"], [self.get_expected_cloud_message( otp=None, account_name=u"onward", registration_password=u"password", tags=u"london,server")]) self.assertEqual(self.logfile.getvalue().strip()[:-7], "INFO: Queueing message to register with account u'onward' and " "tags london,server as an EC2 instance.\n " "INFO: Starting message exchange with http://localhost:91919.\n " "INFO: Message exchange completed in") def test_queueing_cloud_registration_message_resets_message_store(self): """ When a registration from a cloud is about to happen, the message store is reset, because all previous messages are now meaningless. 
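# --- Illustrative sketch of the user-data handling pinned down above:
# unparsable or non-dict user data yields no OTP data, and with account
# credentials configured the client falls back to a plain "register"
# (parse_user_data() is hypothetical; the ValueError assumption follows the
# broken-bpickle tests elsewhere in this suite) ---
from landscape.lib import bpickle

def parse_user_data(raw):
    try:
        data = bpickle.loads(raw)
    except ValueError:
        return None  # not bpickle at all
    if not isinstance(data, dict) or "otps" not in data:
        return None  # bpickle, but not Landscape user data
    return data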
""" self.mstore.set_accepted_types(list(self.mstore.get_accepted_types()) + ["test"]) self.mstore.add({"type": "test"}) self.prepare_query_results() self.prepare_cloud_registration() self.reactor.fire("run") self.reactor.fire("pre-exchange") messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 1) self.assertEqual(messages[0]["type"], "register-cloud-vm") def test_cloud_registration_fetch_errors(self): """ If fetching metadata fails, and we have no account details to fall back to, we fire 'registration-failed'. """ self.log_helper.ignore_errors(pycurl.error) def fetch_stub(url): return fail(pycurl.error(7, "couldn't connect to host")) self.handler = RegistrationHandler( self.config, self.identity, self.reactor, self.exchanger, self.pinger, self.mstore, fetch_async=fetch_stub) self.fetch_stub = fetch_stub self.prepare_query_results() self.fetch_stub = fetch_stub self.prepare_cloud_registration() failed = [] self.reactor.call_on( "registration-failed", lambda: failed.append(True)) self.log_helper.ignore_errors("Got error while fetching meta-data") self.reactor.fire("run") self.exchanger.exchange() self.assertEqual(failed, [True]) self.assertIn('error: (7, "couldn\'t connect to host")', self.logfile.getvalue()) def test_cloud_registration_continues_without_user_data(self): """ If no user-data exists (i.e., the user-data URL returns a 404), then register-cloud-vm still occurs. """ self.log_helper.ignore_errors(HTTPCodeError) self.prepare_query_results(user_data=HTTPCodeError(404, "ohno")) self.prepare_cloud_registration(account_name="onward", registration_key="password") self.reactor.fire("run") self.exchanger.exchange() self.assertIn("HTTPCodeError: Server returned HTTP code 404", self.logfile.getvalue()) self.assertEqual(len(self.transport.payloads), 1) self.assertMessages(self.transport.payloads[0]["messages"], [self.get_expected_cloud_message( otp=None, account_name=u"onward", registration_password=u"password")]) def test_cloud_registration_continues_without_ramdisk(self): """ If the instance doesn't have a ramdisk (ie, the query for ramdisk returns a 404), then register-cloud-vm still occurs. """ self.log_helper.ignore_errors(HTTPCodeError) self.prepare_query_results(ramdisk_key=HTTPCodeError(404, "ohno")) self.prepare_cloud_registration() self.reactor.fire("run") self.exchanger.exchange() self.assertIn("HTTPCodeError: Server returned HTTP code 404", self.logfile.getvalue()) self.assertEqual(len(self.transport.payloads), 1) self.assertMessages(self.transport.payloads[0]["messages"], [self.get_expected_cloud_message( ramdisk_key=None)]) def test_cloud_registration_continues_without_kernel(self): """ If the instance doesn't have a kernel (ie, the query for kernel returns a 404), then register-cloud-vm still occurs. """ self.log_helper.ignore_errors(HTTPCodeError) self.prepare_query_results(kernel_key=HTTPCodeError(404, "ohno")) self.prepare_cloud_registration() self.reactor.fire("run") self.exchanger.exchange() self.assertIn("HTTPCodeError: Server returned HTTP code 404", self.logfile.getvalue()) self.assertEqual(len(self.transport.payloads), 1) self.assertMessages(self.transport.payloads[0]["messages"], [self.get_expected_cloud_message( kernel_key=None)]) def test_fall_back_to_normal_registration_when_metadata_fetch_fails(self): """ If fetching metadata fails, but we do have an account name, then we fall back to normal 'register' registration. 
""" self.mstore.set_accepted_types(["register"]) self.log_helper.ignore_errors(HTTPCodeError) self.prepare_query_results( public_hostname=HTTPCodeError(404, "ohnoes")) self.prepare_cloud_registration(account_name="onward", registration_key="password") self.config.computer_title = "whatever" self.reactor.fire("run") self.exchanger.exchange() self.assertIn("HTTPCodeError: Server returned HTTP code 404", self.logfile.getvalue()) self.assertEqual(len(self.transport.payloads), 1) messages = self.transport.payloads[0]["messages"] self.assertEqual("register", messages[0]["type"]) def test_should_register_in_cloud(self): """ The client should register when it's in the cloud even though it doesn't have the normal account details. """ self.mstore.set_accepted_types(self.mstore.get_accepted_types() + ("register-cloud-vm",)) self.config.account_name = None self.config.registration_key = None self.config.computer_title = None self.identity.secure_id = None self.assertTrue(self.handler.should_register()) def test_launch_index(self): """ The client used the value in C{ami-launch-index} to choose the appropriate OTP in the user data. """ otp = "correct otp for launch index" self.prepare_query_results( user_data=self.get_user_data(otps=["wrong index", otp, "wrong again"],), instance_key="key1", launch_index=1) self.prepare_cloud_registration() self.reactor.fire("run") self.exchanger.exchange() self.assertEqual(len(self.transport.payloads), 1) self.assertMessages(self.transport.payloads[0]["messages"], [self.get_expected_cloud_message(otp=otp, launch_index=1)]) def test_should_not_register_in_cloud(self): """ Having a secure ID means we shouldn't register, even in the cloud. """ self.mstore.set_accepted_types(self.mstore.get_accepted_types() + ("register-cloud-vm",)) self.config.account_name = None self.config.registration_key = None self.config.computer_title = None self.identity.secure_id = "hello" self.assertFalse(self.handler.should_register()) def test_should_not_register_without_register_cloud_vm(self): """ If the server isn't accepting a 'register-cloud-vm' message, we shouldn't register. """ self.config.account_name = None self.config.registration_key = None self.config.computer_title = None self.identity.secure_id = None self.assertFalse(self.handler.should_register()) class IsCloudManagedTests(LandscapeTest): def setUp(self): super(IsCloudManagedTests, self).setUp() self.urls = [] self.responses = [] def fake_fetch(self, url, connect_timeout=None): self.urls.append((url, connect_timeout)) return self.responses.pop(0) def mock_socket(self): """ Mock out socket usage by is_cloud_managed to wait for the network. """ # Mock the socket.connect call that it also does socket_class = self.mocker.replace("socket.socket", passthrough=False) socket = socket_class() socket.connect((EC2_HOST, 80)) socket.close() def test_is_managed(self): """ L{is_cloud_managed} returns True if the EC2 user-data contains Landscape instance information. It fetches the EC2 data with low timeouts. 
""" user_data = {"otps": ["otp1"], "exchange-url": "http://exchange", "ping-url": "http://ping"} self.responses = [dumps(user_data), "0"] self.mock_socket() self.mocker.replay() self.assertTrue(is_cloud_managed(self.fake_fetch)) self.assertEqual( self.urls, [(EC2_API + "/user-data", 5), (EC2_API + "/meta-data/ami-launch-index", 5)]) def test_is_managed_index(self): user_data = {"otps": ["otp1", "otp2"], "exchange-url": "http://exchange", "ping-url": "http://ping"} self.responses = [dumps(user_data), "1"] self.mock_socket() self.mocker.replay() self.assertTrue(is_cloud_managed(self.fake_fetch)) def test_is_managed_wrong_index(self): user_data = {"otps": ["otp1"], "exchange-url": "http://exchange", "ping-url": "http://ping"} self.responses = [dumps(user_data), "1"] self.mock_socket() self.mocker.replay() self.assertFalse(is_cloud_managed(self.fake_fetch)) def test_is_managed_exchange_url(self): user_data = {"otps": ["otp1"], "ping-url": "http://ping"} self.responses = [dumps(user_data), "0"] self.mock_socket() self.mocker.replay() self.assertFalse(is_cloud_managed(self.fake_fetch)) def test_is_managed_ping_url(self): user_data = {"otps": ["otp1"], "exchange-url": "http://exchange"} self.responses = [dumps(user_data), "0"] self.mock_socket() self.mocker.replay() self.assertFalse(is_cloud_managed(self.fake_fetch)) def test_is_managed_bpickle(self): self.responses = ["some other user data", "0"] self.mock_socket() self.mocker.replay() self.assertFalse(is_cloud_managed(self.fake_fetch)) def test_is_managed_no_data(self): self.responses = ["", "0"] self.mock_socket() self.mocker.replay() self.assertFalse(is_cloud_managed(self.fake_fetch)) def test_is_managed_fetch_not_found(self): def fake_fetch(url, connect_timeout=None): raise HTTPCodeError(404, "ohnoes") self.mock_socket() self.mocker.replay() self.assertFalse(is_cloud_managed(fake_fetch)) def test_is_managed_fetch_error(self): def fake_fetch(url, connect_timeout=None): raise FetchError(7, "couldn't connect to host") self.mock_socket() self.mocker.replay() self.assertFalse(is_cloud_managed(fake_fetch)) def test_waits_for_network(self): """ is_cloud_managed will wait until the network before trying to fetch the EC2 user data. """ user_data = {"otps": ["otp1"], "exchange-url": "http://exchange", "ping-url": "http://ping"} self.responses = [dumps(user_data), "0"] self.mocker.order() time_sleep = self.mocker.replace("time.sleep", passthrough=False) socket_class = self.mocker.replace("socket.socket", passthrough=False) socket_obj = socket_class() socket_obj.connect((EC2_HOST, 80)) self.mocker.throw(socket.error("woops")) time_sleep(1) socket_obj = socket_class() socket_obj.connect((EC2_HOST, 80)) self.mocker.result(None) socket_obj.close() self.mocker.replay() self.assertTrue(is_cloud_managed(self.fake_fetch)) def test_waiting_times_out(self): """ We'll only wait five minutes for the network to come up. """ def fake_fetch(url, connect_timeout=None): raise FetchError(7, "couldn't connect to host") self.mocker.order() time_sleep = self.mocker.replace("time.sleep", passthrough=False) time_time = self.mocker.replace("time.time", passthrough=False) time_time() self.mocker.result(100) socket_class = self.mocker.replace("socket.socket", passthrough=False) socket_obj = socket_class() socket_obj.connect((EC2_HOST, 80)) self.mocker.throw(socket.error("woops")) time_sleep(1) time_time() self.mocker.result(401) self.mocker.replay() # Mocking time.time is dangerous, because the test harness calls it. 
# So we explicitly reset mocker before returning from the test. try: self.assertFalse(is_cloud_managed(fake_fetch)) finally: self.mocker.reset() class ProvisioningRegistrationTest(RegistrationHandlerTestBase): def test_provisioned_machine_registration_with_otp(self): """ Register provisioned machines using an OTP. """ self.mstore.set_accepted_types(["register-provisioned-machine"]) self.config.account_name = "" self.config.provisioning_otp = "ohteepee" self.reactor.fire("pre-exchange") self.assertMessages([{"otp": "ohteepee", "timestamp": 0, "api": "3.2", "type": "register-provisioned-machine"}], self.mstore.get_pending_messages()) self.assertEqual(u"INFO: Queueing message to register with OTP as a" u" newly provisioned machine.", self.logfile.getvalue().strip()) self.exchanger.exchange() self.assertMessages([{"otp": "ohteepee", "timestamp": 0, "api": "3.2", "type": "register-provisioned-machine"}], self.transport.payloads[0]["messages"]) def test_provisioned_machine_registration_with_empty_otp(self): """ No message should be sent when an empty OTP is passed. """ self.mstore.set_accepted_types(["register-provisioned-machine"]) self.config.account_name = "" self.config.provisioning_otp = "" self.reactor.fire("pre-exchange") self.assertMessages([], self.mstore.get_pending_messages()) self.assertEqual(u"", self.logfile.getvalue().strip()) landscape-client-14.01/landscape/broker/tests/test_amp.py0000644000175000017500000002425712301414317023315 0ustar andreasandreasfrom landscape.lib.amp import MethodCallError from landscape.tests.helpers import LandscapeTest, DEFAULT_ACCEPTED_TYPES from landscape.broker.tests.helpers import ( RemoteBrokerHelper, RemoteClientHelper) class RemoteBrokerTest(LandscapeTest): helpers = [RemoteBrokerHelper] def test_ping(self): """ The L{RemoteBroker.ping} method calls the C{ping} method of the remote L{BrokerServer} instance and returns its result with a L{Deferred}. """ result = self.remote.ping() return self.assertSuccess(result, True) def test_register_client(self): """ The L{RemoteBroker.register_client} method forwards a registration request to the remote L{BrokerServer} object. """ self.broker.register_client = self.mocker.mock() self.expect(self.broker.register_client("client")) self.mocker.replay() result = self.remote.register_client("client") return self.assertSuccess(result) def test_send_message(self): """ The L{RemoteBroker.send_message} method calls the C{send_message} method of the remote L{BrokerServer} instance and returns its result with a L{Deferred}. """ message = {"type": "test"} self.mstore.set_accepted_types(["test"]) session_id = self.successResultOf(self.remote.get_session_id()) message_id = self.successResultOf( self.remote.send_message(message, session_id)) self.assertTrue(isinstance(message_id, int)) self.assertTrue(self.mstore.is_pending(message_id)) self.assertFalse(self.exchanger.is_urgent()) self.assertMessages(self.mstore.get_pending_messages(), [message]) def test_send_message_with_urgent(self): """ The L{RemoteBroker.send_message} method honors the urgent argument.
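A condensed sketch of the urgent path (a session id must be obtained first, as elsewhere in this suite)::

    session_id = self.successResultOf(self.remote.get_session_id())
    self.remote.send_message({"type": "test"}, session_id, urgent=True)
    # the broker marks the exchanger urgent: is_urgent() -> True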
""" message = {"type": "test"} self.mstore.set_accepted_types(["test"]) session_id = self.successResultOf(self.remote.get_session_id()) message_id = self.successResultOf(self.remote.send_message( message, session_id, urgent=True)) self.assertTrue(isinstance(message_id, int)) self.assertTrue(self.exchanger.is_urgent()) def test_is_message_pending(self): """ The L{RemoteBroker.is_message_pending} method calls the C{is_message_pending} method of the remote L{BrokerServer} instance and returns its result with a L{Deferred}. """ result = self.remote.is_message_pending(1234) return self.assertSuccess(result, False) def test_stop_clients(self): """ The L{RemoteBroker.stop_clients} method calls the C{stop_clients} method of the remote L{BrokerServer} instance and returns its result with a L{Deferred}. """ result = self.remote.stop_clients() return self.assertSuccess(result, None) def test_reload_configuration(self): """ The L{RemoteBroker.reload_configuration} method calls the C{reload_configuration} method of the remote L{BrokerServer} instance and returns its result with a L{Deferred}. """ result = self.remote.reload_configuration() return self.assertSuccess(result, None) def test_register(self): """ The L{RemoteBroker.register} method calls the C{register} method of the remote L{BrokerServer} instance and returns its result with a L{Deferred}. """ # This should make the registration succeed self.transport.responses.append([{"type": "set-id", "id": "abc", "insecure-id": "def"}]) result = self.remote.register() return self.assertSuccess(result, None) def test_get_accepted_message_types(self): """ The L{RemoteBroker.get_accepted_message_types} method calls the C{get_accepted_message_types} method of the remote L{BrokerServer} instance and returns its result with a L{Deferred}. """ result = self.remote.get_accepted_message_types() return self.assertSuccess(result, self.mstore.get_accepted_types()) def test_get_server_uuid(self): """ The L{RemoteBroker.get_server_uuid} method calls the C{get_server_uuid} method of the remote L{BrokerServer} instance and returns its result with a L{Deferred}. """ self.mstore.set_server_uuid("abcde") result = self.remote.get_server_uuid() return self.assertSuccess(result, "abcde") def test_register_client_accepted_message_type(self): """ The L{RemoteBroker.register_client_accepted_message_type} method calls the C{register_client_accepted_message_type} method of the remote L{BrokerServer} instance and returns its result with a L{Deferred}. """ def assert_response(response): self.assertEqual(response, None) self.assertEqual( self.exchanger.get_client_accepted_message_types(), sorted(["type"] + DEFAULT_ACCEPTED_TYPES)) result = self.remote.register_client_accepted_message_type("type") return result.addCallback(assert_response) def test_exit(self): """ The L{RemoteBroker.exit} method calls the C{exit} method of the remote L{BrokerServer} instance and returns its result with a L{Deferred}. """ result = self.remote.exit() return self.assertSuccess(result, None) def test_call_if_accepted(self): """ The L{RemoteBroker.call_if_accepted} method calls a function if the given message type is accepted. 
""" self.mstore.set_accepted_types(["test"]) function = self.mocker.mock() self.expect(function(123)).result("cool") self.mocker.replay() result = self.remote.call_if_accepted("test", function, 123) return self.assertSuccess(result, "cool") def test_call_if_accepted_with_not_accepted(self): """ The L{RemoteBroker.call_if_accepted} method doesn't do anything if the given message type is not accepted. """ function = lambda: 1 / 0 result = self.remote.call_if_accepted("test", function) return self.assertSuccess(result, None) def test_listen_events(self): """ L{RemoteBroker.listen_events} returns a deferred which fires when the first of the given events occurs in the broker reactor. """ deferred = self.remote.listen_events(["event1", "event2"]) self.reactor.call_later(0.05, self.reactor.fire, "event2") self.reactor.advance(0.05) self.remote._factory.fake_connection.flush() self.assertEqual("event2", self.successResultOf(deferred)) def test_call_on_events(self): """ L{RemoteBroker.call_on_events} fires the given callback when the first of the given events occurs in the broker reactor. """ callback1 = self.mocker.mock() self.expect(callback1()).count(0) callback2 = self.mocker.mock() self.expect(callback2()).result(123) self.mocker.replay() deferred = self.remote.call_on_event({"event1": callback1, "event2": callback2}) self.reactor.call_later(0.05, self.reactor.fire, "event2") self.reactor.advance(0.05) self.remote._factory.fake_connection.flush() self.assertEqual(123, self.successResultOf(deferred)) def test_fire_event(self): """ The L{RemoteBroker.fire_event} method fires an event in the broker reactor. """ callback = self.mocker.mock() callback() self.mocker.replay() self.reactor.call_on("event", callback) return self.assertSuccess(self.remote.fire_event("event")) def test_method_call_error(self): """ Trying to call an non-exposed broker method results in a failure. """ deferred = self.remote.get_clients() self.failureResultOf(deferred).trap(MethodCallError) class RemoteClientTest(LandscapeTest): helpers = [RemoteClientHelper] def test_ping(self): """ The L{RemoteClient.ping} method calls the C{ping} method of the remote L{BrokerClient} instance and returns its result with a L{Deferred}. """ result = self.remote_client.ping() return self.assertSuccess(result, True) def test_message(self): """ The L{RemoteClient.message} method calls the C{message} method of the remote L{BrokerClient} instance and returns its result with a L{Deferred}. """ handler = self.mocker.mock() handler({"type": "test"}) self.client.broker = self.mocker.mock() self.client.broker.register_client_accepted_message_type("test") self.mocker.replay() # We need to register a test message handler to let the dispatch # message method call succeed self.client.register_message("test", handler) result = self.remote_client.message({"type": "test"}) return self.assertSuccess(result, True) def test_fire_event(self): """ The L{RemoteClient.fire_event} method calls the C{fire_event} method of the remote L{BrokerClient} instance and returns its result with a L{Deferred}. """ callback = self.mocker.mock() callback(True, kwarg=2) self.mocker.replay() self.client_reactor.call_on("event", callback) result = self.remote_client.fire_event("event", True, kwarg=2) return self.assertSuccess(result, [None]) def test_exit(self): """ The L{RemoteClient.exit} method calls the C{exit} method of the remote L{BrokerClient} instance and returns its result with a L{Deferred}. 
""" result = self.remote_client.exit() return self.assertSuccess(result, None) def test_method_call_error(self): """ Trying to call an non-exposed client method results in a failure. """ deferred = self.remote_client.get_plugins() self.failureResultOf(deferred).trap(MethodCallError) landscape-client-14.01/landscape/broker/tests/test_service.py0000644000175000017500000000623112301414317024170 0ustar andreasandreasimport os from landscape.tests.helpers import LandscapeTest from landscape.broker.tests.helpers import BrokerConfigurationHelper from landscape.broker.service import BrokerService from landscape.broker.transport import HTTPTransport from landscape.broker.amp import RemoteBrokerConnector from landscape.reactor import FakeReactor class BrokerServiceTest(LandscapeTest): helpers = [BrokerConfigurationHelper] def setUp(self): super(BrokerServiceTest, self).setUp() class FakeBrokerService(BrokerService): reactor_factory = FakeReactor self.service = FakeBrokerService(self.config) def test_persist(self): """ A L{BrokerService} instance has a proper C{persist} attribute. """ self.assertEqual( self.service.persist.filename, os.path.join(self.config.data_path, "broker.bpickle")) def test_transport(self): """ A L{BrokerService} instance has a proper C{transport} attribute. """ self.assertTrue(isinstance(self.service.transport, HTTPTransport)) self.assertEqual(self.service.transport.get_url(), self.config.url) def test_message_store(self): """ A L{BrokerService} instance has a proper C{message_store} attribute. """ self.assertEqual(self.service.message_store.get_accepted_types(), ()) def test_identity(self): """ A L{BrokerService} instance has a proper C{identity} attribute. """ self.assertEqual(self.service.identity.account_name, "some_account") def test_pinger(self): """ A L{BrokerService} instance has a proper C{pinger} attribute. Its interval value is configured with the C{ping_interval} value. """ self.assertEqual(self.service.pinger.get_url(), self.config.ping_url) self.assertEqual(30, self.service.pinger.get_interval()) self.config.ping_interval = 20 service = BrokerService(self.config) self.assertEqual(20, service.pinger.get_interval()) def test_registration(self): """ A L{BrokerService} instance has a proper C{registration} attribute. """ self.assertEqual(self.service.registration.should_register(), False) def test_start_stop(self): """ The L{BrokerService.startService} method makes the process start listening to the broker socket, and starts the L{Exchanger} and the L{Pinger} as well. 
""" self.service.exchanger.start = self.mocker.mock() self.service.exchanger.start() self.service.pinger.start = self.mocker.mock() self.service.pinger.start() self.service.exchanger.stop = self.mocker.mock() self.service.exchanger.stop() self.mocker.replay() self.service.startService() reactor = FakeReactor() connector = RemoteBrokerConnector(reactor, self.config) connected = connector.connect() connected.addCallback(lambda remote: remote.get_server_uuid()) connected.addCallback(lambda x: connector.disconnect()) connected.addCallback(lambda x: self.service.stopService()) return connected landscape-client-14.01/landscape/broker/tests/test_server.py0000644000175000017500000005355412301414317024050 0ustar andreasandreasfrom twisted.internet.defer import succeed, fail from configobj import ConfigObj from landscape.manager.manager import FAILED from landscape.tests.helpers import LandscapeTest, DEFAULT_ACCEPTED_TYPES from landscape.broker.tests.helpers import ( BrokerServerHelper, RemoteClientHelper) from landscape.broker.tests.test_ping import FakePageGetter class FakeClient(object): pass class FakeCreator(object): def __init__(self, reactor, config): pass def connect(self): return succeed(FakeClient()) class BrokerServerTest(LandscapeTest): helpers = [BrokerServerHelper] def test_ping(self): """ The L{BrokerServer.ping} simply returns C{True}. """ self.assertTrue(self.broker.ping()) def test_get_session_id(self): """ The L{BrokerServer.get_session_id} method gets the same session ID from the L{MessageStore} until it is dropped. """ session_id1 = self.broker.get_session_id() session_id2 = self.broker.get_session_id() self.assertEqual(session_id1, session_id2) self.mstore.drop_session_ids() session_id3 = self.broker.get_session_id() self.assertNotEqual(session_id1, session_id3) def test_get_session_id_with_scope(self): """ The L{BrokerServer.get_session_id} method gets the same session ID from the L{MessageStore} for the same scope, but a new session ID for a new scope. """ disk_session_id1 = self.broker.get_session_id(scope="disk") disk_session_id2 = self.broker.get_session_id(scope="disk") users_session_id = self.broker.get_session_id(scope="users") self.assertEqual(disk_session_id1, disk_session_id2) self.assertNotEqual(disk_session_id1, users_session_id) def test_send_message(self): """ The L{BrokerServer.send_message} method forwards a message to the broker's exchanger. """ message = {"type": "test"} self.mstore.set_accepted_types(["test"]) session_id = self.broker.get_session_id() self.broker.send_message(message, session_id) self.assertMessages(self.mstore.get_pending_messages(), [message]) self.assertFalse(self.exchanger.is_urgent()) def test_send_message_with_urgent(self): """ The L{BrokerServer.send_message} can optionally specify the urgency of the message. """ message = {"type": "test"} self.mstore.set_accepted_types(["test"]) session_id = self.broker.get_session_id() self.broker.send_message(message, session_id, urgent=True) self.assertMessages(self.mstore.get_pending_messages(), [message]) self.assertTrue(self.exchanger.is_urgent()) def test_send_message_wont_send_with_invalid_session_id(self): """ The L{BrokerServer.send_message} call will silently drop messages that have invalid session ids as they must have been generated prior to the last resync request - this guards against out of context data being sent to the server. 
""" message = {"type": "test"} self.mstore.set_accepted_types(["test"]) self.broker.send_message(message, "Not Valid") self.assertMessages(self.mstore.get_pending_messages(), []) def test_send_message_with_none_as_session_id_raises(self): """ We should never call C{send_message} without first obtaining a session id. Attempts to do so should raise to alert the developer to their mistake. """ message = {"type": "test"} self.mstore.set_accepted_types(["test"]) self.assertRaises( RuntimeError, self.broker.send_message, message, None) def test_is_pending(self): """ The L{BrokerServer.is_pending} method indicates if a message with the given id is pending waiting for delivery in the message store. """ self.assertFalse(self.broker.is_message_pending(123)) message = {"type": "test"} self.mstore.set_accepted_types(["test"]) session_id = self.broker.get_session_id() message_id = self.broker.send_message(message, session_id) self.assertTrue(self.broker.is_message_pending(message_id)) def test_register_client(self): """ The L{BrokerServer.register_client} method can be used to register client components that need to communicate with the server. After the registration they can be fetched with L{BrokerServer.get_clients}. """ self.assertEqual(self.broker.get_clients(), []) self.assertEqual(self.broker.get_client("test"), None) self.assertEqual(self.broker.get_connectors(), []) self.assertEqual(self.broker.get_connector("test"), None) def assert_registered(ignored): self.assertEqual(len(self.broker.get_clients()), 1) self.assertEqual(len(self.broker.get_connectors()), 1) self.assertTrue( isinstance(self.broker.get_client("test"), FakeClient)) self.assertTrue( isinstance(self.broker.get_connector("test"), FakeCreator)) self.broker.connectors_registry = {"test": FakeCreator} result = self.broker.register_client("test") return result.addCallback(assert_registered) def test_stop_clients(self): """ The L{BrokerServer.stop_clients} method calls the C{exit} method of each registered client, and returns a deferred resulting in C{None} if all C{exit} calls were successful. """ self.broker.connectors_registry = {"foo": FakeCreator, "bar": FakeCreator} self.broker.register_client("foo") self.broker.register_client("bar") for client in self.broker.get_clients(): client.exit = self.mocker.mock() self.expect(client.exit()).result(succeed(None)) self.mocker.replay() return self.assertSuccess(self.broker.stop_clients()) def test_stop_clients_with_failure(self): """ The L{BrokerServer.stop_clients} method calls the C{exit} method of each registered client, and returns a deferred resulting in C{None} if all C{exit} calls were successful. """ self.broker.connectors_registry = {"foo": FakeCreator, "bar": FakeCreator} self.broker.register_client("foo") self.broker.register_client("bar") [client1, client2] = self.broker.get_clients() client1.exit = self.mocker.mock() client2.exit = self.mocker.mock() self.expect(client1.exit()).result(succeed(None)) self.expect(client2.exit()).result(fail(Exception())) self.mocker.replay() return self.assertFailure(self.broker.stop_clients(), Exception) def test_reload_configuration(self): """ The L{BrokerServer.reload_configuration} method forces the config file associated with the broker server to be reloaded. 
""" config_obj = ConfigObj(self.config_filename) config_obj["client"]["computer_title"] = "New Title" config_obj.write() result = self.broker.reload_configuration() result.addCallback(lambda x: self.assertEqual( self.config.computer_title, "New Title")) return result def test_reload_configuration_stops_clients(self): """ The L{BrokerServer.reload_configuration} method forces the config file associated with the broker server to be reloaded. """ self.broker.connectors_registry = {"foo": FakeCreator, "bar": FakeCreator} self.broker.register_client("foo") self.broker.register_client("bar") for client in self.broker.get_clients(): client.exit = self.mocker.mock() self.expect(client.exit()).result(succeed(None)) self.mocker.replay() return self.assertSuccess(self.broker.reload_configuration()) def test_register(self): """ The L{BrokerServer.register} method attempts to register with the Landscape server and waits for a C{set-id} message from it. """ registered = self.broker.register() # This should callback the deferred. self.exchanger.handle_message({"type": "set-id", "id": "abc", "insecure-id": "def"}) return self.assertSuccess(registered) def test_get_accepted_types_empty(self): """ The L{BrokerServer.get_accepted_message_types} returns an empty list if no message types are accepted by the Landscape server. """ self.mstore.set_accepted_types([]) self.assertEqual(self.broker.get_accepted_message_types(), []) def test_get_accepted_message_types(self): """ The L{BrokerServer.get_accepted_message_types} returns the list of message types accepted by the Landscape server. """ self.mstore.set_accepted_types(["foo", "bar"]) self.assertEqual(sorted(self.broker.get_accepted_message_types()), ["bar", "foo"]) def test_get_server_uuid_with_unset_uuid(self): """ The L{BrokerServer.get_server_uuid} method returns C{None} if the uuid of the Landscape server we're pointing at is unknown. """ self.assertEqual(self.broker.get_server_uuid(), None) def test_get_server_uuid(self): """ The L{BrokerServer.get_server_uuid} method returns the uuid of the Landscape server we're pointing at. """ self.mstore.set_server_uuid("the-uuid") self.assertEqual(self.broker.get_server_uuid(), "the-uuid") def test_register_client_accepted_message_type(self): """ The L{BrokerServer.register_client_accepted_message_type} method can register new message types accepted by this Landscape client. """ self.broker.register_client_accepted_message_type("type1") self.broker.register_client_accepted_message_type("type2") self.assertEqual(self.exchanger.get_client_accepted_message_types(), sorted(["type1", "type2"] + DEFAULT_ACCEPTED_TYPES)) def test_fire_event(self): """ The L{BrokerServer.fire_event} method fires an event in the broker reactor. """ callback = self.mocker.mock() callback() self.mocker.replay() self.reactor.call_on("event", callback) self.broker.fire_event("event") def test_exit(self): """ The L{BrokerServer.exit} method stops all registered clients. """ self.broker.connectors_registry = {"foo": FakeCreator, "bar": FakeCreator} self.broker.register_client("foo") self.broker.register_client("bar") for client in self.broker.get_clients(): client.exit = self.mocker.mock() self.expect(client.exit()).result(succeed(None)) self.mocker.replay() return self.assertSuccess(self.broker.exit()) def test_exit_exits_when_other_daemons_blow_up(self): """ If a broker client blow up in its exit() methods, exit should ignore the error and exit anyway. 
""" self.broker.connectors_registry = {"foo": FakeCreator} self.broker.register_client("foo") [client] = self.broker.get_clients() client.exit = self.mocker.mock() self.expect(client.exit()).result(fail(ZeroDivisionError())) self.mocker.replay() def assert_event(ignored): self.reactor.advance(1) result = self.broker.exit() return result.addCallback(assert_event) def test_exit_fires_reactor_events(self): """ The L{BrokerServer.exit} method stops the reactor after having requested all broker clients to shutdown. """ self.broker.connectors_registry = {"foo": FakeCreator} self.broker.register_client("foo") [client] = self.broker.get_clients() self.mocker.order() client.exit = self.mocker.mock() self.reactor.stop = self.mocker.mock() self.broker.stop_exchanger() self.expect(client.exit()).result(fail(ZeroDivisionError())) self.reactor.stop() self.mocker.replay() def assert_stopped(ignored): self.reactor.advance(1) result = self.broker.exit() return result.addCallback(assert_stopped) def test_listen_events(self): """ The L{BrokerServer.listen_events} method returns a deferred which is fired when the first of the given events occurs. """ result = self.broker.listen_events(["event1", "event2"]) self.reactor.fire("event2") return self.assertSuccess(result, "event2") def test_listen_event_only_once(self): """ The L{BrokerServer.listen_events} listens only to one occurrence of the given events. """ result = self.broker.listen_events(["event"]) self.assertEqual(self.reactor.fire("event"), [None]) self.assertEqual(self.reactor.fire("event"), []) return self.assertSuccess(result, "event") def test_listen_events_call_cancellation(self): """ The L{BrokerServer.listen_events} cleanly cancels event calls for unfired events, without interfering with unrelated handlers. """ self.broker.listen_events(["event"]) self.reactor.call_on("event", lambda: 123) # Unrelated handler self.assertEqual(self.reactor.fire("event"), [None, 123]) def test_stop_exchanger(self): """ The L{BrokerServer.stop_exchanger} stops the exchanger so no further messages are sent or consumed. """ self.pinger.start() self.exchanger.schedule_exchange() self.broker.stop_exchanger() self.reactor.advance(self.config.exchange_interval) self.assertFalse(self.transport.payloads) def test_stop_exchanger_stops_pinger(self): """ The L{BrokerServer.stop_exchanger} stops the pinger and no further pings are performed. """ url = "http://example.com/mysuperping" page_getter = FakePageGetter(None) self.pinger.start() self.config.ping_url = url self.pinger._ping_client.get_page = page_getter.get_page self.identity.insecure_id = 23 self.broker.stop_exchanger() self.reactor.advance(self.config.exchange_interval) self.assertEqual([], page_getter.fetches) class EventTest(LandscapeTest): helpers = [RemoteClientHelper] def test_resynchronize(self): """ The L{BrokerServer.resynchronize} method broadcasts a C{resynchronize} event to all connected clients. """ callback = self.mocker.mock() self.expect(callback(["foo"])).result("foo") self.mocker.replay() self.client_reactor.call_on("resynchronize", callback) return self.assertSuccess(self.broker.resynchronize(["foo"]), [["foo"]]) def test_impending_exchange(self): """ The L{BrokerServer.impending_exchange} method broadcasts an C{impending-exchange} event to all connected clients. 
""" plugin = self.mocker.mock() plugin.register(self.client) plugin.exchange() self.mocker.replay() self.client.add(plugin) return self.assertSuccess(self.broker.impending_exchange(), [[None]]) def test_broker_started(self): """ The L{BrokerServer.broker_started} method broadcasts a C{broker-started} event to all connected clients, which makes them re-registered any previously registered accepted message type. """ def assert_broker_started(ignored): self.remote.register_client_accepted_message_type = \ self.mocker.mock() self.remote.register_client_accepted_message_type("type") self.remote.register_client = self.mocker.mock() self.remote.register_client("client") self.mocker.replay() return self.assertSuccess(self.broker.broker_reconnect(), [[None]]) registered = self.client.register_message("type", lambda x: None) return registered.addCallback(assert_broker_started) def test_server_uuid_changed(self): """ The L{BrokerServer.server_uuid_changed} method broadcasts a C{server-uuid-changed} event to all connected clients. """ callback = self.mocker.mock() callback(None, "abc") self.mocker.replay() self.client_reactor.call_on("server-uuid-changed", callback) return self.assertSuccess(self.broker.server_uuid_changed(None, "abc"), [[None]]) def test_message_type_acceptance_changed(self): """ The L{BrokerServer.message_type_acceptance_changed} method broadcasts a C{message-type-acceptance-changed} event to all connected clients. """ callback = self.mocker.mock() callback(True) self.mocker.replay() self.client_reactor.call_on( ("message-type-acceptance-changed", "type"), callback) result = self.broker.message_type_acceptance_changed("type", True) return self.assertSuccess(result, [[None]]) def test_package_data_changed(self): """ The L{BrokerServer.package_data_changed} method broadcasts a C{package-data-changed} event to all connected clients. """ callback = self.mocker.mock() callback() self.mocker.replay() self.client_reactor.call_on("package-data-changed", callback) return self.assertSuccess(self.broker.package_data_changed(), [[None]]) class HandlersTest(LandscapeTest): helpers = [BrokerServerHelper] def setUp(self): super(HandlersTest, self).setUp() self.broker.connectors_registry = {"test": FakeCreator} self.broker.register_client("test") self.client = self.broker.get_client("test") def test_message(self): """ The L{BrokerServer} calls the C{message} method on all registered plugins when messages are received from the server. """ message = {"type": "foobar", "value": 42} self.client.message = self.mocker.mock() self.client.message(message) self.mocker.result(succeed(True)) self.mocker.replay() self.transport.responses.append([{"type": "foobar", "value": 42}]) self.exchanger.exchange() def test_message_failed_operation_without_plugins(self): """ When there are no broker plugins available to handle a message, an operation-result message should be sent back to the server indicating a failure. 
""" self.log_helper.ignore_errors("Nobody handled the foobar message.") self.mstore.set_accepted_types(["operation-result"]) message = {"type": "foobar", "operation-id": 4} self.client.message = self.mocker.mock() self.client.message(message) self.mocker.result(succeed(False)) self.mocker.replay() result = self.reactor.fire("message", message) result = [i for i in result if i is not None][0] class StartsWith(object): def __eq__(self, other): return other.startswith( "Landscape client failed to handle this request (foobar)") def broadcasted(ignored): self.assertMessages( self.mstore.get_pending_messages(), [{"type": "operation-result", "status": FAILED, "result-text": StartsWith(), "operation-id": 4}]) result.addCallback(broadcasted) return result def test_impending_exchange(self): """ When an C{impending-exchange} event is fired by the reactor, the broker broadcasts it to its clients. """ self.client.fire_event = self.mocker.mock() self.client.fire_event("impending-exchange") self.mocker.result(succeed(None)) self.mocker.replay() self.reactor.fire("impending-exchange") def test_message_type_acceptance_changed(self): """ When a C{message-type-acceptance-changed} event is fired by the reactor, the broker broadcasts it to its clients. """ self.client.fire_event = self.mocker.mock() self.client.fire_event("message-type-acceptance-changed", "test", True) self.mocker.result(succeed(None)) self.mocker.replay() self.reactor.fire("message-type-acceptance-changed", "test", True) def test_server_uuid_changed(self): """ When a C{server-uuid-changed} event is fired by the reactor, the broker broadcasts it to its clients. """ self.client.fire_event = self.mocker.mock() self.client.fire_event("server-uuid-changed", None, 123) self.mocker.result(succeed(None)) self.mocker.replay() self.reactor.fire("server-uuid-changed", None, 123) def test_package_data_changed(self): """ When a C{package-data-changed} event is fired by the reactor, the broker broadcasts it to its clients. """ self.client.fire_event = self.mocker.mock() self.client.fire_event("package-data-changed") self.mocker.result(succeed(None)) self.mocker.replay() self.reactor.fire("package-data-changed") def test_resynchronize_clients(self): """ When a C{resynchronize} event is fired by the reactor, the broker broadcasts it to its clients. 
""" self.client.fire_event = self.mocker.mock() self.client.fire_event("resynchronize") self.mocker.result(succeed(None)) self.mocker.replay() self.reactor.fire("resynchronize-clients") landscape-client-14.01/landscape/broker/tests/badpublic.ssl0000644000175000017500000000254312301414317023571 0ustar andreasandreas-----BEGIN CERTIFICATE----- MIIDzjCCAzegAwIBAgIJANqT3vXxSVFjMA0GCSqGSIb3DQEBBQUAMIGhMQswCQYD VQQGEwJCUjEPMA0GA1UECBMGUGFyYW5hMREwDwYDVQQHEwhDdXJpdGliYTEhMB8G A1UEChMYRmFrZSBMYW5kc2NhcGUgKFRlc3RpbmcpMREwDwYDVQQLEwhTZWN1cml0 eTESMBAGA1UEAxMJbG9jYWxob3N0MSQwIgYJKoZIhvcNAQkBFhVhbmRyZWFzQGNh bm9uaWNhbC5jb20wHhcNMDkwMTA5MTUyNTAwWhcNMTkwMTA3MTUyNTAwWjCBoTEL MAkGA1UEBhMCQlIxDzANBgNVBAgTBlBhcmFuYTERMA8GA1UEBxMIQ3VyaXRpYmEx ITAfBgNVBAoTGEZha2UgTGFuZHNjYXBlIChUZXN0aW5nKTERMA8GA1UECxMIU2Vj dXJpdHkxEjAQBgNVBAMTCWxvY2FsaG9zdDEkMCIGCSqGSIb3DQEJARYVYW5kcmVh c0BjYW5vbmljYWwuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDGYFWP 2Ine2OFIPjX+Tu+S403KW63EWq/I1DYXiezLoUpYPed30tAkAXH1gOwQZbARFlUn 0LgvXDSpuQLvgKQZwP/e1D8SvZUZ6nexW+aYlPE9kjd1dhK1xpe1h5y09AjCz02x xzcFzrJrJ47uU7vV+FGArE8FFh3hO+dz0/PmZQIDAQABo4IBCjCCAQYwHQYDVR0O BBYEFF4A8+YHCLAt19OtWTjIjBKzLUokMIHWBgNVHSMEgc4wgcuAFF4A8+YHCLAt 19OtWTjIjBKzLUokoYGnpIGkMIGhMQswCQYDVQQGEwJCUjEPMA0GA1UECBMGUGFy YW5hMREwDwYDVQQHEwhDdXJpdGliYTEhMB8GA1UEChMYRmFrZSBMYW5kc2NhcGUg KFRlc3RpbmcpMREwDwYDVQQLEwhTZWN1cml0eTESMBAGA1UEAxMJbG9jYWxob3N0 MSQwIgYJKoZIhvcNAQkBFhVhbmRyZWFzQGNhbm9uaWNhbC5jb22CCQDak9718UlR YzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GBABszkA3CCzt+nTOX+A7/ I98DvI0W1Ss0J+Tq+diLr+kw6Z5ZTj5hrIS/x6XhVHjpim4724UBXA0Sels4JXbw hhJovuncExce316gAol/9eEzTffZ9mt1jZQy9LL7IAENiobnsj2F65zNaJzXp5UC rE/h/xIxz9rAmXtVOWHqZLcw -----END CERTIFICATE----- landscape-client-14.01/landscape/broker/tests/private.ssl0000644000175000017500000000156712301414317023323 0ustar andreasandreas-----BEGIN RSA PRIVATE KEY----- MIICWwIBAAKBgQDX2VNEDZHtl5nimNocshar8pBmjqiGn9olCR2LcKifuJY4bFTg qib+Rr3v2DwDTbOMaquRSxFgwLJLCug3WclsGrYSPIsFCx+k3XhqM61JXEwrKuIp Js893XHkeg3SEFua/oVfDxNfJttoHW3FbsnDx5964kYwGExjJcH73GInUQIDAQAB AoGASiM9NEys6Lx/gJMbp2uL2fdwnak2PTc+iCX/XduOL34JKswawyfuSLwnlO/i fQf9OaeR0k/EYkUNeDUA2bIfOj6wWS8tamnX4fxL7A20y5VyqMMah8mcerZgtPdS 7ZtYCbeijWSKpHgjALc2Hym7R68WZI+IHe0DQkcW6WxOMFkCQQD2jqHZn/Qtd62u mWVwIx6G7+Po5vzd86KyWWftdUtVCY9DmiX1rmWXbJhLnmaKCLkmHxyBvw7Biarr ZnCAafebAkEA4B2dSpLi7bAzjCa7JBtzV9kr1FVZOl2vA+9BqTAjCQu0b9VDEm8V x0061Z8rN7Og3ECGtKH/r3/4RnHUPpwJgwJAdyZQkvHYt4xJc8IPolRmcUFGu4u9 Eammq1fHgJqZcBvxjvLUe1jvIXFKW+jNltFGYGTSiuUAxYi4/49+uJ/9FwJAGBB1 /DTrcvQxhMH/5C+iYfNqtmD3tMGscjK1jTIjAOyl0kBG9GrDHuRXBesSW+fIxP2U uT6P0std4EqGrLZaewJAHT0n/3tXnsPjj+BMlC4ZkRKgPJ4I7zTU1XSlLY5zbMoV NvtHLlq7ttiarsH95xyge69uV1/zJVj/IiS71YY9PQ== -----END RSA PRIVATE KEY----- landscape-client-14.01/landscape/broker/tests/badprivate.ssl0000644000175000017500000000157312301414317023767 0ustar andreasandreas-----BEGIN RSA PRIVATE KEY----- MIICXgIBAAKBgQDGYFWP2Ine2OFIPjX+Tu+S403KW63EWq/I1DYXiezLoUpYPed3 0tAkAXH1gOwQZbARFlUn0LgvXDSpuQLvgKQZwP/e1D8SvZUZ6nexW+aYlPE9kjd1 dhK1xpe1h5y09AjCz02xxzcFzrJrJ47uU7vV+FGArE8FFh3hO+dz0/PmZQIDAQAB AoGBAKfv+983yJfgcO9QwzLULlrilQNfk36r6y6QAG7y84T7uU10spSs4kno80mL 58yF2YTNrC91scdePrMEDikldUVcCqtPYcZKHyw5+4aGaDDO244tznexOQnQcNIe 2BbLFuh+jmJpoFIY/H7EsLQQzn6+6dGPnYGBQfiyitWfAXRNAkEA/ShQkYCRAHgq g6WBIYsw/ISQydhiMiKrL2ZUXERT+pWU9MoSdMskgyMi3S7wzwJQXkrHA36q8QkL +H8n5K+f5wJBAMiajfEtv0wRW0awX40qJtuqW3cSKeGHBH9mMObcRJd5OcK6giC/ Cc5st/ZcuE/8i4r44DfeC+cwY6QdIqI8rdMCQQCKuq78LWJIyZEyt12+ThK4LsVR d1zIcKsyvHb6YQ9MQPBx/NKEYlZN7tFKOFEKgBAevAe3aJCwqe5/bN8luQB9AkEA 
uQVD8bR+AgzoIPS/zJWaLXSc09/e3PIJBfAdHnD+mq7mxWH8b3OD+e5wZjvyi2Ok 2NLfCug0FlGdNVrh/Lz2nQJATdcNvHNzJcWOHe05lo+xAqkjz73FWGpPNrdXRigG YnjIsZVy4k48xIxPhT2rC44yo1iPEP5EnHCE2bLyUlTAYA== -----END RSA PRIVATE KEY----- landscape-client-14.01/landscape/broker/tests/test_exchange.py0000644000175000017500000013441612301414317024321 0ustar andreasandreasfrom landscape import SERVER_API, CLIENT_API from landscape.lib.persist import Persist from landscape.lib.hashlib import md5 from landscape.lib.fetch import fetch_async from landscape.schema import Message, Int from landscape.broker.config import BrokerConfiguration from landscape.broker.exchange import get_accepted_types_diff, MessageExchange from landscape.broker.transport import FakeTransport from landscape.broker.store import MessageStore from landscape.broker.ping import Pinger from landscape.broker.registration import RegistrationHandler from landscape.tests.helpers import (LandscapeTest, DEFAULT_ACCEPTED_TYPES) from landscape.tests.mocker import MATCH from landscape.broker.tests.helpers import ExchangeHelper from landscape.broker.server import BrokerServer class RaisingTransport(object): def get_url(self2): return "" def exchange(self2, *args): raise RuntimeError("Failed to communicate.") class MessageExchangeTest(LandscapeTest): helpers = [ExchangeHelper] def setUp(self): super(MessageExchangeTest, self).setUp() self.mstore.add_schema(Message("empty", {})) self.mstore.add_schema(Message("data", {"data": Int()})) self.mstore.add_schema(Message("holdme", {})) self.identity.secure_id = 'needs-to-be-set-for-tests-to-pass' def wait_for_exchange(self, urgent=False, factor=1, delta=0): if urgent: seconds = self.config.urgent_exchange_interval else: seconds = self.config.exchange_interval self.reactor.advance(seconds * factor + delta) def test_resynchronize_causes_urgent_exchange(self): """ A 'resynchronize-clients' messages causes an urgent exchange to be scheduled. """ self.assertFalse(self.exchanger.is_urgent()) self.reactor.fire("resynchronize-clients") self.assertTrue(self.exchanger.is_urgent()) def test_that_resynchronize_drops_session_ids(self): """ When a resynchronisation event occurs with global scope all existing session IDs are expired, so any new messages being sent with those IDs will be discarded. """ broker = BrokerServer(self.config, self.reactor, self.exchanger, None, self.mstore, None) disk_session_id = self.mstore.get_session_id(scope="disk") package_session_id = self.mstore.get_session_id(scope="package") self.mstore.set_accepted_types(["empty"]) global_scope = [] self.reactor.fire("resynchronize-clients", global_scope) broker.send_message({"type": "empty"}, disk_session_id) broker.send_message({"type": "empty"}, package_session_id) self.exchanger.exchange() messages = self.transport.payloads[0]["messages"] self.assertMessages(messages, []) def test_that_resynchronize_drops_scoped_session_ids_only(self): """ When a resynchronisation event occurs with a scope existing session IDs for that scope are expired, all other session IDs are unaffected. 
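A condensed sketch of the scoping rule (only ids in the fired scope expire)::

    disk_id = self.mstore.get_session_id(scope="disk")
    package_id = self.mstore.get_session_id(scope="package")
    self.reactor.fire("resynchronize-clients", ["disk"])
    # disk_id is now rejected; package_id still delivers messages.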
""" broker = BrokerServer(self.config, self.reactor, self.exchanger, None, self.mstore, None) disk_session_id = self.mstore.get_session_id(scope="disk") package_session_id = self.mstore.get_session_id(scope="package") self.mstore.set_accepted_types(["empty"]) disk_scope = ["disk"] self.reactor.fire("resynchronize-clients", disk_scope) broker.send_message({"type": "empty"}, disk_session_id) broker.send_message({"type": "empty"}, package_session_id) self.exchanger.exchange() messages = self.transport.payloads[0]["messages"] self.assertMessages(messages, [{"type": "empty"}]) def test_that_resynchronize_clears_message_blackhole(self): """ When a resynchronisation event occurs the block on new messages being stored is lifted. """ self.reactor.fire("resynchronize-clients", []) persist = Persist(filename=self.persist_filename) self.assertFalse(persist.get("blackhole-messages")) def test_send(self): """ The send method should cause a message to show up in the next exchange. """ self.mstore.set_accepted_types(["empty"]) self.exchanger.send({"type": "empty"}) self.exchanger.exchange() self.assertEqual(len(self.transport.payloads), 1) messages = self.transport.payloads[0]["messages"] self.assertEqual(messages, [{"type": "empty", "timestamp": 0, "api": SERVER_API}]) def test_send_urgent(self): """ Sending a message with the urgent flag should schedule an urgent exchange. """ self.mstore.set_accepted_types(["empty"]) self.exchanger.send({"type": "empty"}, urgent=True) self.wait_for_exchange(urgent=True) self.assertEqual(len(self.transport.payloads), 1) self.assertMessages(self.transport.payloads[0]["messages"], [{"type": "empty"}]) def test_send_urgent_wont_reschedule(self): """ If an urgent exchange is already scheduled, adding another urgent message shouldn't reschedule the exchange forward. """ self.mstore.set_accepted_types(["empty"]) self.exchanger.send({"type": "empty"}, urgent=True) self.wait_for_exchange(urgent=True, factor=0.5) self.exchanger.send({"type": "empty"}, urgent=True) self.wait_for_exchange(urgent=True, factor=0.5) self.assertEqual(len(self.transport.payloads), 1) self.assertMessages(self.transport.payloads[0]["messages"], [{"type": "empty"}, {"type": "empty"}]) def test_send_returns_message_id(self): """ The send method should return the message id, as returned by add(). """ self.mstore.set_accepted_types(["empty"]) message_id = self.exchanger.send({"type": "empty"}) self.assertTrue(self.mstore.is_pending(message_id)) self.mstore.add_pending_offset(1) self.assertFalse(self.mstore.is_pending(message_id)) def test_wb_include_accepted_types(self): """ Every payload from the client needs to specify an ID which represents the types that we think the server wants. """ payload = self.exchanger._make_payload() self.assertIn("accepted-types", payload) self.assertEqual(payload["accepted-types"], md5("").digest()) def test_handle_message_sets_accepted_types(self): """ An incoming "accepted-types" message should set the accepted types. 
""" self.exchanger.handle_message( {"type": "accepted-types", "types": ["foo"]}) self.assertEqual(self.mstore.get_accepted_types(), ["foo"]) def test_message_type_acceptance_changed_event(self): stash = [] def callback(type, accepted): stash.append((type, accepted)) self.reactor.call_on("message-type-acceptance-changed", callback) self.exchanger.handle_message( {"type": "accepted-types", "types": ["a", "b"]}) self.exchanger.handle_message( {"type": "accepted-types", "types": ["b", "c"]}) self.assertEqual(stash, [("a", True), ("b", True), ("a", False), ("c", True)]) def test_wb_accepted_types_roundtrip(self): """ Telling the client to set the accepted types with a message should affect its future payloads. """ self.exchanger.handle_message( {"type": "accepted-types", "types": ["ack", "bar"]}) payload = self.exchanger._make_payload() self.assertIn("accepted-types", payload) self.assertEqual(payload["accepted-types"], md5("ack;bar").digest()) def test_accepted_types_causes_urgent_if_held_messages_exist(self): """ If an accepted-types message makes available a type for which we have a held message, an urgent exchange should occur. """ self.exchanger.send({"type": "holdme"}) self.assertEqual(self.mstore.get_pending_messages(), []) self.exchanger.handle_message( {"type": "accepted-types", "types": ["holdme"]}) self.wait_for_exchange(urgent=True) self.assertEqual(len(self.transport.payloads), 1) self.assertMessages(self.transport.payloads[0]["messages"], [{"type": "holdme"}]) def test_accepted_types_no_urgent_without_held(self): """ If an accepted-types message does *not* "unhold" any exist messages, then no urgent exchange should occur. """ self.exchanger.send({"type": "holdme"}) self.assertEqual(self.transport.payloads, []) self.reactor.fire("message", {"type": "accepted-types", "types": ["irrelevant"]}) self.assertEqual(len(self.transport.payloads), 0) def test_sequence_is_committed_immediately(self): """ The MessageStore should be committed by the MessageExchange as soon as possible after setting the pending offset and sequence. """ self.mstore.set_accepted_types(["empty"]) # We'll check that the message store has been saved by the time a # message handler gets called. self.transport.responses.append([{"type": "inbound"}]) self.exchanger.send({"type": "empty"}) handled = [] def handler(message): persist = Persist(filename=self.persist_filename) store = MessageStore(persist, self.config.message_store_path) self.assertEqual(store.get_pending_offset(), 1) self.assertEqual(store.get_sequence(), 1) handled.append(True) self.exchanger.register_message("inbound", handler) self.exchanger.exchange() self.assertEqual(handled, [True], self.logfile.getvalue()) def test_messages_from_server_commit(self): """ The Exchange should commit the message store after processing each message. """ self.transport.responses.append([{"type": "inbound"}] * 3) handled = [] self.message_counter = 0 def handler(message): Persist(filename=self.persist_filename) store = MessageStore(self.persist, self.config.message_store_path) self.assertEqual(store.get_server_sequence(), self.message_counter) self.message_counter += 1 handled.append(True) self.exchanger.register_message("inbound", handler) self.exchanger.exchange() self.assertEqual(handled, [True] * 3, self.logfile.getvalue()) def test_messages_from_server_causing_urgent_exchanges(self): """ If a message from the server causes an urgent message to be queued, an urgent exchange should happen again after the running exchange. 
""" self.transport.responses.append([{"type": "foobar"}]) self.mstore.set_accepted_types(["empty"]) def handler(message): self.exchanger.send({"type": "empty"}, urgent=True) self.exchanger.register_message("foobar", handler) self.exchanger.exchange() self.assertEqual(len(self.transport.payloads), 1) self.wait_for_exchange(urgent=True) self.assertEqual(len(self.transport.payloads), 2) self.assertMessages(self.transport.payloads[1]["messages"], [{"type": "empty"}]) def test_server_expects_older_messages(self): """ If the server expects an old message, the exchanger should be marked as urgent. """ self.mstore.set_accepted_types(["data"]) self.mstore.add({"type": "data", "data": 0}) self.mstore.add({"type": "data", "data": 1}) self.exchanger.exchange() self.assertEqual(self.mstore.get_sequence(), 2) self.mstore.add({"type": "data", "data": 2}) self.mstore.add({"type": "data", "data": 3}) # next one, server will respond with 1! def desynched_send_data(*args, **kwargs): self.transport.next_expected_sequence = 1 return {"next-expected-sequence": 1} self.transport.exchange = desynched_send_data self.exchanger.exchange() self.assertEqual(self.mstore.get_sequence(), 1) del self.transport.exchange exchanged = [] def exchange_callback(): exchanged.append(True) self.reactor.call_on("exchange-done", exchange_callback) self.wait_for_exchange(urgent=True) self.assertEqual(exchanged, [True]) payload = self.transport.payloads[-1] self.assertMessages(payload["messages"], [{"type": "data", "data": 1}, {"type": "data", "data": 2}, {"type": "data", "data": 3}]) self.assertEqual(payload["sequence"], 1) self.assertEqual(payload["next-expected-sequence"], 0) def test_start_with_urgent_exchange(self): """ Immediately after registration, an urgent exchange should be scheduled. """ transport = FakeTransport() exchanger = MessageExchange(self.reactor, self.mstore, transport, self.identity, self.exchange_store, self.config) exchanger.start() self.wait_for_exchange(urgent=True) self.assertEqual(len(transport.payloads), 1) def test_reschedule_after_exchange(self): """ Under normal operation, after an exchange has finished another exchange should be scheduled for after the normal delay. """ self.exchanger.schedule_exchange(urgent=True) self.wait_for_exchange(urgent=True) self.assertEqual(len(self.transport.payloads), 1) self.wait_for_exchange() self.assertEqual(len(self.transport.payloads), 2) self.wait_for_exchange() self.assertEqual(len(self.transport.payloads), 3) def test_leave_urgent_exchange_mode_after_exchange(self): """ After an urgent exchange, assuming no messages are left to be exchanged, urgent exchange should not remain scheduled. """ self.mstore.set_accepted_types(["empty"]) self.exchanger.send({"type": "empty"}, urgent=True) self.wait_for_exchange(urgent=True) self.assertEqual(len(self.transport.payloads), 1) self.wait_for_exchange(urgent=True) self.assertEqual(len(self.transport.payloads), 1) # no change def test_successful_exchange_records_success(self): """ When a successful exchange occurs, that success is recorded in the message store. 
""" mock_message_store = self.mocker.proxy(self.mstore) mock_message_store.record_success(MATCH(lambda x: type(x) is int)) self.mocker.result(None) self.mocker.replay() exchanger = MessageExchange( self.reactor, mock_message_store, self.transport, self.identity, self.exchange_store, self.config) exchanger.exchange() def test_ancient_causes_resynchronize(self): """ If the server asks for messages that we no longer have, the message exchange plugin should send a message to the server indicating that a resynchronization is occuring and then fire a "resynchronize-clients" reactor message, so that plugins can generate new data -- if the server got out of synch with the client, then we're best off synchronizing everything back to it. """ self.mstore.set_accepted_types(["empty", "data", "resynchronize"]) # Do three generations of messages, so we "lose" the 0th message for i in range(3): self.mstore.add({"type": "empty"}) self.exchanger.exchange() # the server loses some data self.transport.next_expected_sequence = 0 def resynchronize(scopes=None): # We'll add a message to the message store here, since this is what # is commonly done in a resynchronize callback. This message added # should come AFTER the "resynchronize" message that is generated # by the exchange code itself. self.mstore.add({"type": "data", "data": 999}) self.reactor.call_on("resynchronize-clients", resynchronize) # This exchange call will notice the server is asking for an old # message and fire the event: self.exchanger.exchange() self.assertMessages(self.mstore.get_pending_messages(), [{"type": "empty"}, {"type": "resynchronize"}, {"type": "data", "data": 999}]) def test_resynchronize_msg_causes_resynchronize_response_then_event(self): """ If a message of type 'resynchronize' is received from the server, the exchanger should *first* send a 'resynchronize' message back to the server and *then* fire a 'resynchronize-clients' event. """ self.mstore.set_accepted_types(["empty", "resynchronize"]) def resynchronized(scopes=None): self.mstore.add({"type": "empty"}) self.reactor.call_on("resynchronize-clients", resynchronized) self.transport.responses.append([{"type": "resynchronize", "operation-id": 123}]) self.exchanger.exchange() self.assertMessages(self.mstore.get_pending_messages(), [{"type": "resynchronize", "operation-id": 123}, {"type": "empty"}]) def test_scopes_are_copied_from_incoming_resynchronize_messages(self): """ If an incoming message of type 'reysnchronize' contains a 'scopes' key, then it's value is copied into the "resynchronize-clients" event. """ fired_scopes = [] self.mstore.set_accepted_types(["reysnchronize"]) def resynchronized(scopes=None): fired_scopes.extend(scopes) self.reactor.call_on("resynchronize-clients", resynchronized) self.transport.responses.append([{"type": "resynchronize", "operation-id": 123, "scopes": ["disk", "users"]}]) self.exchanger.exchange() self.assertEqual(["disk", "users"], fired_scopes) def test_no_urgency_when_server_expects_current_message(self): """ When the message the server expects is the same as the first pending message sequence, the client should not go into urgent exchange mode. This means the server handler is likely blowing up and the client and the server are in a busy loop constantly asking for the same message, breaking, setting urgent exchange mode, sending the same message and then breaking in a fast loop. In this case, urgent exchange mode should not be set. 
(bug #138135) """ # We set the server sequence to some non-0 value to ensure that the # server and client sequences aren't the same to ensure the code is # looking at the correct sequence number. :( self.mstore.set_server_sequence(3300) self.mstore.set_accepted_types(["data"]) self.mstore.add({"type": "data", "data": 0}) def desynched_send_data(*args, **kwargs): self.transport.next_expected_sequence = 0 return {"next-expected-sequence": 0} self.transport.exchange = desynched_send_data self.exchanger.exchange() self.assertEqual(self.mstore.get_sequence(), 0) del self.transport.exchange exchanged = [] def exchange_callback(): exchanged.append(True) self.reactor.call_on("exchange-done", exchange_callback) self.wait_for_exchange(urgent=True) self.assertEqual(exchanged, []) self.wait_for_exchange() self.assertEqual(exchanged, [True]) def test_old_sequence_id_does_not_cause_resynchronize(self): resynchronized = [] self.reactor.call_on("resynchronize", lambda: resynchronized.append(True)) self.mstore.set_accepted_types(["empty"]) self.mstore.add({"type": "empty"}) self.exchanger.exchange() # the server loses some data, but not too much self.transport.next_expected_sequence = 0 self.exchanger.exchange() self.assertEqual(resynchronized, []) def test_per_api_payloads(self): """ When sending messages to the server, the exchanger should split messages with different APIs in different payloads, and deliver them to the right API on the server. """ types = ["a", "b", "c", "d", "e", "f"] self.mstore.set_accepted_types(types) for t in types: self.mstore.add_schema(Message(t, {})) self.exchanger.exchange() # No messages queued yet. Server API should default to # the client API. payload = self.transport.payloads[-1] self.assertMessages(payload["messages"], []) self.assertEqual(payload.get("client-api"), CLIENT_API) self.assertEqual(payload.get("server-api"), SERVER_API) self.assertEqual(self.transport.message_api, SERVER_API) self.mstore.add({"type": "a", "api": "1.0"}) self.mstore.add({"type": "b", "api": "1.0"}) self.mstore.add({"type": "c", "api": "1.1"}) self.mstore.add({"type": "d", "api": "1.1"}) # Simulate an old 2.0 client, which has no API on messages. self.mstore.add({"type": "e", "api": None}) self.mstore.add({"type": "f", "api": None}) self.exchanger.exchange() payload = self.transport.payloads[-1] self.assertMessages(payload["messages"], [{"type": "a", "api": "1.0"}, {"type": "b", "api": "1.0"}]) self.assertEqual(payload.get("client-api"), CLIENT_API) self.assertEqual(payload.get("server-api"), "1.0") self.assertEqual(self.transport.message_api, "1.0") self.exchanger.exchange() payload = self.transport.payloads[-1] self.assertMessages(payload["messages"], [{"type": "c", "api": "1.1"}, {"type": "d", "api": "1.1"}]) self.assertEqual(payload.get("client-api"), CLIENT_API) self.assertEqual(payload.get("server-api"), "1.1") self.assertEqual(self.transport.message_api, "1.1") self.exchanger.exchange() payload = self.transport.payloads[-1] self.assertMessages(payload["messages"], [{"type": "e", "api": None}, {"type": "f", "api": None}]) self.assertEqual(payload.get("client-api"), CLIENT_API) self.assertEqual(payload.get("server-api"), "2.0") self.assertEqual(self.transport.message_api, "2.0") def test_exchange_token(self): """ When sending messages to the server, the exchanger provides the token that the server itself gave it during the former exchange. 
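A condensed sketch of the grouping rule (one payload per message API, in queue order; messages with C{api} set to C{None} are delivered as "2.0")::

    self.mstore.add({"type": "a", "api": "1.0"})
    self.mstore.add({"type": "c", "api": "1.1"})
    self.exchanger.exchange()  # sends only the "1.0" batch, with
                               # payload["server-api"] == "1.0"
    self.exchanger.exchange()  # then the "1.1" batch, and so on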
""" self.exchanger.exchange() self.assertIs(None, self.transport.exchange_token) exchange_token = self.mstore.get_exchange_token() self.assertIsNot(None, exchange_token) self.exchanger.exchange() self.assertEqual(exchange_token, self.transport.exchange_token) def test_reset_exchange_token_on_failure(self): """ If an exchange fails we set the value of the next exchange token to C{None}, so we can authenticate ourselves even if we couldn't receive a valid token. """ self.mstore.set_exchange_token("abcd-efgh") self.mstore.commit() self.transport.exchange = lambda *args, **kwargs: None self.exchanger.exchange() # Check that the change was persisted persist = Persist(filename=self.persist_filename) store = MessageStore(persist, self.config.message_store_path) self.assertIs(None, store.get_exchange_token()) def test_include_total_messages_none(self): """ The payload includes the total number of messages that the client has pending for the server. """ self.mstore.set_accepted_types(["empty"]) self.exchanger.exchange() self.assertEqual(self.transport.payloads[0]["total-messages"], 0) def test_include_total_messages_some(self): """ If there are no more messages than those that are sent in the exchange, the total-messages is equivalent to the number of messages sent. """ self.mstore.set_accepted_types(["empty"]) self.mstore.add({"type": "empty"}) self.exchanger.exchange() self.assertEqual(self.transport.payloads[0]["total-messages"], 1) def test_include_total_messages_more(self): """ If there are more messages than those that are sent in the exchange, the total-messages is equivalent to the total number of messages pending. """ exchanger = MessageExchange(self.reactor, self.mstore, self.transport, self.identity, self.exchange_store, self.config, max_messages=1) self.mstore.set_accepted_types(["empty"]) self.mstore.add({"type": "empty"}) self.mstore.add({"type": "empty"}) exchanger.exchange() self.assertEqual(self.transport.payloads[0]["total-messages"], 2) def test_impending_exchange(self): """ A reactor event is emitted shortly (10 seconds) before an exchange occurs. """ self.exchanger.schedule_exchange() events = [] self.reactor.call_on("impending-exchange", lambda: events.append(True)) self.wait_for_exchange(delta=-11) self.assertEqual(events, []) self.reactor.advance(1) self.assertEqual(events, [True]) def test_impending_exchange_on_urgent(self): """ The C{impending-exchange} event is fired 10 seconds before urgent exchanges. """ # We create our own MessageExchange because the one set up by the text # fixture has an urgent exchange interval of 10 seconds, which makes # testing this awkward. self.config.urgent_exchange_interval = 20 exchanger = MessageExchange(self.reactor, self.mstore, self.transport, self.identity, self.exchange_store, self.config) exchanger.schedule_exchange(urgent=True) events = [] self.reactor.call_on("impending-exchange", lambda: events.append(True)) self.reactor.advance(9) self.assertEqual(events, []) self.reactor.advance(1) self.assertEqual(events, [True]) def test_impending_exchange_gets_reschudeled_with_urgent_reschedule(self): """ When an urgent exchange is scheduled after a regular exchange was scheduled but before it executed, the old C{impending-exchange} event should be cancelled and a new one should be scheduled for 10 seconds before the new urgent exchange. 
""" self.config.exchange_interval = 60 * 60 self.config.urgent_exchange_interval = 20 exchanger = MessageExchange(self.reactor, self.mstore, self.transport, self.identity, self.exchange_store, self.config) events = [] self.reactor.call_on("impending-exchange", lambda: events.append(True)) # This call will: # * schedule the exchange for an hour from now # * schedule impending-exchange to be fired an hour - 10 seconds from # now exchanger.schedule_exchange() # And this call will: # * hopefully cancel those previous calls # * schedule an exchange for 20 seconds from now # * schedule impending-exchange to be fired in 10 seconds exchanger.schedule_exchange(urgent=True) self.reactor.advance(10) self.assertEqual(events, [True]) self.reactor.advance(10) self.assertEqual(len(self.transport.payloads), 1) # Now the urgent exchange should be fired, which should automatically # schedule a regular exchange. # Let's make sure that that *original* impending-exchange event has # been cancelled: self.reactor.advance(60 * 60 # time till exchange - 10 # time till notification - 20) # time that we've already advanced self.assertEqual(events, [True]) # Ok, so no new events means that the original call was # cancelled. great. # Just a bit more sanity checking: self.reactor.advance(20) self.assertEqual(events, [True, True]) self.reactor.advance(10) self.assertEqual(len(self.transport.payloads), 2) def test_pre_exchange_event(self): reactor_mock = self.mocker.patch(self.reactor) reactor_mock.fire("pre-exchange") self.mocker.replay() self.exchanger.exchange() def test_schedule_exchange(self): self.exchanger.schedule_exchange() self.wait_for_exchange(urgent=True) self.assertFalse(self.transport.payloads) self.wait_for_exchange() self.assertTrue(self.transport.payloads) def test_schedule_urgent_exchange(self): self.exchanger.schedule_exchange(urgent=True) self.wait_for_exchange(urgent=True) self.assertTrue(self.transport.payloads) def test_exchange_failed_fires_correctly(self): """ Ensure that the exchange-failed event is fired if the exchanger raises an exception. """ def failed_send_data(*args, **kwargs): return None self.transport.exchange = failed_send_data exchanged = [] def exchange_failed_callback(): exchanged.append(True) self.reactor.call_on("exchange-failed", exchange_failed_callback) self.exchanger.exchange() self.assertEqual(exchanged, [True]) def test_stop(self): self.exchanger.schedule_exchange() self.exchanger.stop() self.wait_for_exchange() self.assertFalse(self.transport.payloads) def test_stop_twice_doesnt_break(self): self.exchanger.schedule_exchange() self.exchanger.stop() self.exchanger.stop() self.wait_for_exchange() self.assertFalse(self.transport.payloads) def test_set_intervals(self): """ When a C{set-intervals} message is received, the runtime attributes of the L{MessageExchange} are changed, the configuration values as well, and the configuration is written to disk to be persisted. 
""" server_message = [{"type": "set-intervals", "urgent-exchange": 1234, "exchange": 5678}] self.transport.responses.append(server_message) self.exchanger.exchange() self.assertEqual(self.config.exchange_interval, 5678) self.assertEqual(self.config.urgent_exchange_interval, 1234) new_config = BrokerConfiguration() new_config.load_configuration_file(self.config_filename) self.assertEqual(new_config.exchange_interval, 5678) self.assertEqual(new_config.urgent_exchange_interval, 1234) def test_set_intervals_with_urgent_exchange_only(self): server_message = [{"type": "set-intervals", "urgent-exchange": 1234}] self.transport.responses.append(server_message) self.exchanger.exchange() # Let's make sure it works. self.exchanger.schedule_exchange(urgent=True) self.reactor.advance(1233) self.assertEqual(len(self.transport.payloads), 1) self.reactor.advance(1) self.assertEqual(len(self.transport.payloads), 2) def test_set_intervals_with_exchange_only(self): server_message = [{"type": "set-intervals", "exchange": 5678}] self.transport.responses.append(server_message) self.exchanger.exchange() # Let's make sure it works. self.reactor.advance(5677) self.assertEqual(len(self.transport.payloads), 1) self.reactor.advance(1) self.assertEqual(len(self.transport.payloads), 2) def test_register_message(self): """ The exchanger expsoses a mechanism for subscribing to messages of a particular type. """ messages = [] self.exchanger.register_message("type-A", messages.append) msg = {"type": "type-A", "whatever": 5678} server_message = [msg] self.transport.responses.append(server_message) self.exchanger.exchange() self.assertEqual(messages, [msg]) def test_register_multiple_message_handlers(self): """ Registering multiple handlers for the same type will cause each handler to be called in the order they were registered. """ messages = [] def handler1(message): messages.append(("one", message)) def handler2(message): messages.append(("two", message)) self.exchanger.register_message("type-A", handler1) self.exchanger.register_message("type-A", handler2) msg = {"type": "type-A", "whatever": 5678} server_message = [msg] self.transport.responses.append(server_message) self.exchanger.exchange() self.assertEqual(messages, [("one", msg), ("two", msg)]) def test_server_uuid_is_stored_on_message_store(self): self.transport.extra["server-uuid"] = "first-uuid" self.exchanger.exchange() self.assertEqual(self.mstore.get_server_uuid(), "first-uuid") self.transport.extra["server-uuid"] = "second-uuid" self.exchanger.exchange() self.assertEqual(self.mstore.get_server_uuid(), "second-uuid") def test_server_uuid_change_cause_event(self): called = [] def server_uuid_changed(old_uuid, new_uuid): called.append((old_uuid, new_uuid)) self.reactor.call_on("server-uuid-changed", server_uuid_changed) # Set it for the first time, and it should emit the event # letting the system know about the change. self.transport.extra["server-uuid"] = "first-uuid" self.exchanger.exchange() self.assertEqual(len(called), 1) self.assertEqual(called[-1], (None, "first-uuid")) # Using the same one again, nothing should happen: self.transport.extra["server-uuid"] = "first-uuid" self.exchanger.exchange() self.assertEqual(len(called), 1) # Changing it, we should get an event again: self.transport.extra["server-uuid"] = "second-uuid" self.exchanger.exchange() self.assertEqual(len(called), 2) self.assertEqual(called[-1], ("first-uuid", "second-uuid")) # And then, it shouldn't emit it once more, since it continues # to be the same. 
self.transport.extra["server-uuid"] = "second-uuid" self.exchanger.exchange() self.assertEqual(len(called), 2) def test_server_uuid_event_not_emitted_with_matching_stored_uuid(self): """ If the UUID in the message store is the same as the current UUID, the event is not emitted. """ called = [] def server_uuid_changed(old_uuid, new_uuid): called.append((old_uuid, new_uuid)) self.reactor.call_on("server-uuid-changed", server_uuid_changed) self.mstore.set_server_uuid("the-uuid") self.transport.extra["server-uuid"] = "the-uuid" self.exchanger.exchange() self.assertEqual(called, []) def test_server_uuid_change_is_logged(self): self.transport.extra["server-uuid"] = "the-uuid" self.exchanger.exchange() self.assertIn("INFO: Server UUID changed (old=None, new=the-uuid).", self.logfile.getvalue()) # An exchange with the same UUID shouldn't be logged. self.logfile.truncate(0) self.transport.extra["server-uuid"] = "the-uuid" self.exchanger.exchange() self.assertNotIn("INFO: Server UUID changed", self.logfile.getvalue()) def test_return_messages_have_their_context_stored(self): """ Incoming messages with an 'operation-id' key will have the secure id stored in the L{ExchangeStore}. """ messages = [] self.exchanger.register_message("type-R", messages.append) msg = {"type": "type-R", "whatever": 5678, "operation-id": 123456} server_message = [msg] self.transport.responses.append(server_message) self.exchanger.exchange() [message] = messages self.assertIsNot( None, self.exchange_store.get_message_context(message['operation-id'])) message_context = self.exchange_store.get_message_context( message['operation-id']) self.assertEqual(message_context.operation_id, 123456) self.assertEqual(message_context.message_type, "type-R") def test_one_way_messages_do_not_have_their_context_stored(self): """ Incoming messages without an 'operation-id' key will *not* have the secure id stored in the L{ExchangeStore}. """ ids_before = self.exchange_store.all_operation_ids() msg = {"type": "type-R", "whatever": 5678} server_message = [msg] self.transport.responses.append(server_message) self.exchanger.exchange() ids_after = self.exchange_store.all_operation_ids() self.assertEqual(ids_before, ids_after) def test_obsolete_response_messages_are_discarded(self): """ An obsolete response message will be discarded as opposed to being sent to the server. A response message is considered obsolete if the secure ID changed since the request message was received. """ # Receive the message below from the server. msg = {"type": "type-R", "whatever": 5678, "operation-id": 234567} server_message = [msg] self.transport.responses.append(server_message) self.exchanger.exchange() # Change the secure ID so that the response message gets discarded. self.identity.secure_id = 'brand-new' ids_before = self.exchange_store.all_operation_ids() self.mstore.set_accepted_types(["resynchronize"]) message_id = self.exchanger.send( {"type": "resynchronize", "operation-id": 234567}) self.exchanger.exchange() self.assertEqual(2, len(self.transport.payloads)) messages = self.transport.payloads[1]["messages"] self.assertEqual([], messages) self.assertIs(None, message_id) expected_log_entry = ( "Response message with operation-id 234567 was discarded because " "the client's secure ID has changed in the meantime") self.assertIn(expected_log_entry, self.logfile.getvalue()) # The MessageContext was removed after utilisation. 
ids_after = self.exchange_store.all_operation_ids() self.assertEqual(len(ids_after), len(ids_before) - 1) self.assertNotIn('234567', ids_after) def test_error_exchanging_causes_failed_exchange(self): """ If a traceback occurs whilst exchanging, the 'exchange-failed' event should be fired. """ events = [] def failed_exchange(): events.append(None) self.reactor.call_on("exchange-failed", failed_exchange) self.exchanger._transport = RaisingTransport() self.exchanger.exchange() self.assertEqual([None], events) def test_error_exchanging_records_failure_in_message_store(self): """ If a traceback occurs whilst exchanging, the failure is recorded in the message store. """ mock_message_store = self.mocker.proxy(self.mstore) mock_message_store.record_failure(MATCH(lambda x: type(x) is int)) self.mocker.result(None) self.mocker.replay() exchanger = MessageExchange( self.reactor, mock_message_store, RaisingTransport(), self.identity, self.exchange_store, self.config) exchanger.exchange() def test_error_exchanging_marks_exchange_complete(self): """ If a traceback occurs whilst exchanging, the exchange is still marked as complete. """ events = [] def exchange_done(): events.append(None) self.reactor.call_on("exchange-done", exchange_done) self.exchanger._transport = RaisingTransport() self.exchanger.exchange() self.assertEqual([None], events) def test_error_exchanging_logs_failure(self): """ If a traceback occurs whilst exchanging, the failure is logged. """ self.exchanger._transport = RaisingTransport() self.exchanger.exchange() self.assertIn("Message exchange failed.", self.logfile.getvalue()) class AcceptedTypesMessageExchangeTest(LandscapeTest): helpers = [ExchangeHelper] def setUp(self): super(AcceptedTypesMessageExchangeTest, self).setUp() self.pinger = Pinger(self.reactor, self.identity, self.exchanger, self.config) # The __init__ method of RegistrationHandler registers a few default # message types that we want to catch as well self.handler = RegistrationHandler( self.config, self.identity, self.reactor, self.exchanger, self.pinger, self.mstore, fetch_async) def test_register_accepted_message_type(self): self.exchanger.register_client_accepted_message_type("type-B") self.exchanger.register_client_accepted_message_type("type-A") self.exchanger.register_client_accepted_message_type("type-C") self.exchanger.register_client_accepted_message_type("type-A") types = self.exchanger.get_client_accepted_message_types() self.assertEqual(types, sorted(["type-A", "type-B", "type-C"] + DEFAULT_ACCEPTED_TYPES)) def test_exchange_sends_message_type_when_no_hash(self): self.exchanger.register_client_accepted_message_type("type-A") self.exchanger.register_client_accepted_message_type("type-B") self.exchanger.exchange() self.assertEqual( self.transport.payloads[0]["client-accepted-types"], sorted(["type-A", "type-B"] + DEFAULT_ACCEPTED_TYPES)) def test_exchange_does_not_send_message_types_when_hash_matches(self): self.exchanger.register_client_accepted_message_type("type-A") self.exchanger.register_client_accepted_message_type("type-B") types = sorted(["type-A", "type-B"] + DEFAULT_ACCEPTED_TYPES) accepted_types_digest = md5(";".join(types)).digest() self.transport.extra["client-accepted-types-hash"] = \ accepted_types_digest self.exchanger.exchange() self.exchanger.exchange() self.assertNotIn("client-accepted-types", self.transport.payloads[1]) def test_exchange_continues_sending_message_types_on_no_hash(self): """ If the server does not respond with a hash of client accepted message types, the client will 
continue to send the accepted types. """ self.exchanger.register_client_accepted_message_type("type-A") self.exchanger.register_client_accepted_message_type("type-B") self.exchanger.exchange() self.exchanger.exchange() self.assertEqual( self.transport.payloads[1]["client-accepted-types"], sorted(["type-A", "type-B"] + DEFAULT_ACCEPTED_TYPES)) def test_exchange_sends_new_accepted_types_hash(self): """ If the accepted types on the client change between exchanges, the client will send a new list to the server. """ self.exchanger.register_client_accepted_message_type("type-A") types_hash = md5("type-A").digest() self.transport.extra["client-accepted-types-hash"] = types_hash self.exchanger.exchange() self.exchanger.register_client_accepted_message_type("type-B") self.exchanger.exchange() self.assertEqual( self.transport.payloads[1]["client-accepted-types"], sorted(["type-A", "type-B"] + DEFAULT_ACCEPTED_TYPES)) def test_exchange_sends_new_types_when_server_screws_up(self): """ If the server suddenly and without warning changes the hash of accepted client types that it sends to the client, the client will send a new list of types. """ self.exchanger.register_client_accepted_message_type("type-A") types_hash = md5("type-A").digest() self.transport.extra["client-accepted-types-hash"] = types_hash self.exchanger.exchange() self.transport.extra["client-accepted-types-hash"] = "lol" self.exchanger.exchange() self.exchanger.exchange() self.assertEqual( self.transport.payloads[2]["client-accepted-types"], sorted(["type-A"] + DEFAULT_ACCEPTED_TYPES)) def test_register_message_adds_accepted_type(self): """ Using the C{register_message} method of the exchanger causes the registered message to be included in the accepted types of the client that are sent to the server. """ self.exchanger.register_message("typefoo", lambda m: None) types = self.exchanger.get_client_accepted_message_types() self.assertEqual(types, sorted(["typefoo"] + DEFAULT_ACCEPTED_TYPES)) class GetAcceptedTypesDiffTest(LandscapeTest): def test_diff_empty(self): self.assertEqual(get_accepted_types_diff([], []), "") def test_diff_add(self): self.assertEqual(get_accepted_types_diff([], ["wubble"]), "+wubble") def test_diff_remove(self): self.assertEqual(get_accepted_types_diff(["wubble"], []), "-wubble") def test_diff_no_change(self): self.assertEqual(get_accepted_types_diff(["ooga"], ["ooga"]), "ooga") def test_diff_complex(self): self.assertEqual(get_accepted_types_diff(["foo", "bar"], ["foo", "ooga"]), "+ooga foo -bar") landscape-client-14.01/landscape/broker/tests/test_client.py0000644000175000017500000003315612301414317024014 0ustar andreasandreasfrom twisted.internet import reactor from twisted.internet.defer import Deferred from landscape.lib.twisted_util import gather_results from landscape.tests.helpers import LandscapeTest, DEFAULT_ACCEPTED_TYPES from landscape.broker.tests.helpers import BrokerClientHelper from landscape.broker.client import BrokerClientPlugin, HandlerNotFoundError class BrokerClientTest(LandscapeTest): helpers = [BrokerClientHelper] def test_ping(self): """ The L{BrokerClient.ping} method always returns C{True}. """ self.assertTrue(self.client.ping()) def test_add(self): """ The L{BrokerClient.add} method registers a new plugin, and calls the plugin's C{register} method. """ plugin = BrokerClientPlugin() self.client.add(plugin) self.assertIs(plugin.client, self.client) def test_registering_plugin_gets_session_id(self): """ As part of the BrokerClientPlugin registration process, a session ID is generated.
""" plugin = BrokerClientPlugin() self.client.add(plugin) self.assertIsNot(None, plugin._session_id) def test_registered_plugin_uses_correct_scope(self): """ When we register a plugin we use that plugin's scope variable when getting a session id. """ test_session_id = self.successResultOf( self.client.broker.get_session_id(scope="test")) plugin = BrokerClientPlugin() plugin.scope = "test" self.client.add(plugin) self.assertEqual(test_session_id, plugin._session_id) def test_resynchronizing_refreshes_session_id(self): """ When a 'reysnchronize' event fires a new session ID is acquired as the old one will be removed. """ plugin = BrokerClientPlugin() plugin.scope = "test" self.client.add(plugin) session_id = plugin._session_id self.mstore.drop_session_ids() self.client_reactor.fire("resynchronize") self.assertNotEqual(session_id, plugin._session_id) def test_resynchronize_calls_reset(self): plugin = BrokerClientPlugin() plugin.scope = "test" self.client.add(plugin) plugin._resest = self.mocker.mock() self.expect(plugin._reset()) self.mocker.replay() self.client_reactor.fire("resynchronize") def test_get_plugins(self): """ The L{BrokerClient.get_plugins} method returns a list of registered plugins. """ plugins = [BrokerClientPlugin(), BrokerClientPlugin()] self.client.add(plugins[0]) self.client.add(plugins[1]) self.assertEqual(self.client.get_plugins(), plugins) def test_get_plugins_returns_a_copy(self): """ The L{BrokerClient.get_plugins} method returns a copy of the list of registered plugins, so user can't can't modify our internals. """ plugins = self.client.get_plugins() plugins.append(BrokerClientPlugin()) self.assertEqual(self.client.get_plugins(), []) def test_get_named_plugin(self): """ If a plugin has a C{plugin_name} attribute, it is possible to look it up by name after adding it to the L{BrokerClient}. """ plugin = BrokerClientPlugin() plugin.plugin_name = "foo" self.client.add(plugin) self.assertEqual(self.client.get_plugin("foo"), plugin) def test_run_interval(self): """ If a plugin has a C{run} method, the reactor will call it every C{run_interval} seconds. """ plugin = BrokerClientPlugin() plugin.run = self.mocker.mock() self.expect(plugin.run()).count(2) self.mocker.replay() self.client.add(plugin) self.client_reactor.advance(plugin.run_interval) self.client_reactor.advance(plugin.run_interval) def test_run_interval_blocked_during_resynch(self): """ During resynchronisation we want to block the C{run} method so that we don't send any new messages with old session ids, or with state in an indeterminate condition. """ runs = [] plugin = BrokerClientPlugin() plugin.run_immediately = True plugin.run = lambda: runs.append(True) self.client.add(plugin) # At this point the plugin has already run once and has scheduled as # second run in plugin.run_interval seconds. self.assertEquals(runs, [True]) # Mock out get_session_id so that it doesn't complete synchronously deferred = Deferred() self.client.broker.get_session_id = lambda scope: deferred self.client_reactor.fire("resynchronize") # The scheduled run has been cancelled, and even if plugin.run_interval # seconds elapse the plugin won't run again. self.client_reactor.advance(plugin.run_interval) self.assertEquals(runs, [True]) # Finally get_session_id completes and the plugin runs again. deferred.callback(123) self.assertEquals(runs, [True, True]) def test_run_immediately(self): """ If a plugin has a C{run} method and C{run_immediately} is C{True}, the plugin will be run immediately at registration. 
""" plugin = BrokerClientPlugin() plugin.run = self.mocker.mock() plugin.run_immediately = True self.expect(plugin.run()).count(1) self.mocker.replay() self.client.add(plugin) def test_register_message(self): """ When L{BrokerClient.register_message} is called, the broker is notified that the message type is now accepted. """ result1 = self.client.register_message("foo", lambda m: None) result2 = self.client.register_message("bar", lambda m: None) def got_result(result): self.assertEqual( self.exchanger.get_client_accepted_message_types(), sorted(["bar", "foo"] + DEFAULT_ACCEPTED_TYPES)) return gather_results([result1, result2]).addCallback(got_result) def test_dispatch_message(self): """ L{BrokerClient.dispatch_message} calls a previously-registered message handler and return its value. """ message = {"type": "foo"} handle_message = self.mocker.mock() self.expect(handle_message(message)).result(123) self.mocker.replay() def dispatch_message(result): self.assertEqual(self.client.dispatch_message(message), 123) result = self.client.register_message("foo", handle_message) return result.addCallback(dispatch_message) def test_dispatch_message_with_exception(self): """ L{BrokerClient.dispatch_message} gracefully logs exceptions raised by message handlers. """ message = {"type": "foo"} handle_message = self.mocker.mock() self.expect(handle_message(message)).throw(ZeroDivisionError) self.mocker.replay() self.log_helper.ignore_errors("Error running message handler.*") def dispatch_message(result): self.assertIs(self.client.dispatch_message(message), None) self.assertTrue("Error running message handler for type 'foo'" in self.logfile.getvalue()) result = self.client.register_message("foo", handle_message) return result.addCallback(dispatch_message) def test_dispatch_message_with_no_handler(self): """ L{BrokerClient.dispatch_message} raises an error if no handler was found for the given message. """ error = self.assertRaises(HandlerNotFoundError, self.client.dispatch_message, {"type": "x"}) self.assertEqual(str(error), "x") def test_message(self): """ The L{BrokerClient.message} method dispatches a message and returns C{True} if an handler for it was found. """ message = {"type": "foo"} handle_message = self.mocker.mock() handle_message(message) self.mocker.replay() def dispatch_message(result): self.assertEqual(self.client.message(message), True) result = self.client.register_message("foo", handle_message) return result.addCallback(dispatch_message) def test_message_with_no_handler(self): """ The L{BrokerClient.message} method returns C{False} if no handler was found. """ message = {"type": "foo"} self.assertEqual(self.client.message(message), False) def test_exchange(self): """ The L{BrokerClient.exchange} method calls C{exchange} on all plugins, if available. """ plugin = BrokerClientPlugin() plugin.exchange = self.mocker.mock() plugin.exchange() self.mocker.replay() self.client.add(plugin) self.client.exchange() def test_exchange_on_plugin_without_exchange_method(self): """ The L{BrokerClient.exchange} method ignores plugins without an C{exchange} method. """ plugin = BrokerClientPlugin() self.assertFalse(hasattr(plugin, "exchange")) self.client.exchange() def test_exchange_logs_errors_and_continues(self): """ If the L{exchange} method of a registered plugin fails, the error is logged and other plugins are processed. 
""" self.log_helper.ignore_errors(ZeroDivisionError) plugin1 = BrokerClientPlugin() plugin2 = BrokerClientPlugin() plugin1.exchange = self.mocker.mock() plugin2.exchange = self.mocker.mock() self.expect(plugin1.exchange()).throw(ZeroDivisionError) plugin2.exchange() self.mocker.replay() self.client.add(plugin1) self.client.add(plugin2) self.client.exchange() self.assertTrue("Error during plugin exchange" in self.logfile.getvalue()) self.assertTrue("ZeroDivisionError" in self.logfile.getvalue()) def test_notify_exchange(self): """ The L{BrokerClient.notify_exchange} method is triggered by an impending-exchange event and calls C{exchange} on all plugins, logging the event. """ plugin = BrokerClientPlugin() plugin.exchange = self.mocker.mock() plugin.exchange() self.mocker.replay() self.client.add(plugin) self.client_reactor.fire("impending-exchange") self.assertTrue("Got notification of impending exchange. " "Notifying all plugins." in self.logfile.getvalue()) def test_fire_event(self): """ The L{BrokerClient.fire_event} method makes the reactor fire the given event. """ callback = self.mocker.mock() callback() self.mocker.replay() self.client_reactor.call_on("event", callback) self.client.fire_event("event") def test_fire_event_with_arguments(self): """ The L{BrokerClient.fire_event} accepts optional arguments and keyword arguments to pass to the registered callback. """ callback = self.mocker.mock() callback(True, kwarg=2) self.mocker.replay() self.client_reactor.call_on("event", callback) self.client.fire_event("event", True, kwarg=2) def test_fire_event_with_mixed_results(self): """ The return values of the fired handlers can be part L{Deferred}s and part not. """ deferred = Deferred() callback1 = self.mocker.mock() callback2 = self.mocker.mock() self.expect(callback1()).result(123) self.expect(callback2()).result(deferred) self.mocker.replay() self.client_reactor.call_on("event", callback1) self.client_reactor.call_on("event", callback2) result = self.client.fire_event("event") reactor.callLater(0, lambda: deferred.callback("abc")) return self.assertSuccess(result, [123, "abc"]) def test_fire_event_with_acceptance_changed(self): """ When the given event type is C{message-type-acceptance-changed}, the fired event will be a 2-tuple of the eventy type and the message type. """ event_type = "message-type-acceptance-changed" callback = self.mocker.mock() callback(False) self.mocker.replay() self.client_reactor.call_on((event_type, "test"), callback) self.client.fire_event(event_type, "test", False) def test_handle_reconnect(self): """ The L{BrokerClient.handle_reconnect} method is triggered by a broker-reconnect event, and it causes any message types previously registered with the broker to be registered again. """ result1 = self.client.register_message("foo", lambda m: None) result2 = self.client.register_message("bar", lambda m: None) def got_result(result): self.client.broker = self.mocker.mock() self.client.broker.register_client_accepted_message_type("foo") self.client.broker.register_client_accepted_message_type("bar") self.client.broker.register_client("client") self.mocker.replay() self.client_reactor.fire("broker-reconnect") return gather_results([result1, result2]).addCallback(got_result) def test_exit(self): """ The L{BrokerClient.exit} method causes the reactor to be stopped. 
""" self.client.reactor.stop = self.mocker.mock() self.client.reactor.stop() self.mocker.replay() self.client.exit() self.client.reactor.advance(0.1) landscape-client-14.01/landscape/broker/tests/public.ssl0000644000175000017500000000244212301414317023120 0ustar andreasandreas-----BEGIN CERTIFICATE----- MIIDnDCCAwWgAwIBAgIJALPjWsknBC15MA0GCSqGSIb3DQEBBQUAMIGRMQswCQYD VQQGEwJCUjEPMA0GA1UECBMGUGFyYW5hMREwDwYDVQQHEwhDdXJpdGliYTESMBAG A1UEChMJTGFuZHNjYXBlMRAwDgYDVQQLEwdUZXN0aW5nMRIwEAYDVQQDEwlsb2Nh bGhvc3QxJDAiBgkqhkiG9w0BCQEWFWFuZHJlYXNAY2Fub25pY2FsLmNvbTAeFw0w OTAxMDgxNjQxMzlaFw0xOTAxMDYxNjQxMzlaMIGRMQswCQYDVQQGEwJCUjEPMA0G A1UECBMGUGFyYW5hMREwDwYDVQQHEwhDdXJpdGliYTESMBAGA1UEChMJTGFuZHNj YXBlMRAwDgYDVQQLEwdUZXN0aW5nMRIwEAYDVQQDEwlsb2NhbGhvc3QxJDAiBgkq hkiG9w0BCQEWFWFuZHJlYXNAY2Fub25pY2FsLmNvbTCBnzANBgkqhkiG9w0BAQEF AAOBjQAwgYkCgYEA19lTRA2R7ZeZ4pjaHLIWq/KQZo6ohp/aJQkdi3Con7iWOGxU 4Kom/ka979g8A02zjGqrkUsRYMCySwroN1nJbBq2EjyLBQsfpN14ajOtSVxMKyri KSbPPd1x5HoN0hBbmv6FXw8TXybbaB1txW7Jw8efeuJGMBhMYyXB+9xiJ1ECAwEA AaOB+TCB9jAdBgNVHQ4EFgQU3eUz2XxK1J/oavkn/hAvYfGOZM0wgcYGA1UdIwSB vjCBu4AU3eUz2XxK1J/oavkn/hAvYfGOZM2hgZekgZQwgZExCzAJBgNVBAYTAkJS MQ8wDQYDVQQIEwZQYXJhbmExETAPBgNVBAcTCEN1cml0aWJhMRIwEAYDVQQKEwlM YW5kc2NhcGUxEDAOBgNVBAsTB1Rlc3RpbmcxEjAQBgNVBAMTCWxvY2FsaG9zdDEk MCIGCSqGSIb3DQEJARYVYW5kcmVhc0BjYW5vbmljYWwuY29tggkAs+NayScELXkw DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOBgQBZQcqhHAsasX3WtCXlIKqH hE4ZdsvtPHOnoWPxxN4CZEyu2YJ2PMXCkA7yISNokAZgkOpkYGPWwV3CwNCw032u +ngwIo2sxx7ag8tVrYkIda717oBw7opDMVrjTNhZdak7s+hg+s9ZDPUMMcbJFtlN lmayn/uZSyog4Y+yriB1tQ== -----END CERTIFICATE----- landscape-client-14.01/landscape/broker/tests/__init__.py0000644000175000017500000000000012301414317023214 0ustar andreasandreaslandscape-client-14.01/landscape/broker/tests/test_exchangestore.py0000644000175000017500000000626712301414317025400 0ustar andreasandreasimport time try: import sqlite3 except ImportError: from pysqlite2 import dbapi2 as sqlite3 from landscape.tests.helpers import LandscapeTest from landscape.broker.exchangestore import ExchangeStore class ExchangeStoreTest(LandscapeTest): """Unit tests for the C{ExchangeStore}.""" def setUp(self): super(ExchangeStoreTest, self).setUp() self.filename = self.makeFile() self.store1 = ExchangeStore(self.filename) self.store2 = ExchangeStore(self.filename) def test_add_message_context(self): """Adding a message context works correctly.""" now = time.time() self.store1.add_message_context(123, 'abc', 'change-packages') db = sqlite3.connect(self.store2._filename) cursor = db.cursor() cursor.execute( "SELECT operation_id, secure_id, message_type, timestamp " "FROM message_context WHERE operation_id=?", (123,)) results = cursor.fetchall() self.assertEqual(1, len(results)) [row] = results self.assertEqual(123, row[0]) self.assertEqual('abc', row[1]) self.assertEqual('change-packages', row[2]) self.assertTrue(row[3] > now) def test_add_message_context_with_duplicate_operation_id(self): """Only one message context with a given operation-id is permitted.""" self.store1.add_message_context(123, "abc", "change-packages") self.assertRaises( (sqlite3.IntegrityError, sqlite3.OperationalError), self.store1.add_message_context, 123, "def", "change-packages") def test_get_message_context(self): """ Accessing a C{MessageContext} with an existing C{operation-id} works. 
""" now = time.time() self.store1.add_message_context(234, 'bcd', 'change-packages') context = self.store2.get_message_context(234) self.assertEqual(234, context.operation_id) self.assertEqual('bcd', context.secure_id) self.assertEqual('change-packages', context.message_type) self.assertTrue(context.timestamp > now) def test_get_message_context_with_nonexistent_operation_id(self): """Attempts to access a C{MessageContext} with a non-existent C{operation-id} result in C{None}.""" self.assertIs(None, self.store1.get_message_context(999)) def test_message_context_remove(self): """C{MessageContext}s are deleted correctly.""" context = self.store1.add_message_context( 345, 'opq', 'change-packages') context.remove() self.assertIs(None, self.store1.get_message_context(345)) def test_all_operation_ids_for_empty_database(self): """ Calling C{all_operation_ids} on an empty database returns an empty list. """ self.assertEqual([], self.store1.all_operation_ids()) def test_all_operation_ids(self): """C{all_operation_ids} works correctly.""" self.store1.add_message_context(456, 'cde', 'change-packages') self.assertEqual([456], self.store2.all_operation_ids()) self.store2.add_message_context(567, 'def', 'change-packages') self.assertEqual([456, 567], self.store1.all_operation_ids()) landscape-client-14.01/landscape/broker/tests/test_config.py0000644000175000017500000001263312301414317024000 0ustar andreasandreasimport os from landscape.broker.config import BrokerConfiguration from landscape.tests.helpers import LandscapeTest, EnvironSaverHelper class ConfigurationTests(LandscapeTest): helpers = [EnvironSaverHelper] def test_loading_sets_http_proxies(self): """ The L{BrokerConfiguration.load} method sets the 'http_proxy' and 'https_proxy' enviroment variables to the provided values. """ if "http_proxy" in os.environ: del os.environ["http_proxy"] if "https_proxy" in os.environ: del os.environ["https_proxy"] configuration = BrokerConfiguration() configuration.load(["--http-proxy", "foo", "--https-proxy", "bar", "--url", "whatever"]) self.assertEqual(os.environ["http_proxy"], "foo") self.assertEqual(os.environ["https_proxy"], "bar") def test_loading_without_http_proxies_does_not_touch_environment(self): """ The L{BrokerConfiguration.load} method doesn't override the 'http_proxy' and 'https_proxy' enviroment variables if they are already set and no new value was specified. """ os.environ["http_proxy"] = "heyo" os.environ["https_proxy"] = "baroo" configuration = BrokerConfiguration() configuration.load(["--url", "whatever"]) self.assertEqual(os.environ["http_proxy"], "heyo") self.assertEqual(os.environ["https_proxy"], "baroo") def test_loading_resets_http_proxies(self): """ User scenario: Runs landscape-config, fat-fingers a random character into the http_proxy field when he didn't mean to. runs it again, this time leaving it blank. The proxy should be reset to whatever environment-supplied proxy there was at startup. 
""" os.environ["http_proxy"] = "original" os.environ["https_proxy"] = "originals" configuration = BrokerConfiguration() configuration.load(["--http-proxy", "x", "--https-proxy", "y", "--url", "whatever"]) self.assertEqual(os.environ["http_proxy"], "x") self.assertEqual(os.environ["https_proxy"], "y") configuration.load(["--url", "whatever"]) self.assertEqual(os.environ["http_proxy"], "original") self.assertEqual(os.environ["https_proxy"], "originals") def test_default_exchange_intervals(self): """Exchange intervales are set to sane defaults.""" configuration = BrokerConfiguration() self.assertEqual(60, configuration.urgent_exchange_interval) self.assertEqual(900, configuration.exchange_interval) def test_intervals_are_ints(self): """ The 'urgent_exchange_interval, 'exchange_interval' and 'ping_interval' values specified in the configuration file are converted to integers. """ filename = self.makeFile("[client]\n" "urgent_exchange_interval = 12\n" "exchange_interval = 34\n" "ping_interval = 6\n") configuration = BrokerConfiguration() configuration.load(["--config", filename, "--url", "whatever"]) self.assertEqual(configuration.urgent_exchange_interval, 12) self.assertEqual(configuration.exchange_interval, 34) self.assertEqual(configuration.ping_interval, 6) def test_tag_handling(self): """ The 'tags' value specified in the configuration file is not converted to a list (it must be a string). See bug #1228301. """ filename = self.makeFile("[client]\n" "tags = check,linode,profile-test") configuration = BrokerConfiguration() configuration.load(["--config", filename, "--url", "whatever"]) self.assertEqual(configuration.tags, "check,linode,profile-test") def test_access_group_handling(self): """ The 'access_group' value specified in the configuration file is passed through. """ filename = self.makeFile("[client]\n" "access_group = webserver") configuration = BrokerConfiguration() configuration.load(["--config", filename, "--url", "whatever"]) self.assertEqual(configuration.access_group, "webserver") def test_missing_url_is_defaulted(self): """ Test that if we don't explicitly pass a URL, then this value is defaulted. """ filename = self.makeFile("[client]\n") configuration = BrokerConfiguration() configuration.load(["--config", filename]) self.assertEqual(configuration.url, "https://landscape.canonical.com/message-system") def test_server_autodiscover_handling(self): """ server_autodiscover is parsed and converted to a boolean value by load(). 
""" configuration = BrokerConfiguration() configuration.load([]) self.assertEqual(configuration.server_autodiscover, False) configuration = BrokerConfiguration() configuration.load(["--server-autodiscover=true"]) self.assertEqual(configuration.server_autodiscover, True) configuration = BrokerConfiguration() configuration.load(["--server-autodiscover=false"]) self.assertEqual(configuration.server_autodiscover, False) landscape-client-14.01/landscape/broker/tests/test_ping.py0000644000175000017500000002450412301414317023470 0ustar andreasandreasfrom landscape.tests.helpers import LandscapeTest from twisted.internet.defer import fail from landscape.lib.bpickle import dumps from landscape.lib.fetch import fetch from landscape.reactor import FakeReactor from landscape.broker.ping import PingClient, Pinger from landscape.broker.tests.helpers import ExchangeHelper class FakePageGetter(object): """An fake web client.""" def __init__(self, response): self.response = response self.fetches = [] def get_page(self, url, post, headers, data): """ A method which is supposed to act like a limited version of L{landscape.lib.fetch.fetch}. Record attempts to get pages, and return a deferred with pre-cooked data. """ self.fetches.append((url, post, headers, data)) return dumps(self.response) def failing_get_page(self, url, post, headers, data): """ A method which is supposed to act like a limited version of L{landscape.lib.fetch.fetch}. Record attempts to get pages, and return a deferred with pre-cooked data. """ raise AssertionError("That's a failure!") class PingClientTest(LandscapeTest): def setUp(self): super(PingClientTest, self).setUp() self.reactor = FakeReactor() def test_default_get_page(self): """ The C{get_page} argument to L{PingClient} should be optional, and default to L{twisted.web.client.getPage}. """ client = PingClient(self.reactor) self.assertEqual(client.get_page, fetch) def test_ping(self): """ L{PingClient} should be able to send a web request to a specified URL about a particular insecure ID. """ client = FakePageGetter(None) url = "http://localhost/ping" insecure_id = 10 pinger = PingClient(self.reactor, get_page=client.get_page) pinger.ping(url, insecure_id) self.assertEqual( client.fetches, [(url, True, {"Content-Type": "application/x-www-form-urlencoded"}, "insecure_id=10")]) def test_ping_no_insecure_id(self): """ If a L{PingClient} does not have an insecure-id yet, then the ping should not happen. """ client = FakePageGetter(None) pinger = PingClient(self.reactor, get_page=client.get_page) d = pinger.ping("http://ping/url", None) d.addCallback(self.assertEqual, False) self.assertEqual(client.fetches, []) def test_respond(self): """ The L{PingClient.ping} fire the Deferred it returns with True if the web request indicates that the computer has messages. """ client = FakePageGetter({"messages": True}) pinger = PingClient(self.reactor, get_page=client.get_page) d = pinger.ping("http://ping/url", 23) d.addCallback(self.assertEqual, True) def test_errback(self): """ If the HTTP request fails the deferred returned by L{PingClient.ping} fires back with an error. 
""" client = FakePageGetter(None) pinger = PingClient(self.reactor, get_page=client.failing_get_page) d = pinger.ping("http://ping/url", 23) failures = [] def errback(failure): failures.append(failure) d.addErrback(errback) self.assertEqual(len(failures), 1) self.assertEqual(failures[0].getErrorMessage(), "That's a failure!") self.assertEqual(failures[0].type, AssertionError) class PingerTest(LandscapeTest): helpers = [ExchangeHelper] # Tell the Plugin helper to not add a MessageExchange plugin, to interfere # with our code which asserts stuff about when *our* plugin fires # exchanges. install_exchanger = False def setUp(self): super(PingerTest, self).setUp() self.page_getter = FakePageGetter(None) def factory(reactor): return PingClient(reactor, get_page=self.page_getter.get_page) self.config.ping_url = "http://localhost:8081/whatever" self.config.ping_interval = 10 self.pinger = Pinger(self.reactor, self.identity, self.exchanger, self.config, ping_client_factory=factory) def test_default_ping_client(self): """ The C{ping_client_factory} argument to L{Pinger} should be optional, and default to L{PingClient}. """ pinger = Pinger(self.reactor, self.identity, self.exchanger, self.config) self.assertEqual(pinger.ping_client_factory, PingClient) def test_occasional_ping(self): """ The L{Pinger} should be able to occasionally ask if there are messages. """ self.pinger.start() self.identity.insecure_id = 23 self.reactor.advance(9) self.assertEqual(len(self.page_getter.fetches), 0) self.reactor.advance(1) self.assertEqual(len(self.page_getter.fetches), 1) def test_load_insecure_id(self): """ If the insecure-id has already been saved when the plugin is registered, it should immediately start pinging. """ self.identity.insecure_id = 42 self.pinger.start() self.reactor.advance(10) self.assertEqual(len(self.page_getter.fetches), 1) def test_response(self): """ When a ping indicates there are messages, an exchange should occur. """ self.pinger.start() self.identity.insecure_id = 42 self.page_getter.response = {"messages": True} # 70 = ping delay + urgent exchange delay self.reactor.advance(70) self.assertEqual(len(self.transport.payloads), 1) def test_negative_response(self): """ When a ping indicates there are no messages, no exchange should occur. """ self.pinger.start() self.identity.insecure_id = 42 self.page_getter.response = {"messages": False} self.reactor.advance(10) self.assertEqual(len(self.transport.payloads), 0) def test_ping_error(self): """ When the web interaction fails for some reason, a message should be logged. """ self.log_helper.ignore_errors(ZeroDivisionError) self.identity.insecure_id = 42 class BadPingClient(object): def __init__(self, *args, **kwargs): pass def ping(self, url, secure_id): self.url = url return fail(ZeroDivisionError("Couldn't fetch page")) self.config.ping_url = "http://foo.com/" pinger = Pinger(self.reactor, self.identity, self.exchanger, self.config, ping_client_factory=BadPingClient) pinger.start() self.reactor.advance(30) log = self.logfile.getvalue() self.assertTrue("Error contacting ping server at " "http://foo.com/" in log, log) self.assertTrue("ZeroDivisionError" in log) self.assertTrue("Couldn't fetch page" in log) def test_get_interval(self): self.assertEqual(self.pinger.get_interval(), 10) def test_set_intervals_handling(self): self.pinger.start() self.reactor.fire("message", {"type": "set-intervals", "ping": 73}) self.assertEqual(self.pinger.get_interval(), 73) # The server may set specific intervals only, not including the ping. 
self.reactor.fire("message", {"type": "set-intervals"}) self.assertEqual(self.pinger.get_interval(), 73) self.identity.insecure_id = 23 self.reactor.advance(72) self.assertEqual(len(self.page_getter.fetches), 0) self.reactor.advance(1) self.assertEqual(len(self.page_getter.fetches), 1) def test_get_url(self): self.assertEqual(self.pinger.get_url(), "http://localhost:8081/whatever") def test_config_url(self): """ The L{Pinger} uses the ping URL set in the given configuration. """ self.identity.insecure_id = 23 url = "http://example.com/mysuperping" self.config.ping_url = url self.pinger.start() self.reactor.advance(10) self.assertEqual(self.page_getter.fetches[0][0], url) def test_reschedule(self): """ Each time a ping is completed the L{Pinger} schedules a new ping using the current ping interval. """ self.identity.insecure_id = 23 self.pinger.start() self.reactor.advance(10) self.assertEqual(1, len(self.page_getter.fetches)) self.reactor.advance(10) self.assertEqual(2, len(self.page_getter.fetches)) def test_reschedule_with_ping_interval_change(self): """ If the ping interval changes, new pings will be scheduled accordingly. """ self.identity.insecure_id = 23 self.pinger.start() self.reactor.advance(5) # Simulate interval changing in the meantime self.config.ping_interval = 20 self.reactor.advance(5) self.assertEqual(1, len(self.page_getter.fetches)) # The new interval is 20, so after only 10 seconds nothing happens self.reactor.advance(10) self.assertEqual(1, len(self.page_getter.fetches)) # After another 10 seconds we reach the 20 seconds interval and the # ping is triggered self.reactor.advance(10) self.assertEqual(2, len(self.page_getter.fetches)) def test_change_url_after_start(self): """ If the C{ping_url} set in the configuration is changed after the pinger has started, the target HTTP URL will adjust accordingly. """ url = "http://example.com/mysuperping" self.pinger.start() self.config.ping_url = url self.identity.insecure_id = 23 self.reactor.advance(10) self.assertEqual(self.page_getter.fetches[0][0], url) def test_ping_doesnt_ping_if_stopped(self): """If the L{Pinger} is stopped, no pings are performed.""" self.pinger.start() self.pinger.stop() self.reactor.advance(10) self.assertEqual([], self.page_getter.fetches) landscape-client-14.01/landscape/broker/client.py0000644000175000017500000002242112301414317021604 0ustar andreasandreasfrom logging import info, exception from twisted.internet.defer import maybeDeferred from landscape.log import format_object from landscape.lib.twisted_util import gather_results from landscape.amp import remote class HandlerNotFoundError(Exception): """A handler for the given message type was not found.""" class BrokerClientPlugin(object): """A convenience for writing L{BrokerClient} plugins. This provides a register method which will set up a bunch of reactor handlers in the idiomatic way. If C{run} is defined on subclasses, it will be called every C{run_interval} seconds after being registered. @cvar run_interval: The interval, in seconds, to execute the C{run} method. If set to C{None}, then C{run} will not be scheduled. @cvar run_immediately: If C{True} the plugin will be run immediately after it is registered. @ivar _session_id: the session id to be passed when sending messages via the broker. This variable is set by the C{register} method and should only need to be renewed when a re-synchronisation request is sent. See L{landscape.broker.server.BrokerServer.send_message} for more details.
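    A minimal sketch of a concrete plugin (a hypothetical example; the
    "example" message type would have to be accepted by the server):

        class ExamplePlugin(BrokerClientPlugin):

            run_interval = 60  # seconds between calls to run()
            scope = "example"

            def run(self):
                # _session_id is available once registration has completed.
                self.client.broker.send_message(
                    {"type": "example"}, self._session_id)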
""" run_interval = 5 run_immediately = False scope = None # Global scope _session_id = None _loop = None def register(self, client): self.client = client self.client.reactor.call_on("resynchronize", self._resynchronize) deferred = self.client.broker.get_session_id(scope=self.scope) deferred.addCallback(self._got_session_id) @property def registry(self): """An alias for the C{client} attribute.""" return self.client def call_on_accepted(self, type, callable, *args, **kwargs): """ Register a callback fired upon a C{message-type-acceptance-changed}. """ def acceptance_changed(acceptance): if acceptance: return callable(*args, **kwargs) self.client.reactor.call_on(("message-type-acceptance-changed", type), acceptance_changed) def _resynchronize(self, scopes=None): """ Handle the 'resynchronize' event. Subclasses should do any clear-down operations specific to their state within an implementation of the L{_reset} method. """ if not (scopes is None or self.scope in scopes): # This resynchronize event is out of scope for us. Do nothing return # Because the broker will drop session IDs already associated to scope # of the resynchronisation, it isn't safe to send messages until the # client has received a new session ID. Therefore we pause any calls # to L{run} by cancelling L{_loop}, this will be restarted in # L{_got_session_id}. if self._loop is not None: self.client.reactor.cancel_call(self._loop) # Do any state clean up required by the plugin. self._reset() deferred = self.client.broker.get_session_id(scope=self.scope) deferred.addCallback(self._got_session_id) return deferred def _reset(self): """ Reset plugin specific state. Sub-classes should override this method to clear down data for resynchronisation. Sub-classes with no state can simply ignore this. """ def _got_session_id(self, session_id): """Save the session ID and invoke the C{run} method. We set the C{_session_id} attribute on the instance because it's required in order to send messages. See L{BrokerService.get_session_id}. """ self._session_id = session_id if getattr(self, "run", None) is not None: if self.run_immediately: self.run() if self.run_interval is not None: self._loop = self.client.reactor.call_every(self.run_interval, self.run) class BrokerClient(object): """Basic plugin registry for clients that have to deal with the broker. This knows about the needs of a client when dealing with the Landscape broker, including interest in messages of a particular type delivered by the broker to the client. @cvar name: The name used when registering to the broker, it must be defined by sub-classes. @ivar broker: A reference to a connected L{RemoteBroker}, it must be set by the connecting machinery at service startup. @param reactor: A L{LandscapeReactor}. """ name = "client" def __init__(self, reactor): super(BrokerClient, self).__init__() self.reactor = reactor self.broker = None self._registered_messages = {} self._plugins = [] self._plugin_names = {} # Register event handlers self.reactor.call_on("impending-exchange", self.notify_exchange) self.reactor.call_on("broker-reconnect", self.handle_reconnect) @remote def ping(self): """Return C{True}""" return True def add(self, plugin): """Add a plugin. The plugin's C{register} method will be called with this broker client as its argument. If the plugin has a C{plugin_name} attribute, it will be possible to look up the plugin later with L{get_plugin}. 
""" info("Registering plugin %s.", format_object(plugin)) self._plugins.append(plugin) if hasattr(plugin, 'plugin_name'): self._plugin_names[plugin.plugin_name] = plugin plugin.register(self) def get_plugins(self): """Get the list of plugins.""" return self._plugins[:] def get_plugin(self, name): """Get a particular plugin by name.""" return self._plugin_names[name] def register_message(self, type, handler): """ Register interest in a particular type of Landscape server->client message. @param type: The type of message to register C{handler} for. @param handler: A callable taking a message as a parameter, called when messages of C{type} are received. @return: A C{Deferred} that will fire when registration completes. """ self._registered_messages[type] = handler return self.broker.register_client_accepted_message_type(type) def dispatch_message(self, message): """Run the handler registered for the type of the given message. @return: The return value of the handler, if found. @raises: HandlerNotFoundError if the handler was not found """ type = message["type"] handler = self._registered_messages.get(type) if handler is None: raise HandlerNotFoundError(type) try: return handler(message) except: exception("Error running message handler for type %r: %r" % (type, handler)) @remote def message(self, message): """Call C{dispatch_message} for the given C{message}. @return: A boolean indicating if a handler for the message was found. """ try: self.dispatch_message(message) return True except HandlerNotFoundError: return False def exchange(self): """Call C{exchange} on all plugins.""" for plugin in self.get_plugins(): if hasattr(plugin, "exchange"): try: plugin.exchange() except: exception("Error during plugin exchange") def notify_exchange(self): """Notify all plugins about an impending exchange.""" info("Got notification of impending exchange. Notifying all plugins.") self.exchange() @remote def fire_event(self, event_type, *args, **kwargs): """Fire an event of a given type. @return: A L{Deferred} resulting in a list of returns values of the fired event handlers, in the order they were fired. """ if event_type == "message-type-acceptance-changed": message_type = args[0] acceptance = args[1] results = self.reactor.fire((event_type, message_type), acceptance) else: results = self.reactor.fire(event_type, *args, **kwargs) return gather_results([ maybeDeferred(lambda x: x, result) for result in results]) def handle_reconnect(self): """Called when the connection with the broker is established again. The following needs to be done: - Re-register any previously registered message types, so the broker knows we have interest on them. - Re-register ourselves as client, so the broker knows we exist and will talk to us firing events and dispatching messages. """ for type in self._registered_messages: self.broker.register_client_accepted_message_type(type) self.broker.register_client(self.name) @remote def exit(self): """Stop the reactor and exit the process.""" # Stop with a short delay to give a chance to reply to the caller when # this method is invoked over AMP (typically by the broker, see also # landscape.broker.server.BrokerServer.exit). self.reactor.call_later(0.1, self.reactor.stop) landscape-client-14.01/landscape/broker/__init__.py0000644000175000017500000000312512301414317022065 0ustar andreasandreas"""The broker mediates communication between client and server. Each communication between client and server is initiated by the broker and performed with an HTTP POST, called `exchange`. 
The client sends messages to the server by including them in the request body, and the server sends messages to the client by including them in the request response (see L{landscape.broker.exchange} and L{landscape.broker.transport}). Client components running in different processes (like the manager and the monitor) connect to the broker process using Unix sockets and can then ask the broker to perform actions like queuing messages for delivery to the server or to dispatch to them all messages of a given type that the broker receives from the server (see L{landscape.broker.server} and L{landscape.broker.amp}). When the broker is started for the first time, it will perform a few exchanges to register the client against the server using the values provided in the configuration file (see L{landscape.broker.registration}). If the registration is successful, or the client was previously registered, the broker will start pinging the server to check if there are messages that the server wants to deliver to the client and if so will schedule an urgent exchange (see L{landscape.broker.ping}). In case the ping check says that there are no messages from the server, the broker will still perform an exchange every 15 minutes (see L{BrokerConfiguration}), to deliver to the server possible messages generated by the client (i.e. by the broker itself or by the other client components like the monitor and the manager). """ landscape-client-14.01/landscape/broker/transport.py0000644000175000017500000001072112301414317022362 0ustar andreasandreas"""Low-level server communication.""" import time import logging import pprint import uuid import pycurl from landscape.lib.fetch import fetch from landscape.lib import bpickle from landscape.log import format_delta from landscape import SERVER_API, VERSION class HTTPTransport(object): """Transport makes a request to exchange message data over HTTP. @param url: URL of the remote Landscape server message system. @param pubkey: SSL public certificate used for secure communication. """ def __init__(self, reactor, url, pubkey=None): self._reactor = reactor self._url = url self._pubkey = pubkey def get_url(self): """Get the URL of the remote message system.""" return self._url def set_url(self, url): """Set the URL of the remote message system.""" self._url = url def _curl(self, payload, computer_id, exchange_token, message_api): headers = {"X-Message-API": message_api, "User-Agent": "landscape-client/%s" % VERSION, "Content-Type": "application/octet-stream"} if computer_id: headers["X-Computer-ID"] = computer_id if exchange_token: headers["X-Exchange-Token"] = str(exchange_token) curl = pycurl.Curl() return (curl, fetch(self._url, post=True, data=payload, headers=headers, cainfo=self._pubkey, curl=curl)) def exchange(self, payload, computer_id=None, exchange_token=None, message_api=SERVER_API): """Exchange message data with the server. @param payload: The object to send, it must be L{bpickle}-compatible. @param computer_id: The computer ID to send the message as (see also L{Identity}). @param exchange_token: The token that the server has given us at the last exchange. It's used to prove that we are still the same client. @type: C{dict} @return: The server's response to sent message or C{None} in case of error. @note: This code is thread safe (HOPEFULLY).
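        A minimal sketch of how this method is driven (the URL is a
        placeholder):

            transport = HTTPTransport(reactor, "https://example.com/message-system")
            response = transport.exchange({"messages": []},
                                          computer_id="123",
                                          exchange_token="abcd-efgh")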
""" spayload = bpickle.dumps(payload) start_time = time.time() if logging.getLogger().getEffectiveLevel() <= logging.DEBUG: logging.debug("Sending payload:\n%s", pprint.pformat(payload)) try: curly, data = self._curl(spayload, computer_id, exchange_token, message_api) except: logging.exception("Error contacting the server at %s." % self._url) raise else: logging.info("Sent %d bytes and received %d bytes in %s.", len(spayload), len(data), format_delta(time.time() - start_time)) try: response = bpickle.loads(data) except: logging.exception("Server returned invalid data: %r" % data) return None else: if logging.getLogger().getEffectiveLevel() <= logging.DEBUG: logging.debug( "Received payload:\n%s", pprint.pformat(response)) return response class FakeTransport(object): """Fake transport for testing purposes.""" def __init__(self, reactor=None, url=None, pubkey=None): self._pubkey = pubkey self.payloads = [] self.responses = [] self._current_response = 0 self.next_expected_sequence = 0 self.computer_id = None self.exchange_token = None self.message_api = None self.extra = {} self._url = url self._reactor = reactor def get_url(self): return self._url def set_url(self, url): self._url = url def exchange(self, payload, computer_id=None, exchange_token=None, message_api=SERVER_API): self.payloads.append(payload) self.computer_id = computer_id self.exchange_token = exchange_token self.message_api = message_api self.next_expected_sequence += len(payload.get("messages", [])) if self._current_response < len(self.responses): response = self.responses[self._current_response] self._current_response += 1 else: response = [] result = {"next-expected-sequence": self.next_expected_sequence, "next-exchange-token": unicode(uuid.uuid4()), "messages": response} result.update(self.extra) return result landscape-client-14.01/landscape/broker/amp.py0000644000175000017500000000670312301414317021110 0ustar andreasandreasfrom twisted.internet.defer import maybeDeferred, execute, succeed from landscape.lib.amp import RemoteObject, MethodCallArgument from landscape.amp import ComponentConnector, get_remote_methods from landscape.broker.server import BrokerServer from landscape.broker.client import BrokerClient from landscape.monitor.monitor import Monitor from landscape.manager.manager import Manager class RemoteBroker(RemoteObject): def call_if_accepted(self, type, callable, *args): """Call C{callable} if C{type} is an accepted message type.""" deferred_types = self.get_accepted_message_types() def got_accepted_types(result): if type in result: return callable(*args) deferred_types.addCallback(got_accepted_types) return deferred_types def call_on_event(self, handlers): """Call a given handler as soon as a certain event occurs. @param handlers: A dictionary mapping event types to callables, where an event type is string (the name of the event). When the first of the given event types occurs in the broker reactor, the associated callable will be fired. """ result = self.listen_events(handlers.keys()) return result.addCallback(lambda event_type: handlers[event_type]()) class FakeRemoteBroker(object): """Looks like L{RemoteBroker}, but actually talks to local objects.""" def __init__(self, exchanger, message_store, broker_server): self.exchanger = exchanger self.message_store = message_store self.broker_server = broker_server def __getattr__(self, name): """ Pass attributes through to the real L{BrokerServer}, after checking that they're encodable with AMP. 
""" original = getattr(self.broker_server, name, None) if (name in get_remote_methods(self.broker_server) and original is not None and callable(original)): def method(*args, **kwargs): for arg in args: assert MethodCallArgument.check(arg) for k, v in kwargs.iteritems(): assert MethodCallArgument.check(v) return execute(original, *args, **kwargs) return method else: raise AttributeError(name) def call_if_accepted(self, type, callable, *args): if type in self.message_store.get_accepted_types(): return maybeDeferred(callable, *args) return succeed(None) class RemoteBrokerConnector(ComponentConnector): """Helper to create connections with the L{BrokerServer}.""" remote = RemoteBroker component = BrokerServer class RemoteClientConnector(ComponentConnector): """Helper to create connections with the L{BrokerServer}.""" component = BrokerClient class RemoteMonitorConnector(RemoteClientConnector): """Helper to create connections with the L{Monitor}.""" component = Monitor class RemoteManagerConnector(RemoteClientConnector): """Helper for creating connections with the L{Monitor}.""" component = Manager def get_component_registry(): """Get a mapping of component name to connectors, for all components.""" all_connectors = [ RemoteBrokerConnector, RemoteClientConnector, RemoteMonitorConnector, RemoteManagerConnector ] return dict( (connector.component.name, connector) for connector in all_connectors) landscape-client-14.01/landscape/constants.py0000644000175000017500000000245212301414317021060 0ustar andreasandreas""" Hold constants used across landscape, to reduce import size when one only needs to look at those values. """ APT_PREFERENCES_SIZE_LIMIT = 1048576 # 1 MByte # The name "UBUNTU" is used in the variable name due to the fact that the path # is Ubuntu-specific, taken from /etc/login.defs. UBUNTU_PATH = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" SUCCESS_RESULT = 1 ERROR_RESULT = 100 DEPENDENCY_ERROR_RESULT = 101 CLIENT_VERSION_ERROR_RESULT = 102 POLICY_STRICT = 0 POLICY_ALLOW_INSTALLS = 1 POLICY_ALLOW_ALL_CHANGES = 2 # The amount of time to wait while we have unknown package data before # reporting an error to the server in response to an operation. # The two common cases of this are: # 1. The server requested an operation that we've decided requires some # dependencies, but we don't know the package ID of those dependencies. It # should only take a bit more than 10 minutes for that to be resolved by the # package reporter. # 2. We lost some package data, for example by a deb archive becoming # inaccessible for a while. The earliest we can reasonably assume that to be # resolved is in 60 minutes, when the package reporter runs again. # So we'll give the problem one chance to resolve itself, by only waiting for # one run of apt-update. 
UNKNOWN_PACKAGE_DATA_TIMEOUT = 70 * 60 landscape-client-14.01/landscape/monitor/0000755000175000017500000000000012301414317020156 5ustar andreasandreaslandscape-client-14.01/landscape/monitor/jujuinfo.py0000644000175000017500000000222012301414317022355 0ustar andreasandreasimport logging from landscape.lib.juju import get_juju_info from landscape.monitor.plugin import MonitorPlugin class JujuInfo(MonitorPlugin): """Plugin for reporting Juju information.""" persist_name = "juju-info" scope = "juju" def register(self, registry): super(JujuInfo, self).register(registry) self.call_on_accepted("juju-info", self.send_juju_message, True) def exchange(self, urgent=False): broker = self.registry.broker broker.call_if_accepted("juju-info", self.send_juju_message, urgent) def send_juju_message(self, urgent=False): message = self._create_juju_info_message() if message: message["type"] = "juju-info" logging.info("Queuing message with updated juju info.") self.registry.broker.send_message(message, self._session_id, urgent=urgent) def _create_juju_info_message(self): message = get_juju_info(self.registry.config) if message != self._persist.get("juju-info"): self._persist.set("juju-info", message) return message return None landscape-client-14.01/landscape/monitor/plugin.py0000644000175000017500000000546612301414317022041 0ustar andreasandreasfrom logging import info from twisted.internet.defer import succeed from landscape.log import format_object from landscape.lib.log import log_failure from landscape.broker.client import BrokerClientPlugin class MonitorPlugin(BrokerClientPlugin): """ @cvar persist_name: If specified as a string, a C{_persist} attribute will be available after registration. """ persist_name = None scope = None def register(self, monitor): super(MonitorPlugin, self).register(monitor) if self.persist_name is not None: self._persist = self.monitor.persist.root_at(self.persist_name) else: self._persist = None def _reset(self): if self.persist_name is not None: self.registry.persist.remove(self.persist_name) @property def persist(self): """Return our L{Persist}, if any.""" return self._persist @property def monitor(self): """An alias for the C{client} attribute.""" return self.client class DataWatcher(MonitorPlugin): """ A utility for plugins which send data to the Landscape server which does not constantly change. New messages will only be sent when the result of get_data() has changed since the last time it was called. Subclasses should provide a get_data method, and message_type, message_key, and persist_name class attributes. """ message_type = None message_key = None def get_message(self): """ Construct a message with the latest data, or None, if the data has not changed since the last call. """ data = self.get_data() if self._persist.get("data") != data: self._persist.set("data", data) return {"type": self.message_type, self.message_key: data} def send_message(self, urgent): message = self.get_message() if message is not None: info("Queueing a message with updated data watcher info " "for %s.", format_object(self)) result = self.registry.broker.send_message( message, self._session_id, urgent=urgent) def persist_data(message_id): self.persist_data() result.addCallback(persist_data) result.addErrback(log_failure) return result return succeed(None) def persist_data(self): """ Sub-classes that need to defer the saving of persistent data should override this method. """ pass def exchange(self, urgent=False): """ Conditionally add a message to the message store if new data is available. 
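@note: A minimal (purely hypothetical) subclass only needs the class
    attributes and C{get_data}, e.g.::

        class ExampleWatcher(DataWatcher):
            persist_name = "example-watcher"
            message_type = "example-info"
            message_key = "data"

            def get_data(self):
                return u"some slowly-changing value"

    A message is queued only when the value returned by C{get_data}
    differs from the one recorded in the persist store.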
""" return self.registry.broker.call_if_accepted(self.message_type, self.send_message, urgent) landscape-client-14.01/landscape/monitor/usermonitor.py0000644000175000017500000000734112301414317023123 0ustar andreasandreasfrom twisted.internet.defer import maybeDeferred from landscape.lib.log import log_failure from landscape.amp import ComponentPublisher, ComponentConnector, remote from landscape.monitor.plugin import MonitorPlugin from landscape.user.changes import UserChanges from landscape.user.provider import UserProvider class UserMonitor(MonitorPlugin): """ A plugin which monitors the system user databases. """ persist_name = "users" scope = "users" run_interval = 3600 # 1 hour name = "usermonitor" def __init__(self, provider=None): if provider is None: provider = UserProvider() self._provider = provider self._publisher = None def register(self, registry): super(UserMonitor, self).register(registry) self.call_on_accepted("users", self._run_detect_changes, None) self._publisher = ComponentPublisher(self, self.registry.reactor, self.registry.config) self._publisher.start() def stop(self): """Stop listening for incoming AMP connections.""" if self._publisher: self._publisher.stop() self._publisher = None def _reset(self): """Reset user and group data.""" super(UserMonitor, self)._reset() return self._run_detect_changes() @remote def detect_changes(self, operation_id=None): return self.registry.broker.call_if_accepted( "users", self._run_detect_changes, operation_id) run = detect_changes def _run_detect_changes(self, operation_id=None): """ If changes are detected an C{urgent-exchange} is fired to send updates to the server immediately. @param operation_id: When present it will be included in the C{operation-id} field. """ from landscape.manager.usermanager import RemoteUserManagerConnector user_manager_connector = RemoteUserManagerConnector( self.registry.reactor, self.registry.config) # We'll skip checking the locked users if we're in monitor-only mode. 
if getattr(self.registry.config, "monitor_only", False): result = maybeDeferred(self._detect_changes, [], operation_id) else: def get_locked_usernames(user_manager): return user_manager.get_locked_usernames() def disconnect(locked_usernames): user_manager_connector.disconnect() return locked_usernames result = user_manager_connector.connect() result.addCallback(get_locked_usernames) result.addCallback(disconnect) result.addCallback(self._detect_changes, operation_id) result.addErrback(lambda f: self._detect_changes([], operation_id)) return result def _detect_changes(self, locked_users, operation_id=None): def update_snapshot(result): changes.snapshot() return result def log_error(result): log_failure(result, "Error occurred calling send_message in " "_detect_changes") self._provider.locked_users = locked_users changes = UserChanges(self._persist, self._provider) message = changes.create_diff() if message: message["type"] = "users" if operation_id: message["operation-id"] = operation_id result = self.registry.broker.send_message( message, self._session_id, urgent=True) result.addCallback(update_snapshot) result.addErrback(log_error) return result class RemoteUserMonitorConnector(ComponentConnector): component = UserMonitor landscape-client-14.01/landscape/monitor/mountinfo.py0000644000175000017500000001306212301414317022550 0ustar andreasandreasimport time import os from landscape.lib.disk import get_mount_info, is_device_removable from landscape.lib.monitor import CoverageMonitor from landscape.accumulate import Accumulator from landscape.monitor.plugin import MonitorPlugin class MountInfo(MonitorPlugin): persist_name = "mount-info" scope = "disk" max_free_space_items_to_exchange = 200 def __init__(self, interval=300, monitor_interval=60 * 60, mounts_file="/proc/mounts", create_time=time.time, statvfs=None, mtab_file="/etc/mtab"): self.run_interval = interval self._monitor_interval = monitor_interval self._create_time = create_time self._mounts_file = mounts_file self._mtab_file = mtab_file if statvfs is None: statvfs = os.statvfs self._statvfs = statvfs self._free_space = [] self._mount_info = [] self._mount_info_to_persist = None self.is_device_removable = is_device_removable def register(self, registry): super(MountInfo, self).register(registry) self._accumulate = Accumulator(self._persist, self.registry.step_size) self._monitor = CoverageMonitor(self.run_interval, 0.8, "mount info snapshot", create_time=self._create_time) self.registry.reactor.call_every(self._monitor_interval, self._monitor.log) self.registry.reactor.call_on("stop", self._monitor.log, priority=2000) self.call_on_accepted("mount-info", self.send_messages, True) def create_messages(self): return filter(None, [self.create_mount_info_message(), self.create_free_space_message()]) def create_mount_info_message(self): if self._mount_info: message = {"type": "mount-info", "mount-info": self._mount_info} self._mount_info_to_persist = self._mount_info[:] self._mount_info = [] return message return None def create_free_space_message(self): if self._free_space: items_to_exchange = self._free_space[ :self.max_free_space_items_to_exchange] message = {"type": "free-space", "free-space": items_to_exchange} self._free_space = self._free_space[ self.max_free_space_items_to_exchange:] return message return None def send_messages(self, urgent=False): for message in self.create_messages(): d = self.registry.broker.send_message( message, self._session_id, urgent=urgent) if message["type"] == "mount-info":
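# Only persist the mount info once the broker has taken the
# message, so nothing is recorded as sent if queuing fails.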
d.addCallback(lambda x: self.persist_mount_info()) def exchange(self): self.registry.broker.call_if_accepted("mount-info", self.send_messages) def persist_mount_info(self): for timestamp, mount_info in self._mount_info_to_persist: mount_point = mount_info["mount-point"] self._persist.set(("mount-info", mount_point), mount_info) self._mount_info_to_persist = None # This forces the registry to write the persistent store to disk # This means that the persistent data reflects the state of the # messages sent. self.registry.flush() def run(self): self._monitor.ping() now = int(self._create_time()) current_mount_points = set() for mount_info in self._get_mount_info(): mount_point = mount_info["mount-point"] free_space = mount_info.pop("free-space") key = ("accumulate-free-space", mount_point) step_data = self._accumulate(now, free_space, key) if step_data: timestamp = step_data[0] free_space = int(step_data[1]) self._free_space.append((timestamp, mount_point, free_space)) prev_mount_info = self._persist.get(("mount-info", mount_point)) if not prev_mount_info or prev_mount_info != mount_info: if mount_info not in [m for t, m in self._mount_info]: self._mount_info.append((now, mount_info)) current_mount_points.add(mount_point) def _get_mount_info(self): """Generator yields local mount points worth recording data for.""" bound_mount_points = self._get_bound_mount_points() for info in get_mount_info(self._mounts_file, self._statvfs): device = info["device"] mount_point = info["mount-point"] if (device.startswith("/dev/") and not mount_point.startswith("/dev/") and not self.is_device_removable(device) and not mount_point in bound_mount_points): yield info def _get_bound_mount_points(self): """ Returns a set of mount points that have the "bind" option by parsing /etc/mtab. """ bound_points = set() if not self._mtab_file or not os.path.isfile(self._mtab_file): return bound_points file = open(self._mtab_file, "r") for line in file: try: device, mount_point, filesystem, options = line.split()[:4] mount_point = mount_point.decode("string-escape") except ValueError: continue if "bind" in options.split(","): bound_points.add(mount_point) return bound_points landscape-client-14.01/landscape/monitor/cpuusage.py0000644000175000017500000000760112301414317022350 0ustar andreasandreasimport time import logging from landscape.accumulate import Accumulator from landscape.lib.monitor import CoverageMonitor from landscape.monitor.plugin import MonitorPlugin LAST_MESURE_KEY = "last-cpu-usage-measure" ACCUMULATOR_KEY = "cpu-usage-accumulator" class CPUUsage(MonitorPlugin): """ Plugin that captures CPU usage information. """ persist_name = "cpu-usage" scope = "cpu" # Prevent the Plugin base-class from scheduling looping calls. 
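# (register() below schedules run() itself, every self._interval
# seconds.)
#
# Worked example for the formula in _get_cpu_usage(), with made-up
# numbers: if the previous cumulative sample was (value=1000,
# idle=800) and the new one is (value=1200, idle=950), then
# delta = 200 and the busy share is
# (delta - idle + previous_idle) / delta = (200 - 950 + 800) / 200
# = 50 / 200 = 0.25, i.e. the CPU was 25% busy over the interval.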
run_interval = None def __init__(self, interval=30, monitor_interval=60 * 60, create_time=time.time): self._interval = interval self._monitor_interval = monitor_interval self._cpu_usage_points = [] self._create_time = create_time self._stat_file = "/proc/stat" def register(self, registry): super(CPUUsage, self).register(registry) self._accumulate = Accumulator(self._persist, registry.step_size) self.registry.reactor.call_every(self._interval, self.run) self._monitor = CoverageMonitor(self._interval, 0.8, "CPU usage snapshot", create_time=self._create_time) self.registry.reactor.call_every(self._monitor_interval, self._monitor.log) self.registry.reactor.call_on("stop", self._monitor.log, priority=2000) self.call_on_accepted("cpu-usage", self.send_message, True) def create_message(self): cpu_points = self._cpu_usage_points self._cpu_usage_points = [] return {"type": "cpu-usage", "cpu-usages": cpu_points} def send_message(self, urgent=False): message = self.create_message() if len(message["cpu-usages"]): self.registry.broker.send_message(message, self._session_id, urgent=urgent) def exchange(self, urgent=False): self.registry.broker.call_if_accepted("cpu-usage", self.send_message, urgent) def run(self): self._monitor.ping() new_timestamp = int(self._create_time()) new_cpu_usage = self._get_cpu_usage(self._stat_file) step_data = None if new_cpu_usage is not None: step_data = self._accumulate(new_timestamp, new_cpu_usage, ACCUMULATOR_KEY) if step_data is not None: self._cpu_usage_points.append(step_data) def _get_cpu_usage(self, stat_file): """ This method computes the CPU usage from C{stat_file}. """ result = None try: with open(stat_file, "r") as f: # The first line of the file is the CPU information aggregated # across cores. stat = f.readline() except IOError: logging.error("Could not open %s for reading, " "CPU usage cannot be computed.", stat_file) return None # The cpu line is composed of: # ["cpu", user, nice, system, idle, iowait, irq, softirq, steal, guest, # guest nice] # The fields are a sum of USER_HZ quantums since boot spent in each # "category". We need to keep track of what the previous measure was, # since the current CPU usage will be calculated on the delta between # the previous measure and the current measure. # Remove the trailing "\n" fields = stat.split()[1:] idle = int(fields[3]) value = sum(int(i) for i in fields) previous = self._persist.get(LAST_MESURE_KEY) if previous is not None and value != previous[0]: delta = value - previous[0] if delta >= 0: result = (delta - idle + previous[1]) / float(delta) self._persist.set(LAST_MESURE_KEY, (value, idle)) return result landscape-client-14.01/landscape/monitor/loadaverage.py0000644000175000017500000000455312301414317023011 0ustar andreasandreasimport os import time from landscape.accumulate import Accumulator from landscape.lib.monitor import CoverageMonitor from landscape.monitor.plugin import MonitorPlugin class LoadAverage(MonitorPlugin): """Plugin captures information about load average.""" persist_name = "load-average" scope = "load" # Prevent the Plugin base-class from scheduling looping calls. 
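# os.getloadavg() returns a (1min, 5min, 15min) tuple; run() samples
# and accumulates only the 1-minute figure.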
run_interval = None def __init__(self, interval=15, monitor_interval=60*60, create_time=time.time, get_load_average=os.getloadavg): self._interval = interval self._monitor_interval = monitor_interval self._create_time = create_time self._load_averages = [] self._get_load_average = get_load_average def register(self, registry): super(LoadAverage, self).register(registry) self._accumulate = Accumulator(self._persist, registry.step_size) self.registry.reactor.call_every(self._interval, self.run) self._monitor = CoverageMonitor(self._interval, 0.8, "load average snapshot", create_time=self._create_time) self.registry.reactor.call_every(self._monitor_interval, self._monitor.log) self.registry.reactor.call_on("stop", self._monitor.log, priority=2000) self.call_on_accepted("load-average", self.send_message, True) def create_message(self): load_averages = self._load_averages self._load_averages = [] return {"type": "load-average", "load-averages": load_averages} def exchange(self, urgent=False): self.registry.broker.call_if_accepted("load-average", self.send_message, urgent) def send_message(self, urgent=False): message = self.create_message() if len(message["load-averages"]): self.registry.broker.send_message(message, self._session_id, urgent=urgent) def run(self): self._monitor.ping() new_timestamp = int(self._create_time()) new_load_average = self._get_load_average()[0] step_data = self._accumulate(new_timestamp, new_load_average, "accumulate") if step_data: self._load_averages.append(step_data) landscape-client-14.01/landscape/monitor/swiftdeviceinfo.py0000644000175000017500000001700612301414317023724 0ustar andreasandreasimport logging import time import os import json from landscape.lib.fetch import fetch, HTTPCodeError, PyCurlError, FetchError from landscape.lib.monitor import CoverageMonitor from landscape.lib.network import get_active_device_info from landscape.monitor.plugin import MonitorPlugin class SwiftDeviceInfo(MonitorPlugin): persist_name = "swift-device-info" scope = "storage" def __init__(self, interval=300, monitor_interval=60 * 60, create_time=time.time, swift_config="/etc/swift/object-server.conf", swift_ring="/etc/swift/object.ring.gz"): self.run_interval = interval self._monitor_interval = monitor_interval self._create_time = create_time self._fetch = fetch self._get_network_devices = get_active_device_info self._swift_config = swift_config # If exists, we are a swift node self._swift_ring = swift_ring # To discover swift recon port self._swift_recon_url = None self._create_time = create_time self._swift_device_info = [] self._swift_device_info_to_persist = [] self.enabled = True def register(self, registry): super(SwiftDeviceInfo, self).register(registry) self._monitor = CoverageMonitor(self.run_interval, 0.8, "swift device info snapshot", create_time=self._create_time) self.registry.reactor.call_every(self._monitor_interval, self._monitor.log) self.registry.reactor.call_on("stop", self._monitor.log, priority=2000) self.call_on_accepted("swift-device-info", self.send_messages, True) def create_swift_device_info_message(self): if self._swift_device_info: message = {"type": "swift-device-info", "swift-device-info": self._swift_device_info} self._swift_device_info_to_persist = self._swift_device_info[:] self._swift_device_info = [] return message return None def send_messages(self, urgent=False): message = self.create_swift_device_info_message() if message: logging.info("Queueing message with updated swift device info.") d = self.registry.broker.send_message( message, 
self._session_id, urgent=urgent) d.addCallback(lambda x: self.persist_swift_info()) def exchange(self): self.registry.broker.call_if_accepted("swift-device-info", self.send_messages) def persist_swift_info(self): for swift_device_info in self._swift_device_info_to_persist: device_name = swift_device_info["device"] key = (self.persist_name, device_name) self._persist.set(key, swift_device_info) self._swift_device_info_to_persist = None # This forces the registry to write the persistent store to disk # This means that the persistent data reflects the state of the # messages sent. self.registry.flush() def run(self): if not self.enabled: return self._monitor.ping() current_swift_devices = self._get_swift_devices() current_device_names = [] for swift_info in current_swift_devices: device_name = swift_info["device"] current_device_names.append(device_name) key = (self.persist_name, device_name) prev_swift_info = self._persist.get(key) if not prev_swift_info or prev_swift_info != swift_info: if swift_info not in self._swift_device_info: self._swift_device_info.append(swift_info) # Get all persisted devices and remove those that no longer exist persisted_devices = self._persist.get(self.persist_name) if persisted_devices: for device_name in persisted_devices.keys(): if device_name not in current_device_names: self._persist.remove((self.persist_name, device_name)) def _get_swift_devices(self): config_file = self._swift_config # Check if a swift storage config file is available. No need to run # if we know that we're not on a swift monitor node anyway. if not os.path.exists(config_file): # There is no config file - it's not a swift storage machine. self.enabled = False logging.info( "This does not appear to be a swift storage server. '%s' " "plugin has been disabled." % self.persist_name) return [] # Extract the swift service URL from the ringfile and cache it. if self._swift_recon_url is None: ring = self._get_ring() if ring is None: return [] network_devices = self._get_network_devices() local_ips = [device["ip_address"] for device in network_devices] # Grab first swift service with an IP on this host for dev in ring.devs: if dev and dev["ip"] in local_ips: self._swift_recon_url = "http://%s:%d/recon/diskusage" % ( dev['ip'], dev['port']) break if self._swift_recon_url is None: self.enabled = False logging.error( "Local swift service not found. '%s' plugin has " "been disabled." % self.persist_name) return [] recon_disk_info = self._get_swift_disk_usage() # We don't care about avail and free figures because we track # free_space for mounted devices in free-space messages return [{"device": "/dev/%s" % device["device"], "mounted": device["mounted"]} for device in recon_disk_info] def _get_swift_disk_usage(self): """ Query the swift storage usage data by parsing the curled recon data from http://localhost:<_swift_service_port>/recon/diskusage. Lots of recon data for the picking described at: http://docs.openstack.org/developer/swift/admin_guide.html """ error_message = None try: content = self._fetch(self._swift_recon_url) except HTTPCodeError, error: error_message = ( "Swift service is running without swift-recon enabled.") except (FetchError, PyCurlError), error: error_message = ( "Swift service not available at %s. %s." % (self._swift_recon_url, str(error))) if error_message is not None: self.enabled = False logging.error("%s '%s' plugin has been disabled." 
% ( error_message, self.persist_name)) return None if not content: return None swift_disk_usages = json.loads(content) # list of device dicts return swift_disk_usages def _get_ring(self): """Return ring-file object from self._swift_ring location""" if not os.path.exists(self._swift_ring): logging.warning( "Swift ring files are not available yet.") return None try: from swift.common.ring import Ring except ImportError: self.enabled = False logging.error( "Swift python common libraries not found. '%s' plugin has " "been disabled." % self.persist_name) return None return Ring(self._swift_ring) landscape-client-14.01/landscape/monitor/service.py0000644000175000017500000000433612301414317022176 0ustar andreasandreas"""Deployment code for the monitor.""" import os from twisted.python.reflect import namedClass from landscape.service import LandscapeService, run_landscape_service from landscape.monitor.config import MonitorConfiguration from landscape.monitor.monitor import Monitor from landscape.broker.amp import RemoteBrokerConnector from landscape.amp import ComponentPublisher class MonitorService(LandscapeService): """ The core Twisted Service which creates and runs all necessary monitoring components when started. """ service_name = Monitor.name def __init__(self, config): self.persist_filename = os.path.join( config.data_path, "%s.bpickle" % self.service_name) super(MonitorService, self).__init__(config) self.plugins = self.get_plugins() self.monitor = Monitor(self.reactor, self.config, self.persist, persist_filename=self.persist_filename) self.publisher = ComponentPublisher(self.monitor, self.reactor, self.config) def get_plugins(self): return [namedClass("landscape.monitor.%s.%s" % (plugin_name.lower(), plugin_name))() for plugin_name in self.config.plugin_factories] def startService(self): """Start the monitor.""" super(MonitorService, self).startService() self.publisher.start() def start_plugins(broker): self.broker = broker self.monitor.broker = broker for plugin in self.plugins: self.monitor.add(plugin) return self.broker.register_client(self.service_name) self.connector = RemoteBrokerConnector(self.reactor, self.config) connected = self.connector.connect() return connected.addCallback(start_plugins) def stopService(self): """Stop the monitor. The monitor is flushed to ensure that things like persist databases get saved to disk. 
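The publisher is stopped before flushing, so no new remote calls can arrive while state is being written out.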
""" self.publisher.stop() self.monitor.flush() self.connector.disconnect() super(MonitorService, self).stopService() def run(args): run_landscape_service(MonitorConfiguration, MonitorService, args) landscape-client-14.01/landscape/monitor/activeprocessinfo.py0000644000175000017500000000534312301414317024263 0ustar andreasandreasimport subprocess from landscape.diff import diff from landscape.lib.process import ProcessInformation from landscape.lib.jiffies import detect_jiffies from landscape.monitor.plugin import DataWatcher class ActiveProcessInfo(DataWatcher): message_type = "active-process-info" scope = "process" def __init__(self, proc_dir="/proc", boot_time=None, jiffies=None, uptime=None, popen=subprocess.Popen): super(ActiveProcessInfo, self).__init__() self._proc_dir = proc_dir self._persist_processes = {} self._previous_processes = {} self._jiffies_per_sec = jiffies or detect_jiffies() self._popen = popen self._first_run = True self._process_info = ProcessInformation(proc_dir=proc_dir, jiffies=jiffies, boot_time=boot_time, uptime=uptime) def register(self, manager): super(ActiveProcessInfo, self).register(manager) self.call_on_accepted(self.message_type, self.exchange, True) def _reset(self): """Reset active process data.""" self._first_run = True self._persist_processes = {} self._previous_processes = {} def get_message(self): message = {} if self._first_run: message["kill-all-processes"] = True message.update(self._detect_process_changes()) if message: message["type"] = "active-process-info" return message return None def persist_data(self): self._first_run = False self._persist_processes = self._previous_processes self._previous_processes = {} # This forces the registry to write the persistent store to disk # This means that the persistent data reflects the state of the # messages sent. self.registry.flush() def _get_processes(self): processes = {} for process_info in self._process_info.get_all_process_info(): if process_info["state"] != "X": processes[process_info["pid"]] = process_info return processes def _detect_process_changes(self): changes = {} processes = self._get_processes() creates, updates, deletes = diff(self._persist_processes, processes) if creates: changes["add-processes"] = list(creates.itervalues()) if updates: changes["update-processes"] = list(updates.itervalues()) if deletes: changes["kill-processes"] = list(deletes.iterkeys()) # Update cached values for use on the next run. 
self._previous_processes = processes return changes landscape-client-14.01/landscape/monitor/computerinfo.py0000644000175000017500000001451712301414317023252 0ustar andreasandreasimport os import logging from twisted.internet.defer import inlineCallbacks, returnValue from landscape.lib.fetch import fetch_async from landscape.lib.fs import read_file from landscape.lib.lsb_release import LSB_RELEASE_FILENAME, parse_lsb_release from landscape.lib.cloud import fetch_ec2_meta_data from landscape.lib.network import get_fqdn from landscape.monitor.plugin import MonitorPlugin METADATA_RETRY_MAX = 3 # Number of retries to get EC2 meta-data class DistributionInfoError(Exception): pass class ComputerInfo(MonitorPlugin): """Plugin captures and reports basic computer information.""" persist_name = "computer-info" scope = "computer" def __init__(self, get_fqdn=get_fqdn, meminfo_filename="/proc/meminfo", lsb_release_filename=LSB_RELEASE_FILENAME, root_path="/", fetch_async=fetch_async): self._get_fqdn = get_fqdn self._meminfo_filename = meminfo_filename self._lsb_release_filename = lsb_release_filename self._root_path = root_path self._cloud_instance_metadata = None self._cloud_retries = 0 self._fetch_async = fetch_async def register(self, registry): super(ComputerInfo, self).register(registry) self._annotations_path = registry.config.annotations_path self.call_on_accepted("computer-info", self.send_computer_message, True) self.call_on_accepted("distribution-info", self.send_distribution_message, True) self.call_on_accepted("cloud-instance-metadata", self.send_cloud_instance_metadata_message, True) def send_computer_message(self, urgent=False): message = self._create_computer_info_message() if message: message["type"] = "computer-info" logging.info("Queueing message with updated computer info.") self.registry.broker.send_message(message, self._session_id, urgent=urgent) def send_distribution_message(self, urgent=False): message = self._create_distribution_info_message() if message: message["type"] = "distribution-info" logging.info("Queueing message with updated distribution info.") self.registry.broker.send_message(message, self._session_id, urgent=urgent) @inlineCallbacks def send_cloud_instance_metadata_message(self, urgent=False): message = yield self._create_cloud_instance_metadata_message() if message: message["type"] = "cloud-instance-metadata" logging.info("Queueing message with updated cloud instance " "metadata.") self.registry.broker.send_message(message, self._session_id, urgent=urgent) def exchange(self, urgent=False): broker = self.registry.broker broker.call_if_accepted("computer-info", self.send_computer_message, urgent) broker.call_if_accepted("distribution-info", self.send_distribution_message, urgent) broker.call_if_accepted("cloud-instance-metadata", self.send_cloud_instance_metadata_message, urgent) def _create_computer_info_message(self): message = {} self._add_if_new(message, "hostname", self._get_fqdn()) total_memory, total_swap = self._get_memory_info() self._add_if_new(message, "total-memory", total_memory) self._add_if_new(message, "total-swap", total_swap) annotations = {} if os.path.exists(self._annotations_path): for key in os.listdir(self._annotations_path): annotations[key] = read_file( os.path.join(self._annotations_path, key)) if annotations: self._add_if_new(message, "annotations", annotations) return message def _add_if_new(self, message, key, value): if value != self._persist.get(key): self._persist.set(key, value) message[key] = value def 
_create_distribution_info_message(self): message = self._get_distribution_info() if message != self._persist.get("distribution-info"): self._persist.set("distribution-info", message) return message return None def _get_memory_info(self): """Get details in megabytes and return a C{(memory, swap)} tuple.""" message = {} file = open(self._meminfo_filename) for line in file: if line != '\n': parts = line.split(":") key = parts[0] if key in ["MemTotal", "SwapTotal"]: value = int(parts[1].strip().split(" ")[0]) message[key] = value file.close() return (message["MemTotal"] // 1024, message["SwapTotal"] // 1024) def _get_distribution_info(self): """Get details about the distribution.""" message = {} message.update(parse_lsb_release(self._lsb_release_filename)) return message @inlineCallbacks def _create_cloud_instance_metadata_message(self): """Fetch cloud metadata and insert it in a message.""" message = None if (self._cloud_instance_metadata is None and self._cloud_retries < METADATA_RETRY_MAX): self._cloud_instance_metadata = yield self._fetch_ec2_meta_data() message = self._cloud_instance_metadata returnValue(message) def _fetch_ec2_meta_data(self): """Fetch information about the cloud instance.""" if self._cloud_retries == 0: logging.info("Querying cloud meta-data.") deferred = fetch_ec2_meta_data(self._fetch_async) def log_no_meta_data_found(error): self._cloud_retries += 1 if self._cloud_retries >= METADATA_RETRY_MAX: logging.info("No cloud meta-data available. %s" % error.getErrorMessage()) def log_success(result): logging.info("Acquired cloud meta-data.") return result deferred.addCallback(log_success) deferred.addErrback(log_no_meta_data_found) return deferred landscape-client-14.01/landscape/monitor/cephusage.py0000644000175000017500000001405412301414317022500 0ustar andreasandreasimport time import os import json import logging import re from twisted.internet.defer import inlineCallbacks, returnValue from landscape.accumulate import Accumulator from landscape.lib.monitor import CoverageMonitor from landscape.lib.twisted_util import spawn_process from landscape.monitor.plugin import MonitorPlugin class CephUsage(MonitorPlugin): """ Plugin that captures Ceph usage information. This only works if the client runs on one of the Ceph monitor nodes, and it noops otherwise. The plugin requires the 'ceph' command to be available, which is run with a config file in /ceph-client/ceph.landscape-client.conf with the following config: [global] auth supported = cephx keyring = mon host = :6789 The configured keyring can be generated with: ceph-authtool --create-keyring --name=client.landscape-client --add-key= The landscape-client charm automatically provides the client configuration and key when deployed as subordinate of a ceph node. """ persist_name = "ceph-usage" scope = "storage" # Prevent the Plugin base-class from scheduling looping calls. 
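# Worked example for the ratio computed in _get_ceph_usage(), with
# made-up numbers: a pgmap line reporting "100 MB used, 800 MB /
# 1000 MB avail" yields filled = 1000 - 800 = 200, so the plugin
# reports 200 / 1000.0 = 0.2 (note that used + avail need not add
# up to total).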
run_interval = None _usage_regexp = re.compile( ".*pgmap.*data, (\d+) MB used, (\d+) MB / (\d+) MB avail.*", flags=re.S) def __init__(self, interval=30, monitor_interval=60 * 60, create_time=time.time): self._interval = interval self._monitor_interval = monitor_interval self._ceph_usage_points = [] self._ceph_ring_id = None self._create_time = create_time self._ceph_config = None def register(self, registry): super(CephUsage, self).register(registry) self._ceph_config = os.path.join( self.registry.config.data_path, "ceph-client", "ceph.landscape-client.conf") self._accumulate = Accumulator(self._persist, self._interval) self._monitor = CoverageMonitor( self._interval, 0.8, "Ceph usage snapshot", create_time=self._create_time) self.registry.reactor.call_every(self._interval, self.run) self.registry.reactor.call_every( self._monitor_interval, self._monitor.log) self.registry.reactor.call_on("stop", self._monitor.log, priority=2000) self.call_on_accepted("ceph-usage", self.send_message, True) def create_message(self): ceph_points = self._ceph_usage_points ring_id = self._ceph_ring_id self._ceph_usage_points = [] return {"type": "ceph-usage", "ceph-usages": ceph_points, "ring-id": ring_id} def send_message(self, urgent=False): message = self.create_message() if message["ceph-usages"] and message["ring-id"] is not None: self.registry.broker.send_message(message, self._session_id, urgent=urgent) def exchange(self, urgent=False): self.registry.broker.call_if_accepted( "ceph-usage", self.send_message, urgent) @inlineCallbacks def run(self): self._monitor.ping() # Check if a ceph config file is available. If it's not, this is not # a ceph machine, or ceph is not set up yet. No need to run anything # in this case. if self._ceph_config is None or not os.path.exists(self._ceph_config): returnValue(None) # Extract the ceph ring ID and cache it. if self._ceph_ring_id is None: self._ceph_ring_id = yield self._get_ceph_ring_id() new_timestamp = int(self._create_time()) new_ceph_usage = yield self._get_ceph_usage() step_data = None if new_ceph_usage is not None: step_data = self._accumulate( new_timestamp, new_ceph_usage, "ceph-usage-accumulator") if step_data is not None: self._ceph_usage_points.append(step_data) def _get_ceph_usage(self): """ Grab the ceph usage data by parsing the output of the "ceph status" command. """ def parse(output): if output is None: return None result = self._usage_regexp.match(output) if not result: logging.error("Could not parse command output: '%s'." % output) return None (used, available, total) = result.groups() # Note: used + available is NOT equal to total (there is some used # space for duplication and system info etc...) filled = int(total) - int(available) return filled / float(total) return self._get_status_command_output().addCallback(parse) def _get_status_command_output(self): return self._run_ceph_command("status") def _get_ceph_ring_id(self): """Extract ceph ring id from ceph command output.""" def parse(output): if output is None: return None try: quorum_status = json.loads(output) ring_id = quorum_status["monmap"]["fsid"] except: logging.error( "Could not get ring_id from output: '%s'." % output) return None return ring_id return self._get_quorum_command_output().addCallback(parse) def _get_quorum_command_output(self): return self._run_ceph_command("quorum_status") def _run_ceph_command(self, *args): """ Run the ceph command with the specified options, using the Landscape ceph key.
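This is roughly equivalent to running, at a shell, something like 'ceph --conf <config-file> --id landscape-client status' (illustrative; the exact arguments are built from the params list below).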
The keyring is expected to contain a configuration stanza with a key for the "client.landscape-client" id. """ params = ["--conf", self._ceph_config, "--id", "landscape-client"] params.extend(args) deferred = spawn_process("ceph", args=params) # If the command line client isn't available, we assume it's not a ceph # monitor machine. deferred.addCallback( lambda (out, err, code): out if code == 0 else None) return deferred landscape-client-14.01/landscape/monitor/aptpreferences.py0000644000175000017500000000373712301414317023550 0ustar andreasandreasimport os from landscape.lib.fs import read_file from landscape.constants import APT_PREFERENCES_SIZE_LIMIT from landscape.monitor.plugin import DataWatcher class AptPreferences(DataWatcher): """ Report the system APT preferences configuration. """ persist_name = "apt-preferences" message_type = "apt-preferences" message_key = "data" run_interval = 900 # 15 minutes scope = "package" size_limit = APT_PREFERENCES_SIZE_LIMIT def __init__(self, etc_apt_directory="/etc/apt"): self._etc_apt_directory = etc_apt_directory def get_data(self): """Return a C{dict} mapping APT preferences files to their contents. If no APT preferences configuration is set at all on the system, then simply return C{None} """ data = {} read_unicode = lambda filename: unicode(read_file(filename)) preferences_filename = os.path.join(self._etc_apt_directory, u"preferences") if os.path.exists(preferences_filename): data[preferences_filename] = read_unicode(preferences_filename) preferences_directory = os.path.join(self._etc_apt_directory, u"preferences.d") if os.path.isdir(preferences_directory): for entry in os.listdir(preferences_directory): filename = os.path.join(preferences_directory, entry) if os.path.isfile(filename): data[filename] = read_unicode(filename) if data == {}: return None item_size_limit = self.size_limit / len(data.keys()) for filename, contents in data.iteritems(): if len(filename) + len(contents) > item_size_limit: truncated_contents_size = item_size_limit - len(filename) data[filename] = data[filename][0:truncated_contents_size] return data def run(self): return self.exchange(urgent=True) landscape-client-14.01/landscape/monitor/computeruptime.py0000644000175000017500000001424212301414317023615 0ustar andreasandreasimport time from datetime import datetime import os import struct from landscape.lib.timestamp import to_timestamp from landscape.monitor.plugin import MonitorPlugin def get_uptime(uptime_file=u"/proc/uptime"): """ This parses a file in /proc/uptime format and returns a floating point version of the first value (the actual uptime). """ data = file(uptime_file, "r").readline() up, idle = data.split() return float(up) class LoginInfo(object): """Information about a login session gathered from wtmp or utmp.""" # FIXME This format string works fine on my hardware, but *may* be # different depending on the values of __WORDSIZE and # __WORDSIZE_COMPAT32 defined in /usr/include/bits/utmp.h:68 (in # the definition of struct utmp). Make sure it works # everywhere. -jk RAW_FORMAT = "hi32s4s32s256shhiiiiiii20s" def __init__(self, raw_data): info = struct.unpack(self.RAW_FORMAT, raw_data) self.login_type = info[0] self.pid = info[1] self.tty_device = info[2].strip("\0") self.id = info[3].strip("\0") self.username = info[4].strip("\0") self.hostname = info[5].strip("\0") self.termination_status = info[6] self.exit_status = info[7] self.session_id = info[8] self.entry_time = datetime.utcfromtimestamp(info[9]) # FIXME Convert this to a dotted decimal string. 
-jk self.remote_ip_address = info[11] class LoginInfoReader(object): """Reader parses C{/var/log/wtmp} and/or C{/var/run/utmp} files. @file: Initialize the reader with an open file. """ def __init__(self, file): self._file = file self._struct_length = struct.calcsize(LoginInfo.RAW_FORMAT) def login_info(self): """Returns a generator that yields LoginInfo objects.""" while True: info = self.read_next() if not info: break yield info def read_next(self): """Returns login data or None if no login data is available.""" data = self._file.read(self._struct_length) if data and len(data) == self._struct_length: return LoginInfo(data) return None class BootTimes(object): _last_boot = None _last_shutdown = None def __init__(self, filename="/var/log/wtmp", boots_newer_than=0, shutdowns_newer_than=0): self._filename = filename self._boots_newer_than = boots_newer_than self._shutdowns_newer_than = shutdowns_newer_than def get_times(self): reboot_times = [] shutdown_times = [] reader = LoginInfoReader(file(self._filename)) self._last_boot = self._boots_newer_than self._last_shutdown = self._shutdowns_newer_than for info in reader.login_info(): if info.tty_device.startswith("~"): timestamp = to_timestamp(info.entry_time) if (info.username == "reboot" and timestamp > self._last_boot): reboot_times.append(timestamp) self._last_boot = timestamp elif (info.username == "shutdown" and timestamp > self._last_shutdown): shutdown_times.append(timestamp) self._last_shutdown = timestamp return reboot_times, shutdown_times def get_last_boot_time(self): if self._last_boot is None: self._last_boot = int(time.time() - get_uptime()) return self._last_boot class ComputerUptime(MonitorPlugin): """Plugin reports information about computer uptime.""" persist_name = "computer-uptime" scope = "computer" def __init__(self, wtmp_file="/var/log/wtmp"): self._first_run = True self._wtmp_file = wtmp_file def register(self, registry): """Register this plugin with the specified plugin manager.""" super(ComputerUptime, self).register(registry) registry.reactor.call_on("run", self.run) self.call_on_accepted("computer-uptime", self.run, True) def run(self, urgent=False): """Create a message and put it on the message queue. The last logrotated file, if it exists, will be checked the first time the plugin runs. This behaviour ensures we don't accidentally miss a reboot/shutdown event if the machine is rebooted and wtmp is logrotated before the client starts. 
""" broker = self.registry.broker if self._first_run: filename = self._wtmp_file + ".1" if os.path.isfile(filename): broker.call_if_accepted("computer-uptime", self.send_message, filename, urgent) if os.path.isfile(self._wtmp_file): broker.call_if_accepted("computer-uptime", self.send_message, self._wtmp_file, urgent) def send_message(self, filename, urgent=False): message = self._create_message(filename) if "shutdown-times" in message or "startup-times" in message: message["type"] = "computer-uptime" self.registry.broker.send_message(message, self._session_id, urgent=urgent) def _create_message(self, filename): """Generate a message with new startup and shutdown times.""" message = {} startup_times = [] shutdown_times = [] last_startup_time = self._persist.get("last-startup-time", 0) last_shutdown_time = self._persist.get("last-shutdown-time", 0) times = BootTimes(filename, boots_newer_than=last_startup_time, shutdowns_newer_than=last_shutdown_time) startup_times, shutdown_times = times.get_times() if startup_times: self._persist.set("last-startup-time", startup_times[-1]) message["startup-times"] = startup_times if shutdown_times: self._persist.set("last-shutdown-time", shutdown_times[-1]) message["shutdown-times"] = shutdown_times return message landscape-client-14.01/landscape/monitor/rebootrequired.py0000644000175000017500000000457312301414317023574 0ustar andreasandreasimport os import logging from landscape.lib.fs import read_file from landscape.monitor.plugin import MonitorPlugin REBOOT_REQUIRED_FILENAME = "/var/run/reboot-required" class RebootRequired(MonitorPlugin): """ Report whether the system requires a reboot. @param reboot_required_filename: The path to the flag file that indicates if the system needs to be rebooted. """ persist_name = "reboot-required" scope = "package" run_interval = 900 # 15 minutes run_immediately = True def __init__(self, reboot_required_filename=REBOOT_REQUIRED_FILENAME): self._flag_filename = reboot_required_filename self._packages_filename = reboot_required_filename + ".pkgs" def _get_flag(self): """Return a boolean indicating whether the computer needs a reboot.""" return os.path.exists(self._flag_filename) def _get_packages(self): """Return the list of packages that required a reboot, if any.""" if not os.path.exists(self._packages_filename): return [] lines = read_file(self._packages_filename).splitlines() packages = set(line.strip().decode("utf-8") for line in lines if line) return sorted(packages) def _create_message(self): """Return the body of the reboot-required message to be sent.""" message = {} flag = self._get_flag() packages = self._get_packages() for key, value in [("flag", flag), ("packages", packages)]: if value == self._persist.get(key): continue self._persist.set(key, value) message[key] = value return message def send_message(self): """Send a reboot-required message if needed. A message will be sent only if the reboot-required status of the system has changed. 
""" message = self._create_message() if message: message["type"] = "reboot-required-info" logging.info("Queueing message with updated " "reboot-required status.") self.registry.broker.send_message(message, self._session_id, urgent=True) def run(self): """Send reboot-required messages if the server accepts them.""" return self.registry.broker.call_if_accepted( "reboot-required-info", self.send_message) landscape-client-14.01/landscape/monitor/networkactivity.py0000644000175000017500000001113612301414317024000 0ustar andreasandreas""" A monitor that collects data on network activity, and sends messages with the inbound/outbound traffic per interface per step interval. """ import time from landscape.lib.network import get_network_traffic, is_64 from landscape.accumulate import Accumulator from landscape.monitor.plugin import MonitorPlugin class NetworkActivity(MonitorPlugin): """ Collect data regarding a machine's network activity. """ message_type = "network-activity" persist_name = message_type run_interval = 30 _rollover_maxint = 0 scope = "network" max_network_items_to_exchange = 200 def __init__(self, network_activity_file="/proc/net/dev", create_time=time.time): self._source_file = network_activity_file # accumulated values for sending out via message self._network_activity = {} # our last traffic sample for calculating a traffic delta self._last_activity = {} self._create_time = create_time # We don't rollover on 64 bits, as 16 exabytes is a lot. if not is_64(): self._rollover_maxint = pow(2, 32) def register(self, registry): super(NetworkActivity, self).register(registry) self._accumulate = Accumulator(self._persist, self.registry.step_size) self.call_on_accepted("network-activity", self.exchange, True) def create_message(self): network_activity = {} items = 0 for interface, data in list(self._network_activity.items()): if data: network_activity[interface] = [] while data and items < self.max_network_items_to_exchange: item = data.pop(0) network_activity[interface].append(item) items += 1 if items >= self.max_network_items_to_exchange: break if not network_activity: return return {"type": "network-activity", "activities": network_activity} def send_message(self, urgent): message = self.create_message() if not message: return self.registry.broker.send_message( message, self._session_id, urgent=urgent) def exchange(self, urgent=False): self.registry.broker.call_if_accepted("network-activity", self.send_message, urgent) def _traffic_delta(self, new_traffic): """ Given network activity metrics across all interfaces, calculate and return the delta data transferred for inbound and outbound traffic. Returns a tuple of interface name, outbound delta, inbound delta. """ for interface in new_traffic: traffic = new_traffic[interface] if interface in self._last_activity: previous_out, previous_in = self._last_activity[interface] delta_out = traffic["send_bytes"] - previous_out delta_in = traffic["recv_bytes"] - previous_in if delta_out < 0: delta_out += self._rollover_maxint if delta_in < 0: delta_in += self._rollover_maxint # If it's still zero or less, we discard the value. The next # value will be compared to the current traffic, and # hopefully things will catch up. 
if delta_out <= 0 and delta_in <= 0: continue yield interface, delta_out, delta_in self._last_activity[interface] = ( traffic["send_bytes"], traffic["recv_bytes"]) for interface in self._last_activity.keys(): if interface not in new_traffic: del self._last_activity[interface] def run(self): """ Sample network traffic statistics and store them into the accumulator, recording step data. """ new_timestamp = int(self._create_time()) new_traffic = get_network_traffic(self._source_file) for interface, delta_out, delta_in in self._traffic_delta(new_traffic): out_step_data = self._accumulate( new_timestamp, delta_out, "delta-out-%s" % interface) in_step_data = self._accumulate( new_timestamp, delta_in, "delta-in-%s" % interface) # there's only data when we cross a step boundary if not (in_step_data and out_step_data): continue steps = self._network_activity.setdefault(interface, []) steps.append( (in_step_data[0], int(in_step_data[1]), int(out_step_data[1]))) landscape-client-14.01/landscape/monitor/config.py0000644000175000017500000000227512301414317022003 0ustar andreasandreasfrom landscape.deployment import Configuration ALL_PLUGINS = ["ActiveProcessInfo", "ComputerInfo", "LoadAverage", "MemoryInfo", "MountInfo", "ProcessorInfo", "Temperature", "PackageMonitor", "UserMonitor", "RebootRequired", "AptPreferences", "NetworkActivity", "NetworkDevice", "UpdateManager", "CPUUsage", "SwiftDeviceInfo", "CephUsage", "JujuInfo"] class MonitorConfiguration(Configuration): """Specialized configuration for the Landscape Monitor.""" def make_parser(self): """ Specialize L{Configuration.make_parser}, adding many monitor-specific options. """ parser = super(MonitorConfiguration, self).make_parser() parser.add_option("--monitor-plugins", metavar="PLUGIN_LIST", help="Comma-delimited list of monitor plugins to " "use. ALL means use all plugins.", default="ALL") return parser @property def plugin_factories(self): if self.monitor_plugins == "ALL": return ALL_PLUGINS return [x.strip() for x in self.monitor_plugins.split(",")] landscape-client-14.01/landscape/monitor/tests/0000755000175000017500000000000012301414317021320 5ustar andreasandreaslandscape-client-14.01/landscape/monitor/tests/test_swiftdeviceinfo.py0000644000175000017500000003511112301414317026122 0ustar andreasandreasfrom twisted.internet.defer import succeed from landscape.lib.fetch import HTTPCodeError from landscape.monitor.swiftdeviceinfo import SwiftDeviceInfo from landscape.tests.helpers import LandscapeTest, MonitorHelper from landscape.tests.mocker import ANY class FakeRingInfo(object): def __init__(self, ip_port_tuples=[]): self.devs = [] for ip, port in ip_port_tuples: self.devs.append({"ip": ip, "port": port}) class SwiftDeviceInfoTest(LandscapeTest): """Tests for swift-device-info plugin.""" helpers = [MonitorHelper] def setUp(self): LandscapeTest.setUp(self) self.mstore.set_accepted_types(["swift-device-info"]) def test_exchange_messages(self): """ The swift_device_info plugin queues a message when manager.exchange() is called. Each message should be aligned to a step boundary; only a single message, carrying the latest swift device information, will be delivered in one exchange.
""" def fake_swift_devices(): return [{"device": "/dev/hdf", "mounted": True}, {"device": "/dev/hda2", "mounted": False}] plugin = SwiftDeviceInfo(create_time=self.reactor.time) plugin._get_swift_devices = fake_swift_devices step_size = self.monitor.step_size self.monitor.add(plugin) # Exchange should trigger a flush of the persist database registry_mocker = self.mocker.replace(plugin.registry) registry_mocker.flush() self.mocker.result(None) self.mocker.replay() self.reactor.advance(step_size * 2) self.monitor.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 1) expected_message_content = [ {"device": "/dev/hdf", "mounted": True}, {"device": "/dev/hda2", "mounted": False}] swift_devices = messages[0]["swift-device-info"] self.assertEqual(swift_devices, expected_message_content) def test_messaging_flushes(self): """ Duplicate message should never be created. If no data is available, None will be returned when messages are created. """ def fake_swift_devices(): return [{"device": "/dev/hdf", "mounted": True}, {"device": "/dev/hda2", "mounted": False}] plugin = SwiftDeviceInfo(create_time=self.reactor.time) self.monitor.add(plugin) plugin._get_swift_devices = fake_swift_devices self.reactor.advance(self.monitor.step_size) message = plugin.create_swift_device_info_message() self.assertEqual(message.keys(), ["swift-device-info", "type"]) message = plugin.create_swift_device_info_message() self.assertEqual(message, None) def test_never_exchange_empty_messages(self): """ When the plugin has no data, its various create_X_message() methods will return None. Empty or null messages should never be queued. """ self.mstore.set_accepted_types(["load-average"]) plugin = SwiftDeviceInfo() self.monitor.add(plugin) self.monitor.exchange() self.assertEqual(len(self.mstore.get_pending_messages()), 0) def test_messages_with_swift_data(self): """ All swift-affiliated devices are sent in swift-device-info messages. Both mounted and unmounted swift devices send data. """ def fake_swift_devices(): return [{"device": "/dev/hdf", "mounted": True}, {"device": "/dev/hda2", "mounted": False}] plugin = SwiftDeviceInfo(create_time=self.reactor.time) plugin._get_swift_devices = fake_swift_devices step_size = self.monitor.step_size self.monitor.add(plugin) plugin.run() self.reactor.advance(step_size) self.monitor.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 1) # Need to see both mounted and unmounted swift device info self.assertEqual( messages[0].get("swift-device-info"), [{'device': u'/dev/hdf', 'mounted': True}, {'device': u'/dev/hda2', 'mounted': False}]) def test_resynchronize(self): """ On the reactor "resynchronize" event, new swift-device-info messages should be sent. """ plugin = SwiftDeviceInfo(create_time=self.reactor.time) def fake_swift_devices(): return [{"device": "/dev/hdf", "mounted": True}, {"device": "/dev/hda2", "mounted": False}] self.monitor.add(plugin) plugin._get_swift_devices = fake_swift_devices plugin.run() plugin.exchange() self.reactor.fire("resynchronize", scopes=["storage"]) plugin.run() plugin.exchange() messages = self.mstore.get_pending_messages() expected_message = { "type": "swift-device-info", "swift-device-info": [ {"device": "/dev/hdf", "mounted": True}, {"device": "/dev/hda2", "mounted": False}]} self.assertMessages(messages, [expected_message, expected_message]) def test_no_message_if_not_accepted(self): """ Don't add any messages at all if the broker isn't currently accepting their type. 
""" self.mstore.set_accepted_types([]) def fake_swift_devices(): return [{"device": "/dev/hdf", "mounted": True}, {"device": "/dev/hda2", "mounted": False}] plugin = SwiftDeviceInfo(create_time=self.reactor.time) self.monitor.add(plugin) plugin._get_swift_devices = fake_swift_devices self.reactor.advance(self.monitor.step_size * 2) self.monitor.exchange() self.mstore.set_accepted_types(["swift-device-info"]) self.assertMessages(list(self.mstore.get_pending_messages()), []) def test_call_on_accepted(self): """ When message type acceptance is added for swift-device-info, send_message gets called. """ def fake_swift_devices(): return [{"device": "/dev/hdf", "mounted": True}, {"device": "/dev/hda2", "mounted": False}] plugin = SwiftDeviceInfo(create_time=self.reactor.time) self.monitor.add(plugin) plugin._get_swift_devices = fake_swift_devices self.reactor.advance(plugin.run_interval) remote_broker_mock = self.mocker.replace(self.remote) remote_broker_mock.send_message(ANY, ANY, urgent=True) self.mocker.result(succeed(None)) self.mocker.count(1) # 1 send message is called for swift-device-info self.mocker.replay() self.reactor.fire( ("message-type-acceptance-changed", "swift-device-info"), True) def test_persist_deltas(self): """ Swift persistent device info drops old devices from persist storage if the device no longer exists in the current device list. """ def fake_swift_devices(): return [{"device": "/dev/hdf", "mounted": True}, {"device": "/dev/hda2", "mounted": False}] def fake_swift_devices_no_hdf(): return [{"device": "/dev/hda2", "mounted": False}] plugin = SwiftDeviceInfo(create_time=self.reactor.time) self.monitor.add(plugin) plugin._get_swift_devices = fake_swift_devices plugin.run() plugin.exchange() # To persist swift recon data self.assertEqual( plugin._persist.get("swift-device-info"), {"/dev/hdf": {"device": "/dev/hdf", "mounted": True}, "/dev/hda2": {"device": "/dev/hda2", "mounted": False}}) # Drop a device plugin._get_swift_devices = fake_swift_devices_no_hdf plugin.run() plugin.exchange() self.assertEqual( plugin._persist.get("swift-device-info"), {"/dev/hda2": {"device": "/dev/hda2", "mounted": False}}) # Run again, calling create_swift_device_info_message which purges info plugin.run() plugin.exchange() message3 = plugin.create_swift_device_info_message() self.assertIdentical(message3, None) def test_persist_timing(self): """Swift device info is only persisted when exchange happens. If an event happened between the persist and the exchange, the server didn't get the mount info at all. This test ensures that mount info are only saved when exchange happens. 
""" def fake_swift_devices(): return [{"device": "/dev/hdf", "mounted": True}, {"device": "/dev/hda2", "mounted": False}] plugin = SwiftDeviceInfo(create_time=self.reactor.time) self.monitor.add(plugin) plugin._get_swift_devices = fake_swift_devices plugin.run() message1 = plugin.create_swift_device_info_message() self.assertEqual( message1.get("swift-device-info"), [{"device": "/dev/hdf", "mounted": True}, {"device": "/dev/hda2", "mounted": False}]) plugin.run() message2 = plugin.create_swift_device_info_message() self.assertEqual( message2.get("swift-device-info"), [{"device": "/dev/hdf", "mounted": True}, {"device": "/dev/hda2", "mounted": False}]) # Run again, calling create_swift_device_info_message which purges info plugin.run() plugin.exchange() plugin.run() message3 = plugin.create_swift_device_info_message() self.assertIdentical(message3, None) def test_wb_get_swift_devices_when_not_a_swift_node(self): """ When not a swift node, _get_swift_devices returns an empty list and no error messages. """ plugin = SwiftDeviceInfo(create_time=self.reactor.time) self.assertEqual(plugin._get_swift_devices(), []) def test_wb_get_swift_devices_when_on_a_swift_node(self): """ When on a swift node, _get_swift_devices reports a warning if the ring files don't exist yet. """ plugin = SwiftDeviceInfo(create_time=self.reactor.time, swift_config="/etc/hosts") logging_mock = self.mocker.replace("logging.warning") logging_mock("Swift ring files are not available yet.") self.mocker.replay() self.assertEqual(plugin._get_swift_devices(), []) def test_run_disabled_when_missing_swift_config(self): """ When on a node that doesn't have the appropriate swift config file. The plugin logs an info message and is disabled. """ plugin = SwiftDeviceInfo(create_time=self.reactor.time, swift_config="/config/file/doesnotexist") logging_mock = self.mocker.replace("logging.info") logging_mock("This does not appear to be a swift storage server. " "'swift-device-info' plugin has been disabled.") self.mocker.replay() self.monitor.add(plugin) self.assertEqual(plugin.enabled, True) plugin.run() self.assertEqual(plugin.enabled, False) def test_wb_get_swift_devices_no_swift_python_libs_available(self): """ The plugin logs an error and doesn't find swift devices when it can't import the swift python libs which it requires. """ plugin = SwiftDeviceInfo(create_time=self.reactor.time, swift_config="/etc/hosts", swift_ring="/etc/hosts") logging_mock = self.mocker.replace("logging.error") logging_mock("Swift python common libraries not found. " "'swift-device-info' plugin has been disabled.") self.mocker.replay() self.assertEqual(plugin._get_swift_devices(), []) def test_wb_get_swift_disk_usage_when_no_swift_service_running(self): """ When the swift service is running, but recon middleware is not active, the Swift storage usage logs an error. """ self.log_helper.ignore_errors(".*") plugin = SwiftDeviceInfo(create_time=self.reactor.time) plugin._swift_recon_url = "http://localhost:12654" result = plugin._get_swift_disk_usage() self.assertIs(None, result) self.assertIn( "Swift service not available at %s." % plugin._swift_recon_url, self.logfile.getvalue()) def test_wb_get_swift_disk_usage_when_no_recon_service_configured(self): """ When the swift service is running, but recon middleware is not active, an error is logged. 
""" plugin = SwiftDeviceInfo(create_time=self.reactor.time) plugin._swift_recon_url = "http://localhost:12654" def fetch_error(url): raise HTTPCodeError(400, "invalid path: /recon/diskusage") plugin._fetch = fetch_error logging_mock = self.mocker.replace("logging.error", passthrough=False) logging_mock( "Swift service is running without swift-recon enabled. " "'swift-device-info' plugin has been disabled.") self.mocker.result(None) self.mocker.replay() result = plugin._get_swift_disk_usage() self.assertIs(None, result) def test_wb_get_swift_usage_no_information(self): """ When the swift recon service returns no disk usage information, the _get_swift_disk_usage method returns None. """ plugin = SwiftDeviceInfo(create_time=self.reactor.time) def fetch_none(url): return None plugin._fetch = fetch_none result = plugin._get_swift_disk_usage() self.assertEqual(None, result) def test_wb_get_swift_devices_no_matched_local_service(self): """ The plugin logs an error when the swift ring file does not represent a swift service running local IP address on the current node. """ plugin = SwiftDeviceInfo(create_time=self.reactor.time, swift_config="/etc/hosts") def get_fake_ring(): return FakeRingInfo([("192.168.1.10", 6000)]) plugin._get_ring = get_fake_ring def local_network_devices(): return [{"ip_address": "10.1.2.3"}] plugin._get_network_devices = local_network_devices logging_mock = self.mocker.replace("logging.error") logging_mock("Local swift service not found. " "'swift-device-info' plugin has been disabled.") self.mocker.replay() self.assertEqual(plugin._get_swift_devices(), []) landscape-client-14.01/landscape/monitor/tests/test_aptpreferences.py0000644000175000017500000001721312301414317025743 0ustar andreasandreasimport os from twisted.internet.defer import succeed from landscape.monitor.aptpreferences import AptPreferences from landscape.tests.helpers import LandscapeTest from landscape.tests.helpers import MonitorHelper from landscape.tests.mocker import ANY class AptPreferencesTest(LandscapeTest): helpers = [MonitorHelper] def setUp(self): super(AptPreferencesTest, self).setUp() self.etc_apt_directory = self.makeDir() self.plugin = AptPreferences(self.etc_apt_directory) self.monitor.add(self.plugin) def test_get_data_without_apt_preferences_files(self): """ L{AptPreferences.get_data} returns C{None} if no APT preferences file is detected. """ self.assertIdentical(self.plugin.get_data(), None) def test_get_data_with_apt_preferences(self): """ L{AptPreferences.get_data} includes the contents of the main APT preferences file. """ preferences_filename = os.path.join(self.etc_apt_directory, "preferences") self.makeFile(path=preferences_filename, content="crap") self.assertEqual(self.plugin.get_data(), {preferences_filename: "crap"}) def test_get_data_with_empty_preferences_directory(self): """ L{AptPreferences.get_data} returns C{None} if the APT preference directory is present but empty. """ preferences_directory = os.path.join(self.etc_apt_directory, "preferences.d") self.makeDir(path=preferences_directory) self.assertIdentical(self.plugin.get_data(), None) def test_get_data_with_preferences_directory(self): """ L{AptPreferences.get_data} includes the contents of all the file in the APT preferences directory. 
""" preferences_directory = os.path.join(self.etc_apt_directory, "preferences.d") self.makeDir(path=preferences_directory) filename1 = self.makeFile(dirname=preferences_directory, content="foo") filename2 = self.makeFile(dirname=preferences_directory, content="bar") self.assertEqual(self.plugin.get_data(), {filename1: "foo", filename2: "bar"}) def test_get_data_with_one_big_file(self): """ L{AptPreferences.get_data} truncates the contents of an APT preferences files bigger than the size limit. """ preferences_filename = os.path.join(self.etc_apt_directory, "preferences") limit = self.plugin.size_limit self.makeFile(path=preferences_filename, content="a" * (limit + 1)) self.assertEqual(self.plugin.get_data(), { preferences_filename: "a" * (limit - len(preferences_filename))}) def test_get_data_with_many_big_files(self): """ L{AptPreferences.get_data} truncates the contents of individual APT preferences files in the total size is bigger than the size limit. """ preferences_directory = os.path.join(self.etc_apt_directory, "preferences.d") self.makeDir(path=preferences_directory) limit = self.plugin.size_limit filename1 = self.makeFile(dirname=preferences_directory, content="a" * (limit / 2)) filename2 = self.makeFile(dirname=preferences_directory, content="b" * (limit / 2)) self.assertEqual(self.plugin.get_data(), {filename1: "a" * (limit / 2 - len(filename1)), filename2: "b" * (limit / 2 - len(filename2))}) def test_exchange_without_apt_preferences_data(self): """ If the system has no APT preferences data, no message is sent. """ self.mstore.set_accepted_types(["apt-preferences"]) self.plugin.exchange() self.assertEqual(self.mstore.get_pending_messages(), []) def test_exchange(self): """ If the system has some APT preferences data, a message of type C{apt-preferences} is sent. If the data then gets removed, a further message with the C{data} field set to C{None} is sent. """ self.mstore.set_accepted_types(["apt-preferences"]) main_preferences_filename = os.path.join(self.etc_apt_directory, "preferences") self.makeFile(path=main_preferences_filename, content="crap") preferences_directory = os.path.join(self.etc_apt_directory, "preferences.d") self.makeDir(path=preferences_directory) sub_preferences_filename = self.makeFile(dirname=preferences_directory, content="foo") self.plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(messages[0]["type"], "apt-preferences") self.assertEqual(messages[0]["data"], {main_preferences_filename: u"crap", sub_preferences_filename: u"foo"}) for filename in messages[0]["data"]: self.assertTrue(isinstance(filename, unicode)) # Remove all APT preferences data from the system os.remove(main_preferences_filename) os.remove(sub_preferences_filename) self.plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(messages[1]["type"], "apt-preferences") self.assertIdentical(messages[1]["data"], None) def test_exchange_only_once(self): """ If the system has some APT preferences data, a message of type C{apt-preferences} is sent. If the data then gets removed, a further message with the C{data} field set to C{None} is sent. 
""" self.mstore.set_accepted_types(["apt-preferences"]) preferences_filename = os.path.join(self.etc_apt_directory, "preferences") self.makeFile(path=preferences_filename, content="crap") self.plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 1) messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 1) def test_run(self): """ If the server can accept them, the plugin should send C{apt-preferences} urgent messages. """ self.mstore.set_accepted_types(["apt-preferences"]) broker_mock = self.mocker.replace(self.remote) broker_mock.send_message(ANY, ANY, urgent=True) self.mocker.result(succeed(None)) self.mocker.replay() preferences_filename = os.path.join(self.etc_apt_directory, "preferences") self.makeFile(path=preferences_filename, content="crap") self.plugin.run() self.mstore.set_accepted_types([]) self.plugin.run() def test_resynchronize(self): """ The "resynchronize" reactor message cause the plugin to send fresh data. """ preferences_filename = os.path.join(self.etc_apt_directory, "preferences") self.makeFile(path=preferences_filename, content="crap") self.mstore.set_accepted_types(["apt-preferences"]) self.plugin.run() self.reactor.fire("resynchronize", scopes=["package"]) self.plugin.run() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 2) landscape-client-14.01/landscape/monitor/tests/test_updatemanager.py0000644000175000017500000000743712301414317025561 0ustar andreasandreasfrom landscape.monitor.updatemanager import UpdateManager from landscape.tests.helpers import ( LandscapeTest, MonitorHelper, LogKeeperHelper) from landscape.tests.mocker import ANY class UpdateManagerTest(LandscapeTest): """ Tests relating to the L{UpdateManager} monitoring plug-in, which should notice changes to update-manager's configuration and report these back to landscape server. """ helpers = [MonitorHelper, LogKeeperHelper] def setUp(self): super(UpdateManagerTest, self).setUp() self.update_manager_filename = self.makeFile() self.plugin = UpdateManager(self.update_manager_filename) self.monitor.add(self.plugin) self.mstore.set_accepted_types(["update-manager-info"]) def test_get_prompt(self): """ L{UpdateManager._get_prompt} returns the value of the variable C{Prompt} in the update-manager's configuration. """ content = """ [DEFAULT] Prompt=lts """ self.makeFile(path=self.update_manager_filename, content=content) self.assertEqual("lts", self.plugin._get_prompt()) def test_get_prompt_with_invalid_value_configured(self): """ L{update_manager._get_prompt} returns "normal" if an invalid value is specified in the file. A warning is also logged. """ content = """ [DEFAULT] Prompt=zarniwhoop """ self.makeFile(path=self.update_manager_filename, content=content) self.assertEqual("normal", self.plugin._get_prompt()) def test_get_prompt_with_missing_config_file(self): """ When the configuration file does not exist we just return "normal". Any machine that doesn't have update-manager installed would fit into this category, so there's no need to warn about it. """ self.plugin.update_manager_filename = "/I/Do/Not/Exist" self.assertEqual("normal", self.plugin._get_prompt()) def test_send_message(self): """ A new C{"update-manager-info"} message should be enqueued if and only if the update-manager status of the system has changed. 
""" content = """ [DEFAULT] Prompt=never """ self.makeFile(path=self.update_manager_filename, content=content) self.plugin.send_message() self.assertIn("Queueing message with updated update-manager status.", self.logfile.getvalue()) self.assertMessages(self.mstore.get_pending_messages(), [{"type": "update-manager-info", "prompt": u"never"}]) self.mstore.delete_all_messages() self.plugin.send_message() self.assertMessages(self.mstore.get_pending_messages(), []) def test_run_interval(self): """ The L{UpdateManager} plugin will be scheduled to run every hour. """ self.assertEqual(3600, self.plugin.run_interval) def test_run_immediately(self): """ The L{UpdateManager} plugin will be run immediately at startup. """ self.assertTrue(True, self.plugin.run_immediately) def test_run(self): """ If the server can accept them, the plugin should send C{update-manager} messages. """ broker_mock = self.mocker.replace(self.remote) broker_mock.send_message(ANY, ANY) self.mocker.replay() self.plugin.run() self.mstore.set_accepted_types([]) self.plugin.run() def test_resynchronize(self): """ The "resynchronize" reactor message cause the plugin to send fresh data. """ self.plugin.run() self.reactor.fire("resynchronize", scopes=["package"]) self.plugin.run() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 2) landscape-client-14.01/landscape/monitor/tests/test_loadaverage.py0000644000175000017500000001333412301414317025207 0ustar andreasandreasfrom landscape.monitor.loadaverage import LoadAverage from landscape.tests.helpers import LandscapeTest, MonitorHelper from landscape.tests.mocker import ANY def get_load_average(): i = 1 while True: yield (float(i), 1.0, 500.238) i += 1 class LoadAveragePluginTest(LandscapeTest): helpers = [MonitorHelper] def test_real_load_average(self): """ When the load average plugin runs it calls os.getloadavg() to retrieve current load average data. This test makes sure that os.getloadavg() is called without failing and that messages with the expected datatypes are generated. """ plugin = LoadAverage(create_time=self.reactor.time) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size) message = plugin.create_message() self.assertTrue("type" in message) self.assertEqual(message["type"], "load-average") self.assertTrue("load-averages" in message) load_averages = message["load-averages"] self.assertEqual(len(load_averages), 1) load_average = load_averages[0] self.assertEqual(load_average[0], self.monitor.step_size) self.assertTrue(isinstance(load_average[0], int)) self.assertTrue(isinstance(load_average[1], float)) def test_sample_load_average(self): """ Sample data is used to ensure that the load average included in the message is calculated correctly. """ get_load_average = lambda: (0.15, 1, 500) plugin = LoadAverage(create_time=self.reactor.time, get_load_average=get_load_average) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size) message = plugin.create_message() load_averages = message["load-averages"] self.assertEqual(len(load_averages), 1) self.assertEqual(load_averages[0], (self.monitor.step_size, 0.15)) def test_ranges_remain_contiguous_after_flush(self): """ The load average plugin uses the accumulate function to queue messages. Timestamps should always be contiguous, and always fall on a step boundary. 
""" plugin = LoadAverage(create_time=self.reactor.time, get_load_average=get_load_average().next) self.monitor.add(plugin) for i in range(1, 10): self.reactor.advance(self.monitor.step_size) message = plugin.create_message() load_averages = message["load-averages"] self.assertEqual(len(load_averages), 1) self.assertEqual(load_averages[0][0], self.monitor.step_size * i) def test_messaging_flushes(self): """ Duplicate message should never be created. If no data is available, a message with an empty C{load-averages} list is expected. """ plugin = LoadAverage(create_time=self.reactor.time, get_load_average=get_load_average().next) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size) message = plugin.create_message() self.assertEqual(len(message["load-averages"]), 1) message = plugin.create_message() self.assertEqual(len(message["load-averages"]), 0) def test_never_exchange_empty_messages(self): """ The plugin will create a message with an empty C{load-averages} list when no data is available. If an empty message is created during exchange, it should not be queued. """ self.mstore.set_accepted_types(["load-average"]) plugin = LoadAverage(create_time=self.reactor.time, get_load_average=get_load_average().next) self.monitor.add(plugin) self.monitor.exchange() self.assertEqual(len(self.mstore.get_pending_messages()), 0) def test_exchange_messages(self): """ The load average plugin queues message when manager.exchange() is called. Each message should be aligned to a step boundary; messages collected bewteen exchange periods should be delivered in a single message. """ self.mstore.set_accepted_types(["load-average"]) plugin = LoadAverage(create_time=self.reactor.time, get_load_average=get_load_average().next) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size * 2) self.monitor.exchange() self.assertMessages(self.mstore.get_pending_messages(), [{"type": "load-average", "load-averages": [(300, 10.5), (600, 30.5)]}]) def test_call_on_accepted(self): plugin = LoadAverage(create_time=self.reactor.time, get_load_average=get_load_average().next) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size * 1) remote_broker_mock = self.mocker.replace(self.remote) remote_broker_mock.send_message(ANY, ANY, urgent=True) self.mocker.replay() self.reactor.fire(("message-type-acceptance-changed", "load-average"), True) def test_no_message_if_not_accepted(self): """ Don't add any messages at all if the broker isn't currently accepting their type. """ plugin = LoadAverage(create_time=self.reactor.time, get_load_average=get_load_average().next) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size * 2) self.monitor.exchange() self.mstore.set_accepted_types(["load-average"]) self.assertMessages(list(self.mstore.get_pending_messages()), []) landscape-client-14.01/landscape/monitor/tests/test_processorinfo.py0000644000175000017500000004475212301414317025640 0ustar andreasandreasfrom landscape.plugin import PluginConfigError from landscape.monitor.processorinfo import ProcessorInfo from landscape.tests.helpers import LandscapeTest, MonitorHelper from landscape.tests.mocker import ANY # The extra blank line at the bottom of some sample data definitions # is intentional. 
class ProcessorInfoTest(LandscapeTest): """Tests for CPU info plugin.""" helpers = [MonitorHelper] def test_unknown_machine_name(self): """Ensure a PluginConfigError is raised for unknown machines.""" self.assertRaises(PluginConfigError, lambda: ProcessorInfo(machine_name="wubble")) def test_read_proc_cpuinfo(self): """Ensure the plugin can parse /proc/cpuinfo.""" message = ProcessorInfo().create_message() self.assertEqual(message["type"], "processor-info") self.assertTrue(message["processors"] > 0) for processor in message["processors"]: self.assertTrue("processor-id" in processor) self.assertTrue("model" in processor) def test_call_on_accepted(self): plugin = ProcessorInfo() self.monitor.add(plugin) remote_broker_mock = self.mocker.replace(self.remote) remote_broker_mock.send_message(ANY, ANY, urgent=True) self.mocker.replay() self.reactor.fire( ("message-type-acceptance-changed", "processor-info"), True) class ResynchTest(LandscapeTest): helpers = [MonitorHelper] def test_resynchronize(self): """ The "resynchronize" reactor message should cause the plugin to send fresh data. """ self.mstore.set_accepted_types(["processor-info"]) plugin = ProcessorInfo() self.monitor.add(plugin) plugin.run() self.reactor.fire("resynchronize", scopes=["cpu"]) plugin.run() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 2) class PowerPCMessageTest(LandscapeTest): """Tests for powerpc-specific message builder.""" helpers = [MonitorHelper] SMP_PPC_G5 = """ processor : 0 cpu : PPC970FX, altivec supported clock : 2500.000000MHz revision : 3.0 (pvr 003c 0300) processor : 1 cpu : PPC970FX, altivec supported clock : 2500.000000MHz revision : 3.0 (pvr 003c 0300) timebase : 33333333 machine : PowerMac7,3 motherboard : PowerMac7,3 MacRISC4 Power Macintosh detected as : 336 (PowerMac G5) pmac flags : 00000000 L2 cache : 512K unified pmac-generation : NewWorld """ UP_PPC_G4 = """ processor : 0 cpu : 7447A, altivec supported clock : 666.666000MHz revision : 0.1 (pvr 8003 0101) bogomips : 36.73 timebase : 18432000 machine : PowerBook5,4 motherboard : PowerBook5,4 MacRISC3 Power Macintosh detected as : 287 (PowerBook G4 15") pmac flags : 0000001b L2 cache : 512K unified pmac-generation : NewWorld """ def setUp(self): LandscapeTest.setUp(self) self.mstore.set_accepted_types(["processor-info"]) def test_read_sample_ppc_g5_data(self): """Ensure the plugin can parse /proc/cpuinfo from a dual PowerPC G5.""" filename = self.makeFile(self.SMP_PPC_G5) plugin = ProcessorInfo(machine_name="ppc64", source_filename=filename) message = plugin.create_message() self.assertEqual(message["type"], "processor-info") self.assertTrue(len(message["processors"]) == 2) processor_0 = message["processors"][0] self.assertEqual(len(processor_0), 2) self.assertEqual(processor_0["processor-id"], 0) self.assertEqual(processor_0["model"], "PPC970FX, altivec supported") processor_1 = message["processors"][1] self.assertEqual(len(processor_1), 2) self.assertEqual(processor_1["processor-id"], 1) self.assertEqual(processor_1["model"], "PPC970FX, altivec supported") def test_ppc_g5_cpu_info_same_as_last_known_cpu_info(self): """Test that one message is queued for duplicate G5 CPU info.""" filename = self.makeFile(self.SMP_PPC_G5) plugin = ProcessorInfo(delay=0.1, machine_name="ppc64", source_filename=filename) self.monitor.add(plugin) plugin.run() plugin.run() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 1) message = messages[0] self.assertEqual(message["type"], "processor-info") 
self.assertEqual(len(message["processors"]), 2) processor_0 = message["processors"][0] self.assertEqual(len(processor_0), 2) self.assertEqual(processor_0["model"], "PPC970FX, altivec supported") self.assertEqual(processor_0["processor-id"], 0) processor_1 = message["processors"][1] self.assertEqual(len(processor_1), 2) self.assertEqual(processor_1["model"], "PPC970FX, altivec supported") self.assertEqual(processor_1["processor-id"], 1) def test_read_sample_ppc_g4_data(self): """Ensure the plugin can parse /proc/cpuinfo from a G4 PowerBook.""" filename = self.makeFile(self.UP_PPC_G4) plugin = ProcessorInfo(machine_name="ppc", source_filename=filename) message = plugin.create_message() self.assertEqual(message["type"], "processor-info") self.assertTrue(len(message["processors"]) == 1) processor = message["processors"][0] self.assertEqual(len(processor), 2) self.assertEqual(processor["processor-id"], 0) self.assertEqual(processor["model"], "7447A, altivec supported") class ARMMessageTest(LandscapeTest): """Tests for ARM-specific message builder.""" helpers = [MonitorHelper] ARM_NOKIA = """ Processor : ARMv6-compatible processor rev 2 (v6l) BogoMIPS : 164.36 Features : swp half thumb fastmult vfp edsp java CPU implementer : 0x41 CPU architecture: 6TEJ CPU variant : 0x0 CPU part : 0xb36 CPU revision : 2 Cache type : write-back Cache clean : cp15 c7 ops Cache lockdown : format C Cache format : Harvard I size : 32768 I assoc : 4 I line length : 32 I sets : 256 D size : 32768 D assoc : 4 D line length : 32 D sets : 256 Hardware : Nokia RX-44 Revision : 24202524 Serial : 0000000000000000 """ ARMv7 = """ Processor : ARMv7 Processor rev 1 (v7l) BogoMIPS : 663.55 Features : swp half thumb fastmult vfp edsp CPU implementer : 0x41 CPU architecture: 7 CPU variant : 0x2 CPU part : 0xc08 CPU revision : 1 Cache type : write-back Cache clean : read-block Cache lockdown : not supported Cache format : Unified Cache size : 768 Cache assoc : 1 Cache line length : 8 Cache sets : 64 Hardware : Sample Board Revision : 81029 Serial : 0000000000000000 """ ARMv7_reverse = """ Serial : 0000000000000000 Revision : 81029 Hardware : Sample Board Cache sets : 64 Cache line length : 8 Cache assoc : 1 Cache size : 768 Cache format : Unified Cache lockdown : not supported Cache clean : read-block Cache type : write-back CPU revision : 1 CPU part : 0xc08 CPU variant : 0x2 CPU architecture: 7 CPU implementer : 0x41 Features : swp half thumb fastmult vfp edsp BogoMIPS : 663.55 Processor : ARMv7 Processor rev 1 (v7l) """ def test_read_sample_nokia_data(self): """Ensure the plugin can parse /proc/cpuinfo from a Nokia N810.""" filename = self.makeFile(self.ARM_NOKIA) plugin = ProcessorInfo(machine_name="armv6l", source_filename=filename) message = plugin.create_message() self.assertEqual(message["type"], "processor-info") self.assertTrue(len(message["processors"]) == 1) processor_0 = message["processors"][0] self.assertEqual(len(processor_0), 2) self.assertEqual(processor_0["model"], "ARMv6-compatible processor rev 2 (v6l)") self.assertEqual(processor_0["processor-id"], 0) def test_read_sample_armv7_data(self): """Ensure the plugin can parse /proc/cpuinfo from a sample ARMv7.""" filename = self.makeFile(self.ARMv7) plugin = ProcessorInfo(machine_name="armv7l", source_filename=filename) message = plugin.create_message() self.assertEqual(message["type"], "processor-info") self.assertTrue(len(message["processors"]) == 1) processor_0 = message["processors"][0] self.assertEqual(len(processor_0), 3) self.assertEqual(processor_0["model"], 
"ARMv7 Processor rev 1 (v7l)") self.assertEqual(processor_0["processor-id"], 0) self.assertEqual(processor_0["cache-size"], 768) def test_read_sample_armv7_reverse_data(self): """Ensure the plugin can parse a reversed sample ARMv7 /proc/cpuinfo""" filename = self.makeFile(self.ARMv7_reverse) plugin = ProcessorInfo(machine_name="armv7l", source_filename=filename) message = plugin.create_message() self.assertEqual(message["type"], "processor-info") self.assertTrue(len(message["processors"]) == 1) processor_0 = message["processors"][0] self.assertEqual(len(processor_0), 3) self.assertEqual(processor_0["model"], "ARMv7 Processor rev 1 (v7l)") self.assertEqual(processor_0["processor-id"], 0) self.assertEqual(processor_0["cache-size"], 768) class SparcMessageTest(LandscapeTest): """Tests for sparc-specific message builder.""" helpers = [MonitorHelper] SMP_SPARC = """ cpu : TI UltraSparc IIIi (Jalapeno) fpu : UltraSparc IIIi integrated FPU prom : OBP 4.16.2 2004/10/04 18:22 type : sun4u ncpus probed : 2 ncpus active : 2 D$ parity tl1 : 0 I$ parity tl1 : 0 Cpu0Bogo : 24.00 Cpu0ClkTck : 000000004fa1be00 Cpu1Bogo : 24.00 Cpu1ClkTck : 000000004fa1be00 MMU Type : Cheetah+ State: CPU0: online CPU1: online """ def test_read_sample_sparc_data(self): """Ensure the plugin can parse /proc/cpuinfo from a dual UltraSparc.""" filename = self.makeFile(self.SMP_SPARC) plugin = ProcessorInfo(machine_name="sparc64", source_filename=filename) message = plugin.create_message() self.assertEqual(message["type"], "processor-info") self.assertTrue(len(message["processors"]) == 2) processor_0 = message["processors"][0] self.assertEqual(len(processor_0), 2) self.assertEqual(processor_0["model"], "TI UltraSparc IIIi (Jalapeno)") self.assertEqual(processor_0["processor-id"], 0) processor_1 = message["processors"][1] self.assertEqual(len(processor_1), 2) self.assertEqual(processor_1["model"], "TI UltraSparc IIIi (Jalapeno)") self.assertEqual(processor_1["processor-id"], 1) class X86MessageTest(LandscapeTest): """Test for x86-specific message handling.""" helpers = [MonitorHelper] SMP_OPTERON = """ processor : 0 vendor_id : AuthenticAMD cpu family : 15 model : 37 model name : AMD Opteron(tm) Processor 250 stepping : 1 cpu MHz : 2405.489 cache size : 1024 KB fpu : yes fpu_exception : yes cpuid level : 1 wp : yes flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 syscall nx mmxext fxsr_opt lm 3dnowext 3dnow pni bogomips : 4718.59 TLB size : 1024 4K pages clflush size : 64 cache_alignment : 64 address sizes : 40 bits physical, 48 bits virtual power management: ts fid vid ttp processor : 1 vendor_id : AuthenticAMD cpu family : 15 model : 37 model name : AMD Opteron(tm) Processor 250 stepping : 1 cpu MHz : 2405.489 cache size : 1024 KB fpu : yes fpu_exception : yes cpuid level : 1 wp : yes flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 syscall nx mmxext fxsr_opt lm 3dnowext 3dnow pni bogomips : 4800.51 TLB size : 1024 4K pages clflush size : 64 cache_alignment : 64 address sizes : 40 bits physical, 48 bits virtual power management: ts fid vid ttp """ UP_PENTIUM_M = """ processor : 0 vendor_id : GenuineIntel cpu family : 6 model : 13 model name : Intel(R) Pentium(R) M processor 1.50GHz stepping : 8 cpu MHz : 598.834 cache size : 2048 KB fdiv_bug : no hlt_bug : no f00f_bug : no coma_bug : no fpu : yes fpu_exception : yes cpuid level : 2 wp : yes flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat 
clflush dts acpi mmx fxsr sse sse2 ss tm pbe nx est tm2 bogomips : 1198.25 """ def setUp(self): LandscapeTest.setUp(self) self.mstore.set_accepted_types(["processor-info"]) def test_read_sample_opteron_data(self): """Ensure the plugin can parse /proc/cpuinfo from a dual Opteron.""" filename = self.makeFile(self.SMP_OPTERON) plugin = ProcessorInfo(machine_name="x86_64", source_filename=filename) message = plugin.create_message() self.assertEqual(message["type"], "processor-info") self.assertTrue(len(message["processors"]) == 2) processor_0 = message["processors"][0] self.assertEqual(len(processor_0), 4) self.assertEqual(processor_0["vendor"], "AuthenticAMD") self.assertEqual(processor_0["model"], "AMD Opteron(tm) Processor 250") self.assertEqual(processor_0["cache-size"], 1024) self.assertEqual(processor_0["processor-id"], 0) processor_1 = message["processors"][1] self.assertEqual(len(processor_1), 4) self.assertEqual(processor_1["vendor"], "AuthenticAMD") self.assertEqual(processor_1["model"], "AMD Opteron(tm) Processor 250") self.assertEqual(processor_1["cache-size"], 1024) self.assertEqual(processor_1["processor-id"], 1) def test_plugin_manager(self): """Test plugin manager integration.""" filename = self.makeFile(self.UP_PENTIUM_M) plugin = ProcessorInfo(delay=0.1, machine_name="i686", source_filename=filename) self.monitor.add(plugin) self.reactor.advance(0.5) self.monitor.exchange() self.assertMessages( self.mstore.get_pending_messages(), [{"type": "processor-info", "processors": [ {"vendor": "GenuineIntel", "model": "Intel(R) Pentium(R) M processor 1.50GHz", "cache-size": 2048, "processor-id": 0}], }]) def test_read_sample_pentium_m_data(self): """Ensure the plugin can parse /proc/cpuinfo from a Pentium-M.""" filename = self.makeFile(self.UP_PENTIUM_M) plugin = ProcessorInfo(machine_name="i686", source_filename=filename) message = plugin.create_message() self.assertEqual(message["type"], "processor-info") self.assertTrue(len(message["processors"]) == 1) processor = message["processors"][0] self.assertEqual(len(processor), 4) self.assertEqual(processor["vendor"], "GenuineIntel") self.assertEqual(processor["model"], "Intel(R) Pentium(R) M processor 1.50GHz") self.assertEqual(processor["cache-size"], 2048) self.assertEqual(processor["processor-id"], 0) def test_pentium_m_cpu_info_same_as_last_known_cpu_info(self): """Test that one message is queued for duplicate Pentium-M CPU info.""" filename = self.makeFile(self.UP_PENTIUM_M) plugin = ProcessorInfo(delay=0.1, machine_name="i686", source_filename=filename) self.monitor.add(plugin) self.monitor.add(plugin) self.reactor.call_later(0.5, self.reactor.stop) self.reactor.run() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 1) message = messages[0] self.assertEqual(message["type"], "processor-info") self.assertEqual(len(message["processors"]), 1) processor = message["processors"][0] self.assertEqual(len(processor), 4) self.assertEqual(processor["vendor"], "GenuineIntel") self.assertEqual(processor["model"], "Intel(R) Pentium(R) M processor 1.50GHz") self.assertEqual(processor["cache-size"], 2048) self.assertEqual(processor["processor-id"], 0) def test_unchanging_data(self): filename = self.makeFile(self.UP_PENTIUM_M) plugin = ProcessorInfo(delay=0.1, machine_name="i686", source_filename=filename) self.monitor.add(plugin) plugin.run() plugin.run() self.assertEqual(len(self.mstore.get_pending_messages()), 1) def test_changing_data(self): filename = self.makeFile(self.UP_PENTIUM_M) plugin = ProcessorInfo(delay=0.1, 
machine_name="i686", source_filename=filename) self.monitor.add(plugin) plugin.run() self.makeFile(self.SMP_OPTERON, path=filename) plugin.run() self.assertEqual(len(self.mstore.get_pending_messages()), 2) def test_no_message_if_not_accepted(self): """ Don't add any messages at all if the broker isn't currently accepting their type. """ self.mstore.set_accepted_types([]) filename = self.makeFile(self.UP_PENTIUM_M) plugin = ProcessorInfo(delay=0.1, machine_name="i686", source_filename=filename) self.monitor.add(plugin) self.mstore.set_accepted_types(["processor-info"]) self.assertMessages(list(self.mstore.get_pending_messages()), []) landscape-client-14.01/landscape/monitor/tests/test_plugin.py0000644000175000017500000001504012301414317024227 0ustar andreasandreasfrom twisted.internet.defer import succeed from landscape.monitor.plugin import MonitorPlugin, DataWatcher from landscape.schema import Message, Int from landscape.tests.mocker import ANY from landscape.tests.helpers import ( LandscapeTest, MonitorHelper, LogKeeperHelper) class MonitorPluginTest(LandscapeTest): helpers = [MonitorHelper] def test_without_persist_name(self): """ By default a L{MonitorPlugin} doesn't have a C{_persist} attribute. """ plugin = MonitorPlugin() plugin.register(self.monitor) self.assertIs(plugin.persist, None) def test_with_persist_name(self): """ When plugins providea C{persist_name} attribute, they get a persist object set at C{_persist} which is rooted at the name specified. """ plugin = MonitorPlugin() plugin.persist_name = "wubble" plugin.register(self.monitor) plugin.persist.set("hi", "there") self.assertEqual(self.monitor.persist.get("wubble"), {"hi": "there"}) def test_with_no_run_interval(self): """ If the C{run_interval} attribute of L{MonitorPlugin} is C{None}, its C{run} method won't get called by the reactor. """ plugin = MonitorPlugin() plugin.run = lambda: 1 / 0 plugin.run_interval = None plugin.register(self.monitor) self.reactor.advance(MonitorPlugin.run_interval) def test_call_on_accepted(self): """ L{MonitorPlugin}-based plugins can provide a callable to call when a message type becomes accepted. """ plugin = MonitorPlugin() plugin.register(self.monitor) callback = self.mocker.mock() callback("foo", kwarg="bar") self.mocker.replay() plugin.call_on_accepted("type", callback, "foo", kwarg="bar") self.reactor.fire(("message-type-acceptance-changed", "type"), True) def test_call_on_accepted_when_unaccepted(self): """ Notifications are only dispatched to plugins when types become accepted, not when they become unaccepted. """ plugin = MonitorPlugin() plugin.register(self.monitor) callback = lambda: 1 / 0 plugin.call_on_accepted("type", callback) self.reactor.fire(("message-type-acceptance-changed", "type"), False) def test_resynchronize_with_global_scope(self): """ If a 'resynchronize' event fires with global scope, we clear down the persist. """ plugin = MonitorPlugin() plugin.persist_name = "wubble" plugin.register(self.monitor) plugin.persist.set("hi", "there") self.assertEqual(self.monitor.persist.get("wubble"), {"hi": "there"}) self.reactor.fire("resynchronize") self.assertTrue(self.monitor.persist.get("wubble") is None) def test_resynchronize_with_provided_scope(self): """ If a 'resynchronize' event fires with the provided scope, we clear down the persist. 
""" plugin = MonitorPlugin() plugin.persist_name = "wubble" plugin.scope = "frujical" plugin.register(self.monitor) plugin.persist.set("hi", "there") self.assertEqual(self.monitor.persist.get("wubble"), {"hi": "there"}) self.reactor.fire("resynchronize", scopes=["frujical"]) self.assertTrue(self.monitor.persist.get("wubble") is None) def test_do_not_resynchronize_with_other_scope(self): """ If a 'resynchronize' event fires with an irrelevant scope, we do nothing. """ plugin = MonitorPlugin() plugin.persist_name = "wubble" plugin.scope = "frujical" plugin.register(self.monitor) plugin.persist.set("hi", "there") self.assertEqual(self.monitor.persist.get("wubble"), {"hi": "there"}) self.reactor.fire("resynchronize", scopes=["chrutfup"]) self.assertEqual(self.monitor.persist.get("wubble"), {"hi": "there"}) class StubDataWatchingPlugin(DataWatcher): persist_name = "ooga" message_type = "wubble" message_key = "wubblestuff" def __init__(self, data=None): self.data = data def get_data(self): return self.data class DataWatcherTest(LandscapeTest): helpers = [MonitorHelper, LogKeeperHelper] def setUp(self): LandscapeTest.setUp(self) self.plugin = StubDataWatchingPlugin(1) self.plugin.register(self.monitor) self.mstore.add_schema(Message("wubble", {"wubblestuff": Int()})) def test_get_message(self): self.assertEqual(self.plugin.get_message(), {"type": "wubble", "wubblestuff": 1}) def test_get_message_unchanging(self): self.assertEqual(self.plugin.get_message(), {"type": "wubble", "wubblestuff": 1}) self.assertEqual(self.plugin.get_message(), None) def test_basic_exchange(self): # Is this really want we want to do? self.mstore.set_accepted_types(["wubble"]) self.plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(messages[0]["type"], "wubble") self.assertEqual(messages[0]["wubblestuff"], 1) self.assertIn("Queueing a message with updated data watcher info for " "landscape.monitor.tests.test_plugin.StubDataWatching" "Plugin.", self.logfile.getvalue()) def test_unchanging_value(self): # Is this really want we want to do? self.mstore.set_accepted_types(["wubble"]) self.plugin.exchange() self.plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 1) def test_urgent_exchange(self): """ When exchange is called with an urgent argument set to True make sure it sends the message urgently. """ remote_broker_mock = self.mocker.replace(self.remote) remote_broker_mock.send_message(ANY, ANY, urgent=True) self.mocker.result(succeed(None)) self.mocker.replay() self.mstore.set_accepted_types(["wubble"]) self.plugin.exchange(True) def test_no_message_if_not_accepted(self): """ Don't add any messages at all if the broker isn't currently accepting their type. 
""" self.mstore.set_accepted_types([]) self.reactor.advance(self.monitor.step_size * 2) self.monitor.exchange() self.mstore.set_accepted_types(["wubble"]) self.assertMessages(list(self.mstore.get_pending_messages()), []) landscape-client-14.01/landscape/monitor/tests/test_networkactivity.py0000644000175000017500000002052212301414317026200 0ustar andreasandreasimport socket from landscape.monitor.networkactivity import NetworkActivity from landscape.tests.helpers import LandscapeTest, MonitorHelper class NetworkActivityTest(LandscapeTest): helpers = [MonitorHelper] stats_template = """\ Inter-| Receive | Transmit face |bytes packets compressed multicast|bytes packets errs drop fifo lo:%(lo_in)d %(lo_in_p)d 0 0 %(lo_out)d %(lo_out_p)d 0 0 0 eth0: %(eth0_in)d 12539 0 62 %(eth0_out)d 12579 0 0 0 %(extra)s """ def setUp(self): super(NetworkActivityTest, self).setUp() self.activity_file = open(self.makeFile(), "w+") self.write_activity() self.plugin = NetworkActivity( network_activity_file=self.activity_file.name, create_time=self.reactor.time) self.monitor.add(self.plugin) def tearDown(self): self.activity_file.close() super(NetworkActivityTest, self).tearDown() def write_activity(self, lo_in=0, lo_out=0, eth0_in=0, eth0_out=0, extra="", lo_in_p=0, lo_out_p=0, **kw): kw.update(dict( lo_in=lo_in, lo_out=lo_out, lo_in_p=lo_in_p, lo_out_p=lo_out_p, eth0_in=eth0_in, eth0_out=eth0_out, extra=extra)) self.activity_file.seek(0, 0) self.activity_file.truncate() self.activity_file.write(self.stats_template % kw) self.activity_file.flush() def test_read_proc_net_dev(self): """ When the network activity plugin runs it reads data from /proc/net/dev which it parses and accumulates to read values. This test ensures that /proc/net/dev is always parseable and that messages are in the expected format and contain data with expected datatypes. """ plugin = NetworkActivity(create_time=self.reactor.time) self.monitor.add(plugin) plugin.run() self.reactor.advance(self.monitor.step_size) # hmmm. try to connect anywhere to advance the net stats try: socket.socket().connect(("localhost", 9999)) except socket.error: pass plugin.run() message = plugin.create_message() self.assertTrue(message) def test_message_contents(self): """ The network plugin sends messages with the traffic delta along with the step per network interface. Only interfaces which have deltas are present in the message. """ self.write_activity(lo_in=2000, lo_out=1900) self.plugin.run() self.reactor.advance(self.monitor.step_size) self.write_activity(lo_in=2010, lo_out=1999) self.plugin.run() message = self.plugin.create_message() self.assertTrue(message) self.assertTrue("type" in message) self.assertEqual(message["type"], "network-activity") self.assertEqual(message["activities"]["lo"], [(300, 10, 99)]) self.assertNotIn("eth0", message["activities"]) def test_proc_rollover(self): """ If /proc/net/dev rollovers, the network plugin handles the value and gives a positive value instead. 
""" self.plugin._rollover_maxint = 10000 self.write_activity(lo_in=2000, lo_out=1900) self.plugin.run() self.reactor.advance(self.monitor.step_size) self.write_activity(lo_in=1010, lo_out=999) self.plugin.run() message = self.plugin.create_message() self.assertTrue(message) self.assertTrue("type" in message) self.assertEqual(message["type"], "network-activity") self.assertEqual(message["activities"]["lo"], [(300, 9010, 9099)]) self.assertNotIn("eth0", message["activities"]) def test_no_message_without_traffic_delta(self): """ If no traffic delta is detected between runs, no message will be generated by the plugin. """ self.plugin.run() self.reactor.advance(self.monitor.step_size) message = self.plugin.create_message() self.assertFalse(message) self.plugin.run() message = self.plugin.create_message() self.assertFalse(message) def test_no_message_without_traffic_delta_across_steps(self): """ A traffic delta needs to cross step boundaries before a message is generated. """ self.plugin.run() self.write_activity(lo_out=1000, eth0_out=1000) self.reactor.advance(self.monitor.step_size) message = self.plugin.create_message() self.assertFalse(message) def test_interface_temporarily_disappears(self): """ When an interface is removed (ie usb hotplug) and then activated again its delta will not be retained, because the values may have been reset. """ self.write_activity(extra="wlan0: 2222 0 0 0 2222 0 0 0 0") self.plugin.run() self.reactor.advance(self.monitor.step_size) self.write_activity() self.plugin.run() message = self.plugin.create_message() self.assertFalse(message) self.write_activity(extra="wlan0: 1000 0 0 0 1000 0 0 0 0") self.reactor.advance(self.monitor.step_size) self.plugin.run() message = self.plugin.create_message() self.assertFalse(message) def test_messaging_flushes(self): """ Duplicate message should never be created. If no data is available, no message is created. """ self.plugin.run() self.reactor.advance(self.monitor.step_size) self.write_activity(eth0_out=1111) self.plugin.run() message = self.plugin.create_message() self.assertTrue(message) message = self.plugin.create_message() self.assertFalse(message) def test_exchange_no_message(self): """ No message is sent to the exchange if there isn't a traffic delta. """ self.reactor.advance(self.monitor.step_size) self.mstore.set_accepted_types([self.plugin.message_type]) self.plugin.exchange() self.assertFalse(self.mstore.count_pending_messages()) def test_exchange_messages(self): """ The network plugin queues message when an exchange happens. Each message should be aligned to a step boundary; messages collected between exchange periods should be delivered in a single message. """ self.reactor.advance(self.monitor.step_size) self.write_activity(lo_out=1000, eth0_out=1000) self.plugin.run() self.mstore.set_accepted_types([self.plugin.message_type]) self.plugin.exchange() step_size = self.monitor.step_size self.assertMessages(self.mstore.get_pending_messages(), [{"type": "network-activity", "activities": { "lo": [(step_size, 0, 1000)], "eth0": [(step_size, 0, 1000)]}}]) def test_config(self): """The network activity plugin is enabled by default.""" self.assertIn("NetworkActivity", self.config.plugin_factories) def test_limit_amount_of_items(self): """ The network plugin doesn't send too many items at once in a single network message, to not crush the server. 
""" def extra(data): result = "" for i in range(50): result += ( """eth%d: %d 12539 0 62 %d 12579 0 0 0\n """ % (i, data, data)) return result for i in range(1, 10): data = i * 1000 self.write_activity(lo_out=data, eth0_out=data, extra=extra(data)) self.plugin.run() self.reactor.advance(self.monitor.step_size) # We have created 408 items. It should be sent in 3 messages. message = self.plugin.create_message() items = sum(len(i) for i in message["activities"].values()) self.assertEqual(200, items) message = self.plugin.create_message() items = sum(len(i) for i in message["activities"].values()) self.assertEqual(200, items) message = self.plugin.create_message() items = sum(len(i) for i in message["activities"].values()) self.assertEqual(8, items) landscape-client-14.01/landscape/monitor/tests/test_mountinfo.py0000644000175000017500000006334112301414317024756 0ustar andreasandreasimport tempfile from twisted.internet.defer import succeed from landscape.monitor.mountinfo import MountInfo from landscape.tests.helpers import LandscapeTest, mock_counter, MonitorHelper from landscape.tests.mocker import ANY mb = lambda x: x * 1024 * 1024 class MountInfoTest(LandscapeTest): """Tests for mount-info plugin.""" helpers = [MonitorHelper] def setUp(self): LandscapeTest.setUp(self) self.mstore.set_accepted_types(["mount-info", "free-space"]) self.log_helper.ignore_errors("Typelib file for namespace") def get_mount_info(self, *args, **kwargs): if "statvfs" not in kwargs: kwargs["statvfs"] = lambda path: (0,) * 1000 plugin = MountInfo(*args, **kwargs) # To make sure tests are isolated from the real system by default. plugin.is_device_removable = lambda x: False return plugin def test_read_proc_mounts(self): """ When the mount info plugin runs it reads data from /proc/mounts to discover mounts and calls os.statvfs() to retrieve current data for each mount point. This test makes sure that os.statvfs() is called without failing, that /proc/mounts is readable, and that messages with the expected datatypes are generated. """ plugin = self.get_mount_info(create_time=self.reactor.time) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size) message = plugin.create_mount_info_message() self.assertTrue(message) self.assertEqual(message["type"], "mount-info") self.assertTrue("mount-info" in message) self.assertTrue(len(message["mount-info"]) > 0) keys = set(["filesystem", "total-space", "device", "mount-point"]) for now, mount_info in message["mount-info"]: self.assertEqual(set(mount_info.keys()), keys) self.assertTrue(isinstance(mount_info["filesystem"], basestring)) self.assertTrue(isinstance(mount_info["device"], basestring)) self.assertTrue(isinstance(mount_info["total-space"], (int, long))) self.assertTrue(isinstance(mount_info["mount-point"], basestring)) def test_read_sample_data(self): """ Sample data is used to ensure that the free space included in the message is calculated correctly. 
""" def statvfs(path): if path == "/": return (4096, 0, mb(1000L), mb(100L), 0L, 0L, 0L, 0, 0) else: return (4096, 0, mb(10000L), mb(1000L), 0L, 0L, 0L, 0, 0) filename = self.makeFile("""\ rootfs / rootfs rw 0 0 none /dev ramfs rw 0 0 /dev/hda1 / ext3 rw 0 0 /dev/hda1 /dev/.static/dev ext3 rw 0 0 proc /proc proc rw,nodiratime 0 0 sysfs /sys sysfs rw 0 0 usbfs /proc/bus/usb usbfs rw 0 0 devpts /dev/pts devpts rw 0 0 tmpfs /dev/shm tmpfs rw 0 0 tmpfs /lib/modules/2.6.12-10-386/volatile tmpfs rw 0 0 /dev/hde1 /mnt/hde1 reiserfs rw 0 0 /dev/hde1 /mnt/bind reiserfs rw 0 0 /dev/sdb2 /media/Boot\\040OSX hfsplus nls=utf8 0 0 """) mtab_filename = self.makeFile("""\ rootfs / rootfs rw 0 0 none /dev ramfs rw 0 0 /dev/hda1 / ext3 rw 0 0 /dev/hda1 /dev/.static/dev ext3 rw 0 0 proc /proc proc rw,nodiratime 0 0 sysfs /sys sysfs rw 0 0 usbfs /proc/bus/usb usbfs rw 0 0 devpts /dev/pts devpts rw 0 0 tmpfs /dev/shm tmpfs rw 0 0 tmpfs /lib/modules/2.6.12-10-386/volatile tmpfs rw 0 0 /dev/hde1 /mnt/hde1 reiserfs rw 0 0 /dev/hde1 /mnt/bind none rw,bind 0 0 /dev/sdb2 /media/Boot\\040OSX hfsplus rw 0 0 """) plugin = self.get_mount_info(mounts_file=filename, statvfs=statvfs, create_time=self.reactor.time, mtab_file=mtab_filename) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size) message = plugin.create_mount_info_message() self.assertTrue(message) self.assertEqual(message["type"], "mount-info") mount_info = message.get("mount-info", ()) self.assertEqual(len(mount_info), 3) self.assertEqual(mount_info[0][1], {"device": "/dev/hda1", "mount-point": "/", "filesystem": "ext3", "total-space": 4096000}) self.assertEqual(mount_info[1][1], {"device": "/dev/hde1", "mount-point": "/mnt/hde1", "filesystem": "reiserfs", "total-space": 40960000}) self.assertEqual( mount_info[2][1], {"device": "/dev/sdb2", "mount-point": "/media/Boot OSX", "filesystem": "hfsplus", "total-space": 40960000}) def test_read_changing_total_space(self): """ Total space measurements are only sent when (a) none have ever been sent, or (b) the value has changed since the last time data was collected. The test sets the mount info plugin interval to the same value as the step size and advances the reactor such that the plugin will be run twice. Each time it runs it gets a different value from our sample statvfs() function which should cause it to queue new messages. """ def statvfs(path, multiplier=mock_counter(1).next): return (4096, 0, mb(multiplier() * 1000), mb(100), 0, 0, 0, 0, 0) filename = self.makeFile("""\ /dev/hda1 / ext3 rw 0 0 """) plugin = self.get_mount_info(mounts_file=filename, statvfs=statvfs, create_time=self.reactor.time, interval=self.monitor.step_size, mtab_file=filename) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size * 2) message = plugin.create_mount_info_message() mount_info = message["mount-info"] self.assertEqual(len(mount_info), 2) for i, total_space in enumerate([4096000, 8192000]): self.assertEqual(mount_info[i][0], (i + 1) * self.monitor.step_size) self.assertEqual(mount_info[i][1], {"device": "/dev/hda1", "filesystem": "ext3", "mount-point": "/", "total-space": total_space}) def test_read_disjointed_changing_total_space(self): """ Total space measurements are only sent when (a) none have ever been sent, or (b) the value has changed since the last time data was collected. This test ensures that the (b) criteria is checked per-mount point. 
        The sample statvfs() function only provides changing total space for
        /; therefore, new messages should only be queued for / after the
        first message is created.
        """
        def statvfs(path, multiplier=mock_counter(1).next):
            if path == "/":
                return (4096, 0, mb(1000), mb(100), 0, 0, 0, 0, 0)
            return (4096, 0, mb(multiplier() * 1000), mb(100), 0, 0, 0, 0, 0)

        filename = self.makeFile("""\
/dev/hda1 / ext3 rw 0 0
/dev/hde1 /mnt/hde1 ext3 rw 0 0
""")
        plugin = self.get_mount_info(mounts_file=filename, statvfs=statvfs,
                                     create_time=self.reactor.time,
                                     interval=self.monitor.step_size,
                                     mtab_file=filename)
        self.monitor.add(plugin)

        self.reactor.advance(self.monitor.step_size * 2)

        message = plugin.create_mount_info_message()
        self.assertTrue(message)

        mount_info = message.get("mount-info", ())
        self.assertEqual(len(mount_info), 3)

        self.assertEqual(mount_info[0][0], self.monitor.step_size)
        self.assertEqual(mount_info[0][1],
                         {"device": "/dev/hda1", "mount-point": "/",
                          "filesystem": "ext3", "total-space": 4096000})

        self.assertEqual(mount_info[1][0], self.monitor.step_size)
        self.assertEqual(mount_info[1][1],
                         {"device": "/dev/hde1", "mount-point": "/mnt/hde1",
                          "filesystem": "ext3", "total-space": 4096000})

        self.assertEqual(mount_info[2][0], self.monitor.step_size * 2)
        self.assertEqual(mount_info[2][1],
                         {"device": "/dev/hde1", "mount-point": "/mnt/hde1",
                          "filesystem": "ext3", "total-space": 8192000})

    def test_exchange_messages(self):
        """
        The mount_info plugin queues messages when manager.exchange() is
        called.  Each message should be aligned to a step boundary;
        messages collected between exchange periods should be delivered in
        a single message.
        """
        def statvfs(path):
            return (4096, 0, mb(1000), mb(100), 0, 0, 0, 0, 0)

        filename = self.makeFile("""\
/dev/hda1 / ext3 rw 0 0
""")
        plugin = self.get_mount_info(mounts_file=filename, statvfs=statvfs,
                                     create_time=self.reactor.time,
                                     mtab_file=filename)
        step_size = self.monitor.step_size
        self.monitor.add(plugin)

        # Exchange should trigger a flush of the persist database
        registry_mocker = self.mocker.replace(plugin.registry)
        registry_mocker.flush()
        self.mocker.result(None)
        self.mocker.replay()

        self.reactor.advance(step_size * 2)
        self.monitor.exchange()

        messages = self.mstore.get_pending_messages()
        self.assertEqual(len(messages), 2)

        message = [d for d in messages if d["type"] == "free-space"][0]
        free_space = message["free-space"]
        for i in range(len(free_space)):
            self.assertEqual(free_space[i][0], (i + 1) * step_size)
            self.assertEqual(free_space[i][1], "/")
            self.assertEqual(free_space[i][2], 409600)

    def test_messaging_flushes(self):
        """
        Duplicate messages should never be created.  If no data is
        available, None will be returned when messages are created.
        """
        def statvfs(path):
            return (4096, 0, mb(1000), mb(100), 0, 0, 0, 0, 0)

        filename = self.makeFile("""\
/dev/hda1 / ext3 rw 0 0
""")
        plugin = self.get_mount_info(mounts_file=filename, statvfs=statvfs,
                                     create_time=self.reactor.time,
                                     mtab_file=filename)
        self.monitor.add(plugin)

        self.reactor.advance(self.monitor.step_size)

        messages = plugin.create_messages()
        self.assertEqual(len(messages), 2)

        messages = plugin.create_messages()
        self.assertEqual(len(messages), 0)

    def test_read_multi_bound_mounts(self):
        """
        The mount info plugin should handle multi-bound mount points by
        reporting them only once.  In practice, this test doesn't really
        test anything since the current behaviour is to ignore any mount
        point for which the device doesn't start with /dev.
""" def statvfs(path): return (4096, 0, mb(1000), mb(100), 0, 0, 0, 0, 0) filename = self.makeFile("""\ /dev/hdc4 /mm xfs rw 0 0 /mm/ubuntu-mirror /home/dchroot/warty/mirror none bind 0 0 /mm/ubuntu-mirror /home/dchroot/hoary/mirror none bind 0 0 /mm/ubuntu-mirror /home/dchroot/breezy/mirror none bind 0 0 """) plugin = self.get_mount_info(mounts_file=filename, statvfs=statvfs, create_time=self.reactor.time, mtab_file=filename) step_size = self.monitor.step_size self.monitor.add(plugin) self.reactor.advance(step_size) message = plugin.create_mount_info_message() self.assertTrue(message) mount_info = message.get("mount-info", ()) self.assertEqual(len(mount_info), 1) self.assertEqual(mount_info[0][0], step_size) self.assertEqual(mount_info[0][1], {"device": "/dev/hdc4", "mount-point": "/mm", "filesystem": "xfs", "total-space": 4096000}) def test_ignore_nfs_mounts(self): """ The mount info plugin should only report data about local mount points. """ filename = self.makeFile("""\ ennui:/data /data nfs rw,v3,rsize=32768,wsize=32768,hard,lock,proto=udp,\ addr=ennui 0 0 """) plugin = self.get_mount_info(mounts_file=filename, mtab_file=filename) self.monitor.add(plugin) plugin.run() message = plugin.create_mount_info_message() self.assertEqual(message, None) def test_ignore_removable_partitions(self): """ "Removable" partitions are not reported to the server. """ filename = self.makeFile("""\ /dev/hdc4 /mm xfs rw 0 0""") plugin = self.get_mount_info(mounts_file=filename, mtab_file=filename) plugin.is_device_removable = lambda x: True # They are all removable self.monitor.add(plugin) plugin.run() message = plugin.create_mount_info_message() self.assertEqual(message, None) def test_sample_free_space(self): """Test collecting information about free space.""" def statvfs(path, multiplier=mock_counter(1).next): return (4096, 0, mb(1000), mb(multiplier() * 100), 0, 0, 0, 0, 0) filename = self.makeFile("""\ /dev/hda2 / xfs rw 0 0 """) plugin = self.get_mount_info(mounts_file=filename, statvfs=statvfs, create_time=self.reactor.time, mtab_file=filename) step_size = self.monitor.step_size self.monitor.add(plugin) self.reactor.advance(step_size) message = plugin.create_free_space_message() self.assertTrue(message) self.assertEqual(message.get("type"), "free-space") free_space = message.get("free-space", ()) self.assertEqual(len(free_space), 1) self.assertEqual(free_space[0], (step_size, "/", 409600)) def test_never_exchange_empty_messages(self): """ When the plugin has no data, it's various create_X_message() methods will return None. Empty or null messages should never be queued. """ self.mstore.set_accepted_types(["load-average"]) filename = self.makeFile("") plugin = self.get_mount_info(mounts_file=filename, mtab_file=filename) self.monitor.add(plugin) self.monitor.exchange() self.assertEqual(len(self.mstore.get_pending_messages()), 0) def test_messages(self): """ Test ensures all expected messages are created and contain the right datatypes. 
""" def statvfs(path): return (4096, 0, mb(1000), mb(100), 0, 0, 0, 0, 0) filename = self.makeFile("""\ /dev/hda2 / xfs rw 0 0 """) plugin = self.get_mount_info(mounts_file=filename, statvfs=statvfs, create_time=self.reactor.time, mtab_file=filename) step_size = self.monitor.step_size self.monitor.add(plugin) self.reactor.advance(step_size) self.monitor.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 2) self.assertEqual(messages[0].get("mount-info"), [(step_size, {"device": "/dev/hda2", "mount-point": "/", "filesystem": "xfs", "total-space": 4096000})]) self.assertEqual(messages[1].get("free-space"), [(step_size, "/", 409600)]) self.assertTrue(isinstance(messages[1]["free-space"][0][2], (int, long))) def test_resynchronize(self): """ On the reactor "resynchronize" event, new mount-info messages should be sent. """ def statvfs(path): return (4096, 0, mb(1000), mb(100), 0, 0, 0, 0, 0) filename = self.makeFile("""\ /dev/hda1 / ext3 rw 0 0 """) plugin = self.get_mount_info(mounts_file=filename, create_time=self.reactor.time, statvfs=statvfs, mtab_file=filename) self.monitor.add(plugin) plugin.run() plugin.exchange() self.reactor.fire("resynchronize", scopes=["disk"]) plugin.run() plugin.exchange() messages = self.mstore.get_pending_messages() messages = [message for message in messages if message["type"] == "mount-info"] expected_message = { "type": "mount-info", "mount-info": [(0, {"device": "/dev/hda1", "mount-point": "/", "total-space": 4096000, "filesystem": "ext3"})]} self.assertMessages(messages, [expected_message, expected_message]) def test_bind_mounts(self): """ Mounted devices that are mounted using Linux's "--bind" option shouldn't be listed, as they have the same free space/used space as the device they're bound to. """ def statvfs(path): return (4096, 0, mb(1000), mb(100), 0, 0, 0, 0, 0) # From this test data, we expect only two mount points to be returned, # and the other two to be ignored (the rebound /dev/hda2 -> /mnt # mounting) filename = self.makeFile("""\ /dev/devices/by-uuid/12345567 / ext3 rw 0 0 /dev/hda2 /usr ext3 rw 0 0 /dev/devices/by-uuid/12345567 /mnt ext3 rw 0 0 /dev/devices/by-uuid/12345567 /media/Boot\\040OSX hfsplus rw 0 0 """) mtab_filename = self.makeFile("""\ /dev/hda1 / ext3 rw 0 0 /dev/hda2 /usr ext3 rw 0 0 /opt /mnt none rw,bind 0 0 /opt /media/Boot\\040OSX none rw,bind 0 0 """) plugin = MountInfo(mounts_file=filename, create_time=self.reactor.time, statvfs=statvfs, mtab_file=mtab_filename) self.monitor.add(plugin) plugin.run() message = plugin.create_mount_info_message() self.assertEqual(message.get("mount-info"), [(0, {"device": "/dev/devices/by-uuid/12345567", "mount-point": "/", "total-space": 4096000L, "filesystem": "ext3"}), (0, {"device": "/dev/hda2", "mount-point": "/usr", "total-space": 4096000L, "filesystem": "ext3"}), ]) def test_no_mtab_file(self): """ If there's no mtab file available, then we can make no guesses about bind mounted directories, so any filesystems in /proc/mounts will be reported. """ def statvfs(path): return (4096, 0, mb(1000), mb(100), 0, 0, 0, 0, 0) # In this test, we expect all mount points to be returned, as we can't # identify any as bind mounts. filename = self.makeFile("""\ /dev/devices/by-uuid/12345567 / ext3 rw 0 0 /dev/hda2 /usr ext3 rw 0 0 /dev/devices/by-uuid/12345567 /mnt ext3 rw 0 0 """) # mktemp isn't normally secure, due to race conditions, but in this # case, we don't actually create the file at all. 
mtab_filename = tempfile.mktemp() plugin = MountInfo(mounts_file=filename, create_time=self.reactor.time, statvfs=statvfs, mtab_file=mtab_filename) self.monitor.add(plugin) plugin.run() message = plugin.create_mount_info_message() self.assertEqual(message.get("mount-info"), [(0, {"device": "/dev/devices/by-uuid/12345567", "mount-point": "/", "total-space": 4096000L, "filesystem": "ext3"}), (0, {"device": "/dev/hda2", "mount-point": "/usr", "total-space": 4096000L, "filesystem": "ext3"}), (0, {"device": "/dev/devices/by-uuid/12345567", "mount-point": "/mnt", "total-space": 4096000L, "filesystem": "ext3"})]) def test_no_message_if_not_accepted(self): """ Don't add any messages at all if the broker isn't currently accepting their type. """ self.mstore.set_accepted_types([]) def statvfs(path): return (4096, 0, mb(1000), mb(100), 0, 0, 0, 0, 0) # From this test data, we expect only two mount points to be returned, # and the third to be ignored (the rebound /dev/hda2 -> /mnt mounting) filename = self.makeFile("""\ /dev/devices/by-uuid/12345567 / ext3 rw 0 0 /dev/hda2 /usr ext3 rw 0 0 /dev/devices/by-uuid/12345567 /mnt ext3 rw 0 0 """) mtab_filename = self.makeFile("""\ /dev/hda1 / ext3 rw 0 0 /dev/hda2 /usr ext3 rw 0 0 /opt /mnt none rw,bind 0 0 """) plugin = MountInfo(mounts_file=filename, create_time=self.reactor.time, statvfs=statvfs, mtab_file=mtab_filename) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size * 2) self.monitor.exchange() self.mstore.set_accepted_types(["mount-info"]) self.assertMessages(list(self.mstore.get_pending_messages()), []) def test_call_on_accepted(self): plugin = self.get_mount_info(create_time=self.reactor.time) self.monitor.add(plugin) self.reactor.advance(plugin.run_interval) remote_broker_mock = self.mocker.replace(self.remote) remote_broker_mock.send_message(ANY, ANY, urgent=True) self.mocker.result(succeed(None)) self.mocker.count(2) self.mocker.replay() self.reactor.fire(("message-type-acceptance-changed", "mount-info"), True) def test_persist_timing(self): """Mount info are only persisted when exchange happens. Previously mount info were persisted as soon as they were gathered: if an event happened between the persist and the exchange, the server didn't get the mount info at all. This test ensures that mount info are only saved when exchange happens. """ def statvfs(path): return (4096, 0, mb(1000), mb(100), 0, 0, 0, 0, 0) filename = self.makeFile("""\ /dev/hda1 / ext3 rw 0 0 """) plugin = MountInfo(mounts_file=filename, create_time=self.reactor.time, statvfs=statvfs, mtab_file=filename) self.monitor.add(plugin) plugin.run() message1 = plugin.create_mount_info_message() self.assertEqual( message1.get("mount-info"), [(0, {"device": "/dev/hda1", "filesystem": "ext3", "mount-point": "/", "total-space": 4096000L})]) plugin.run() message2 = plugin.create_mount_info_message() self.assertEqual( message2.get("mount-info"), [(0, {"device": "/dev/hda1", "filesystem": "ext3", "mount-point": "/", "total-space": 4096000L})]) # Run again, calling create_mount_info_message purge the information plugin.run() plugin.exchange() plugin.run() message3 = plugin.create_mount_info_message() self.assertIdentical(message3, None) def test_exchange_limits_exchanged_free_space_messages(self): """ In order not to overload the server, the client should stagger the exchange of free-space messages. 
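        With max_free_space_items_to_exchange forced down to 5 below and
        ten data points accumulated, three consecutive exchanges are
        expected to deliver five items, the remaining five items, and
        then nothing.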
""" def statvfs(path): return (4096, 0, mb(1000), mb(100), 0, 0, 0, 0, 0) filename = self.makeFile("""\ /dev/hda1 / ext3 rw 0 0 """) plugin = self.get_mount_info(mounts_file=filename, statvfs=statvfs, create_time=self.reactor.time, mtab_file=filename) # Limit the test exchange to 5 items. plugin.max_free_space_items_to_exchange = 5 step_size = self.monitor.step_size self.monitor.add(plugin) # Exchange should trigger a flush of the persist database registry_mocker = self.mocker.replace(plugin.registry) registry_mocker.flush() self.mocker.result(None) self.mocker.replay() # Generate 10 data points self.reactor.advance(step_size * 10) self.monitor.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 2) message = [d for d in messages if d["type"] == "free-space"][0] free_space = message["free-space"] free_space_items = len(free_space) self.assertEqual(free_space_items, 5) for i in range(free_space_items): self.assertEqual(free_space[i][0], (i + 1) * step_size) self.assertEqual(free_space[i][1], "/") self.assertEqual(free_space[i][2], 409600) # The second exchange should pick up the remaining items. self.mstore.delete_all_messages() self.monitor.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 1) message = [d for d in messages if d["type"] == "free-space"][0] free_space = message["free-space"] free_space_items = len(free_space) self.assertEqual(free_space_items, 5) for i in range(free_space_items): # Note (i+6) we've already retrieved the first 5 items. self.assertEqual(free_space[i][0], (i + 6) * step_size) self.assertEqual(free_space[i][1], "/") self.assertEqual(free_space[i][2], 409600) # Third exchange should not get any further free-space messages self.mstore.delete_all_messages() self.monitor.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 0) landscape-client-14.01/landscape/monitor/tests/test_networkdevice.py0000644000175000017500000000446012301414317025606 0ustar andreasandreasfrom landscape.tests.helpers import LandscapeTest, MonitorHelper from landscape.lib.network import ( get_active_device_info) from landscape.monitor.networkdevice import NetworkDevice def test_get_active_device_info(): # Don't skip any interfaces for the tests return get_active_device_info(skipped_interfaces=()) class NetworkDeviceTest(LandscapeTest): helpers = [MonitorHelper] def setUp(self): super(NetworkDeviceTest, self).setUp() self.plugin = NetworkDevice(test_get_active_device_info) self.monitor.add(self.plugin) self.broker_service.message_store.set_accepted_types( [self.plugin.message_type]) def test_get_network_device(self): """A message is sent with device info""" self.plugin.exchange() message = self.mstore.get_pending_messages()[0] self.assertEqual(message["type"], "network-device") self.failUnlessIn("devices", message) self.assertTrue(len(message["devices"])) # only network device we can truly assert is localhost self.assertTrue(message["devices"][0]["interface"], "lo") self.assertTrue(message["devices"][0]["ip_address"], "0.0.0.0") self.assertTrue(message["devices"][0]["netmask"], "255.0.0.0") flags = message["devices"][0]["flags"] self.assertEqual(1, flags & 1) # UP self.assertEqual(8, flags & 8) # LOOPBACK self.assertEqual(64, flags & 64) # RUNNING def test_no_message_with_no_changes(self): """If no device changes from the last message, no message is sent.""" self.plugin.exchange() self.mstore.delete_all_messages() self.plugin.exchange() 
self.assertFalse(self.mstore.count_pending_messages()) def test_message_on_device_change(self): """When the active network devices change a message is generated.""" self.plugin.exchange() self.mstore.delete_all_messages() plugin = self.mocker.patch(self.plugin) plugin._device_info() self.mocker.result([]) self.mocker.replay() self.plugin.exchange() self.assertTrue(self.mstore.count_pending_messages()) def test_config(self): """The network device plugin is enabled by default.""" self.assertIn("NetworkDevice", self.config.plugin_factories) landscape-client-14.01/landscape/monitor/tests/test_memoryinfo.py0000644000175000017500000001471512301414317025125 0ustar andreasandreasfrom landscape.monitor.memoryinfo import MemoryInfo from landscape.tests.helpers import LandscapeTest, MonitorHelper from landscape.tests.mocker import ANY class MemoryInfoTest(LandscapeTest): helpers = [MonitorHelper] SAMPLE_DATA = """ MemTotal: 1546436 kB MemFree: 23452 kB Buffers: 41656 kB Cached: 807628 kB SwapCached: 17572 kB Active: 1030792 kB Inactive: 426892 kB HighTotal: 0 kB HighFree: 0 kB LowTotal: 1546436 kB LowFree: 23452 kB SwapTotal: 1622524 kB SwapFree: 1604936 kB Dirty: 1956 kB Writeback: 0 kB Mapped: 661772 kB Slab: 54980 kB CommitLimit: 2395740 kB Committed_AS: 1566888 kB PageTables: 2728 kB VmallocTotal: 516088 kB VmallocUsed: 5660 kB VmallocChunk: 510252 kB """ def setUp(self): super(MemoryInfoTest, self).setUp() def test_read_proc_meminfo(self): """ When the memory info plugin runs it reads data from /proc/meminfo which it parses and accumulates to read values. This test ensures that /proc/meminfo is always parseable and that messages are in the expected format and contain data with expected datatypes. """ plugin = MemoryInfo(create_time=self.reactor.time) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size) message = plugin.create_message() self.assertTrue("type" in message) self.assertEqual(message["type"], "memory-info") self.assertTrue("memory-info" in message) memory_info = message["memory-info"] self.assertEqual(len(memory_info), 1) self.assertTrue(isinstance(memory_info[0], tuple)) self.assertTrue(len(memory_info), 3) self.assertTrue(isinstance(memory_info[0][0], int)) self.assertTrue(isinstance(memory_info[0][1], int)) self.assertTrue(isinstance(memory_info[0][2], int)) def test_read_sample_data(self): """ This test uses sample /proc/meminfo data and ensures that messages contain expected free memory and free swap values. """ filename = self.makeFile(self.SAMPLE_DATA) plugin = MemoryInfo(source_filename=filename, create_time=self.reactor.time) step_size = self.monitor.step_size self.monitor.add(plugin) self.reactor.advance(step_size) message = plugin.create_message() self.assertEqual(message["memory-info"][0], (step_size, 852, 1567)) def test_messaging_flushes(self): """ Duplicate message should never be created. If no data is available, a message with an empty C{memory-info} list is expected. """ filename = self.makeFile(self.SAMPLE_DATA) plugin = MemoryInfo(source_filename=filename, create_time=self.reactor.time) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size) message = plugin.create_message() self.assertEqual(len(message["memory-info"]), 1) message = plugin.create_message() self.assertEqual(len(message["memory-info"]), 0) def test_ranges_remain_contiguous_after_flush(self): """ The memory info plugin uses the accumulate function to queue messages. Timestamps should always be contiguous, and always fall on a step boundary. 
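        For example, with the default step size the expected timestamps
        are step_size, step_size * 2, step_size * 3, and so on, with no
        gaps even though create_message() flushes the queue on every
        iteration.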
""" filename = self.makeFile(self.SAMPLE_DATA) plugin = MemoryInfo(source_filename=filename, create_time=self.reactor.time) self.monitor.add(plugin) step_size = self.monitor.step_size for i in range(1, 10): self.reactor.advance(step_size) message = plugin.create_message() memory_info = message["memory-info"] self.assertEqual(len(memory_info), 1) self.assertEqual(memory_info[0][0], step_size * i) def test_never_exchange_empty_messages(self): """ The plugin will create a message with an empty C{memory-info} list when no data is available. If an empty message is created during exchange, it should not be queued. """ self.mstore.set_accepted_types(["memory-info"]) filename = self.makeFile(self.SAMPLE_DATA) plugin = MemoryInfo(source_filename=filename, create_time=self.reactor.time) self.monitor.add(plugin) self.monitor.exchange() self.assertEqual(len(self.mstore.get_pending_messages()), 0) def test_exchange_messages(self): """ The memory info plugin queues messages when manager.exchange() is called. Each message should be aligned to a step boundary; messages collected between exchange period should be delivered in a single message. """ self.mstore.set_accepted_types(["memory-info"]) filename = self.makeFile(self.SAMPLE_DATA) plugin = MemoryInfo(source_filename=filename, create_time=self.reactor.time) step_size = self.monitor.step_size self.monitor.add(plugin) self.reactor.advance(step_size * 2) self.monitor.exchange() self.assertMessages(self.mstore.get_pending_messages(), [{"type": "memory-info", "memory-info": [(step_size, 852, 1567), (step_size * 2, 852, 1567)]}]) def test_call_on_accepted(self): plugin = MemoryInfo(source_filename=self.makeFile(self.SAMPLE_DATA), create_time=self.reactor.time) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size * 1) remote_broker_mock = self.mocker.replace(self.remote) remote_broker_mock.send_message(ANY, ANY, urgent=True) self.mocker.replay() self.reactor.fire(("message-type-acceptance-changed", "memory-info"), True) def test_no_message_if_not_accepted(self): """ Don't add any messages at all if the broker isn't currently accepting their type. """ plugin = MemoryInfo(create_time=self.reactor.time) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size) self.monitor.exchange() self.mstore.set_accepted_types(["memory-info"]) self.assertMessages(list(self.mstore.get_pending_messages()), []) landscape-client-14.01/landscape/monitor/tests/test_rebootrequired.py0000644000175000017500000001144712301414317025773 0ustar andreasandreasfrom landscape.monitor.rebootrequired import RebootRequired from landscape.tests.helpers import ( LandscapeTest, MonitorHelper, LogKeeperHelper) from landscape.tests.mocker import ANY class RebootRequiredTest(LandscapeTest): helpers = [MonitorHelper, LogKeeperHelper] def setUp(self): super(RebootRequiredTest, self).setUp() self.reboot_required_filename = self.makeFile() self.plugin = RebootRequired(self.reboot_required_filename) self.monitor.add(self.plugin) self.mstore.set_accepted_types(["reboot-required-info"]) def test_wb_get_flag(self): """ L{RebootRequired._get_flag} returns C{True} if the reboot-required flag file is present, C{False} otherwise. """ self.assertFalse(self.plugin._get_flag()) self.makeFile(path=self.reboot_required_filename, content="") self.assertTrue(self.plugin._get_flag()) def test_wb_get_packages(self): """ L{RebootRequired._get_packages} returns the packages listed in the reboot-required packages file if present, or an empty list otherwise. 
""" self.assertEqual([], self.plugin._get_packages()) self.makeFile(path=self.reboot_required_filename + ".pkgs", content="foo\nbar\n") self.assertEqual(["bar", "foo"], self.plugin._get_packages()) def test_wb_get_packages_with_duplicates(self): """ The list of packages returned by L{RebootRequired._get_packages} does not contain duplicate values. """ self.assertEqual([], self.plugin._get_packages()) self.makeFile(path=self.reboot_required_filename + ".pkgs", content="foo\nfoo\n") self.assertEqual(["foo"], self.plugin._get_packages()) def test_wb_get_packages_with_blank_lines(self): """ Blank lines are ignored by L{RebootRequired._get_packages}. """ self.assertEqual([], self.plugin._get_packages()) self.makeFile(path=self.reboot_required_filename + ".pkgs", content="bar\n\nfoo\n") self.assertEqual(["bar", "foo"], self.plugin._get_packages()) def test_wb_create_message(self): """ A message should be created if and only if the reboot-required status of the system has changed. """ self.assertEqual({"flag": False, "packages": []}, self.plugin._create_message()) self.makeFile(path=self.reboot_required_filename, content="") self.assertEqual({"flag": True}, self.plugin._create_message()) self.makeFile(path=self.reboot_required_filename + ".pkgs", content="foo\n") self.assertEqual({"packages": [u"foo"]}, self.plugin._create_message()) def test_send_message(self): """ A new C{"reboot-required-info"} message should be enqueued if and only if the reboot-required status of the system has changed. """ self.makeFile(path=self.reboot_required_filename + ".pkgs", content="foo\n") self.makeFile(path=self.reboot_required_filename, content="") self.plugin.send_message() self.assertIn("Queueing message with updated reboot-required status.", self.logfile.getvalue()) self.assertMessages(self.mstore.get_pending_messages(), [{"type": "reboot-required-info", "flag": True, "packages": [u"foo"]}]) self.mstore.delete_all_messages() self.plugin.send_message() self.assertMessages(self.mstore.get_pending_messages(), []) def test_run_interval(self): """ The L{RebootRequired} plugin will be scheduled to run every 15 minutes. """ self.assertEqual(900, self.plugin.run_interval) def test_run_immediately(self): """ The L{RebootRequired} plugin will be run immediately at startup. """ self.assertTrue(True, self.plugin.run_immediately) def test_run(self): """ If the server can accept them, the plugin should send C{reboot-required} messages. """ broker_mock = self.mocker.replace(self.remote) broker_mock.send_message(ANY, ANY, urgent=True) self.mocker.replay() self.plugin.run() self.mstore.set_accepted_types([]) self.plugin.run() def test_resynchronize(self): """ The "resynchronize" reactor message cause the plugin to send fresh data. 
""" self.plugin.run() self.reactor.fire("resynchronize", scopes=["package"]) self.plugin.run() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 2) landscape-client-14.01/landscape/monitor/tests/test_usermonitor.py0000644000175000017500000004276412301414317025334 0ustar andreasandreasfrom twisted.internet.defer import fail from landscape.amp import ComponentPublisher from landscape.monitor.usermonitor import ( UserMonitor, RemoteUserMonitorConnector) from landscape.manager.usermanager import UserManager from landscape.user.tests.helpers import FakeUserProvider from landscape.tests.helpers import LandscapeTest, MonitorHelper from landscape.tests.mocker import ANY class UserMonitorNoManagerTest(LandscapeTest): helpers = [MonitorHelper] def test_no_fetch_users_in_monitor_only_mode(self): """ If we're in monitor_only mode, then all users are assumed to be unlocked. """ self.config.monitor_only = True def got_result(result): self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"create-group-members": {u"webdev":[u"jdoe"]}, "create-groups": [{"gid": 1000, "name": u"webdev"}], "create-users": [{"enabled": True, "home-phone": None, "location": None, "name": u"JD", "primary-gid": 1000, "uid": 1000, "username": u"jdoe", "work-phone": None}], "type": "users"}]) plugin.stop() users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh")] groups = [("webdev", "x", 1000, ["jdoe"])] provider = FakeUserProvider(users=users, groups=groups) plugin = UserMonitor(provider=provider) plugin.register(self.monitor) self.broker_service.message_store.set_accepted_types(["users"]) result = plugin.run() result.addCallback(got_result) return result class UserMonitorTest(LandscapeTest): helpers = [MonitorHelper] def setUp(self): super(UserMonitorTest, self).setUp() self.shadow_file = self.makeFile( "jdoe:$1$xFlQvTqe$cBtrNEDOIKMy/BuJoUdeG0:13348:0:99999:7:::\n" "psmith:!:13348:0:99999:7:::\n" "sam:$1$q7sz09uw$q.A3526M/SHu8vUb.Jo1A/:13349:0:99999:7:::\n") self.user_manager = UserManager(shadow_file=self.shadow_file) self.publisher = ComponentPublisher(self.user_manager, self.reactor, self.config) self.publisher.start() self.provider = FakeUserProvider() self.plugin = UserMonitor(self.provider) def tearDown(self): self.publisher.stop() self.plugin.stop() return super(UserMonitorTest, self).tearDown() def test_constants(self): """ L{UserMonitor.persist_name} and L{UserMonitor.run_interval} need to be present for L{Plugin} to work properly. """ self.assertEqual(self.plugin.persist_name, "users") self.assertEqual(self.plugin.run_interval, 3600) def test_wb_resynchronize_event(self): """ When a C{resynchronize} event, with 'users' scope, occurs any cached L{UserChange} snapshots should be cleared and a new message with users generated. 
""" self.provider.users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh")] self.provider.groups = [("webdev", "x", 1000, ["jdoe"])] self.broker_service.message_store.set_accepted_types(["users"]) self.monitor.add(self.plugin) self.successResultOf(self.plugin.run()) persist = self.plugin._persist self.assertTrue(persist.get("users")) self.assertTrue(persist.get("groups")) self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"create-group-members": {u"webdev":[u"jdoe"]}, "create-groups": [{"gid": 1000, "name": u"webdev"}], "create-users": [{"enabled": True, "home-phone": None, "location": None, "name": u"JD", "primary-gid": 1000, "uid": 1000, "username": u"jdoe", "work-phone": None}], "type": "users"}]) self.broker_service.message_store.delete_all_messages() deferred = self.monitor.reactor.fire( "resynchronize", scopes=["users"])[0] self.successResultOf(deferred) self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"create-group-members": {u"webdev":[u"jdoe"]}, "create-groups": [{"gid": 1000, "name": u"webdev"}], "create-users": [{"enabled": True, "home-phone": None, "location": None, "name": u"JD", "primary-gid": 1000, "uid": 1000, "username": u"jdoe", "work-phone": None}], "type": "users"}]) def test_wb_resynchronize_event_with_global_scope(self): """ When a C{resynchronize} event, with global scope, occurs we act exactly as if it had 'users' scope. """ self.provider.users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh")] self.provider.groups = [("webdev", "x", 1000, ["jdoe"])] self.broker_service.message_store.set_accepted_types(["users"]) self.monitor.add(self.plugin) self.successResultOf(self.plugin.run()) persist = self.plugin._persist self.assertTrue(persist.get("users")) self.assertTrue(persist.get("groups")) self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"create-group-members": {u"webdev":[u"jdoe"]}, "create-groups": [{"gid": 1000, "name": u"webdev"}], "create-users": [{"enabled": True, "home-phone": None, "location": None, "name": u"JD", "primary-gid": 1000, "uid": 1000, "username": u"jdoe", "work-phone": None}], "type": "users"}]) self.broker_service.message_store.delete_all_messages() deferred = self.monitor.reactor.fire("resynchronize")[0] self.successResultOf(deferred) self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"create-group-members": {u"webdev":[u"jdoe"]}, "create-groups": [{"gid": 1000, "name": u"webdev"}], "create-users": [{"enabled": True, "home-phone": None, "location": None, "name": u"JD", "primary-gid": 1000, "uid": 1000, "username": u"jdoe", "work-phone": None}], "type": "users"}]) def test_do_not_resynchronize_with_other_scope(self): """ When a C{resynchronize} event, with an irrelevant scope, occurs we do nothing. 
""" self.provider.users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh")] self.provider.groups = [("webdev", "x", 1000, ["jdoe"])] self.broker_service.message_store.set_accepted_types(["users"]) self.monitor.add(self.plugin) self.successResultOf(self.plugin.run()) persist = self.plugin._persist self.assertTrue(persist.get("users")) self.assertTrue(persist.get("groups")) self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"create-group-members": {u"webdev":[u"jdoe"]}, "create-groups": [{"gid": 1000, "name": u"webdev"}], "create-users": [{"enabled": True, "home-phone": None, "location": None, "name": u"JD", "primary-gid": 1000, "uid": 1000, "username": u"jdoe", "work-phone": None}], "type": "users"}]) self.broker_service.message_store.delete_all_messages() self.monitor.reactor.fire("resynchronize", scopes=["disk"])[0] self.assertMessages( self.broker_service.message_store.get_pending_messages(), []) def test_run(self): """ The L{UserMonitor} should have message run which should enqueue a message with a diff-like representation of changes since the last run. """ def got_result(result): self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"create-group-members": {u"webdev":[u"jdoe"]}, "create-groups": [{"gid": 1000, "name": u"webdev"}], "create-users": [{"enabled": True, "home-phone": None, "location": None, "name": u"JD", "primary-gid": 1000, "uid": 1000, "username": u"jdoe", "work-phone": None}], "type": "users"}]) self.provider.users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh")] self.provider.groups = [("webdev", "x", 1000, ["jdoe"])] self.broker_service.message_store.set_accepted_types(["users"]) self.monitor.add(self.plugin) result = self.plugin.run() result.addCallback(got_result) return result def test_run_interval(self): """ L{UserMonitor.register} calls the C{register} method on it's super class, which sets up a looping call to run the plugin every L{UserMonitor.run_interval} seconds. """ self.plugin.run = self.mocker.mock() self.expect(self.plugin.run()).count(5) self.mocker.replay() self.monitor.add(self.plugin) self.broker_service.message_store.set_accepted_types(["users"]) self.reactor.advance(self.plugin.run_interval * 5) def test_run_with_operation_id(self): """ The L{UserMonitor} should have message run which should enqueue a message with a diff-like representation of changes since the last run. 
""" def got_result(result): self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"create-group-members": {u"webdev":[u"jdoe"]}, "create-groups": [{"gid": 1000, "name": u"webdev"}], "create-users": [{"enabled": True, "home-phone": None, "location": None, "name": u"JD", "primary-gid": 1000, "uid": 1000, "username": u"jdoe", "work-phone": None}], "operation-id": 1001, "type": "users"}]) self.provider.users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh")] self.provider.groups = [("webdev", "x", 1000, ["jdoe"])] self.monitor.add(self.plugin) self.broker_service.message_store.set_accepted_types(["users"]) result = self.plugin.run(1001) result.addCallback(got_result) return result def test_detect_changes(self): def got_result(result): self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"create-group-members": {u"webdev":[u"jdoe"]}, "create-groups": [{"gid": 1000, "name": u"webdev"}], "create-users": [{"enabled": True, "home-phone": None, "location": None, "name": u"JD", "primary-gid": 1000, "uid": 1000, "username": u"jdoe", "work-phone": None}], "type": "users"}]) self.broker_service.message_store.set_accepted_types(["users"]) self.provider.users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh")] self.provider.groups = [("webdev", "x", 1000, ["jdoe"])] self.monitor.add(self.plugin) connector = RemoteUserMonitorConnector(self.reactor, self.config) result = connector.connect() result.addCallback(lambda remote: remote.detect_changes()) result.addCallback(got_result) result.addCallback(lambda x: connector.disconnect()) return result def test_detect_changes_with_operation_id(self): """ The L{UserMonitor} should expose a remote C{remote_run} method which should call the remote """ def got_result(result): self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"create-group-members": {u"webdev":[u"jdoe"]}, "create-groups": [{"gid": 1000, "name": u"webdev"}], "create-users": [{"enabled": True, "home-phone": None, "location": None, "name": u"JD", "primary-gid": 1000, "uid": 1000, "username": u"jdoe", "work-phone": None}], "operation-id": 1001, "type": "users"}]) self.broker_service.message_store.set_accepted_types(["users"]) self.provider.users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh")] self.provider.groups = [("webdev", "x", 1000, ["jdoe"])] self.monitor.add(self.plugin) connector = RemoteUserMonitorConnector(self.reactor, self.config) result = connector.connect() result.addCallback(lambda remote: remote.detect_changes(1001)) result.addCallback(got_result) result.addCallback(lambda x: connector.disconnect()) return result def test_no_message_if_not_accepted(self): """ Don't add any messages at all if the broker isn't currently accepting their type. 
""" def got_result(result): mstore = self.broker_service.message_store self.assertMessages(list(mstore.get_pending_messages()), []) mstore.set_accepted_types(["users"]) self.assertMessages(list(mstore.get_pending_messages()), []) self.broker_service.message_store.set_accepted_types([]) self.provider.users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh")] self.provider.groups = [("webdev", "x", 1000, ["jdoe"])] self.monitor.add(self.plugin) connector = RemoteUserMonitorConnector(self.reactor, self.config) result = connector.connect() result.addCallback(lambda remote: remote.detect_changes(1001)) result.addCallback(got_result) result.addCallback(lambda x: connector.disconnect()) return result def test_call_on_accepted(self): def got_result(result): mstore = self.broker_service.message_store self.assertMessages(mstore.get_pending_messages(), [{"create-group-members": {u"webdev":[u"jdoe"]}, "create-groups": [{"gid": 1000, "name": u"webdev"}], "create-users": [{"enabled": True, "home-phone": None, "location": None, "name": u"JD", "primary-gid": 1000, "uid": 1000, "username": u"jdoe", "work-phone": None}], "type": "users"}]) self.provider.users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh")] self.provider.groups = [("webdev", "x", 1000, ["jdoe"])] self.monitor.add(self.plugin) self.broker_service.message_store.set_accepted_types(["users"]) result = self.reactor.fire( ("message-type-acceptance-changed", "users"), True) result = [x for x in result if x][0] result.addCallback(got_result) return result def test_do_not_persist_changes_when_send_message_fails(self): """ When the plugin is run it persists data that it uses on subsequent checks to calculate the delta to send. It should only persist data when the broker confirms that the message sent by the plugin has been sent. 
""" self.log_helper.ignore_errors(RuntimeError) def got_result(result): persist = self.plugin._persist mstore = self.broker_service.message_store self.assertMessages(mstore.get_pending_messages(), []) self.assertFalse(persist.get("users")) self.assertFalse(persist.get("groups")) self.broker_service.message_store.set_accepted_types(["users"]) self.monitor.broker.send_message = self.mocker.mock() self.monitor.broker.send_message(ANY, ANY, urgent=True) self.mocker.result(fail(RuntimeError())) self.mocker.replay() self.provider.users = [("jdoe", "x", 1000, 1000, "JD,,,,", "/home/jdoe", "/bin/sh")] self.provider.groups = [("webdev", "x", 1000, ["jdoe"])] self.monitor.add(self.plugin) connector = RemoteUserMonitorConnector(self.reactor, self.config) result = connector.connect() result.addCallback(lambda remote: remote.detect_changes(1001)) result.addCallback(got_result) result.addCallback(lambda x: connector.disconnect()) return result landscape-client-14.01/landscape/monitor/tests/test_temperature.py0000644000175000017500000001530512301414317025272 0ustar andreasandreasimport os import tempfile from landscape.monitor.temperature import Temperature from landscape.lib.tests.test_sysstats import ThermalZoneTest from landscape.tests.helpers import MonitorHelper from landscape.tests.mocker import ANY class TemperatureTestWithSampleData(ThermalZoneTest): """Tests for the temperature plugin.""" helpers = [MonitorHelper] def setUp(self): """Initialize test helpers and create a sample thermal zone.""" super(TemperatureTestWithSampleData, self).setUp() self.mstore.set_accepted_types(["temperature"]) self.write_thermal_zone("ZONE1", "50 C") def test_wb_disabled_with_no_thermal_zones(self): """ When no thermal zones are available /proc/acpi/thermal_zone will be empty. In this case, the plugin won't register itself to respond to client events such as exchange. """ thermal_zone_path = tempfile.mkdtemp() os.rmdir(thermal_zone_path) plugin = Temperature(thermal_zone_path=thermal_zone_path) self.assertEqual(plugin._thermal_zones, []) def test_no_messages_without_thermal_zones(self): """ Messages should never be generated by the plugin when no thermal zones are available. """ thermal_zone_path = self.makeDir() plugin = Temperature(interval=1, thermal_zone_path=thermal_zone_path) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size) self.assertEqual(len(self.mstore.get_pending_messages()), 0) def test_disjointed_thermal_zone_temperature_changes(self): """ Changing data needs to be tracked according to the thermal zone the data is for. This test ensures that the plugin creates messages with changes reported correctly. 
""" self.write_thermal_zone("ZONE2", "50 C") plugin = Temperature(thermal_zone_path=self.thermal_zone_path, create_time=self.reactor.time) step_size = self.monitor.step_size self.monitor.add(plugin) self.reactor.advance(step_size) self.write_thermal_zone("ZONE2", "56 C") self.reactor.advance(step_size) messages = list(plugin.create_messages()) self.assertEqual(len(messages), 2) self.assertEqual(messages[0]["thermal-zone"], "ZONE1") self.assertEqual(len(messages[0]["temperatures"]), 2) self.assertEqual(messages[0]["temperatures"][0], (step_size, 50.0)) self.assertEqual(messages[0]["temperatures"][1], (step_size * 2, 50.0)) self.assertEqual(messages[1]["thermal-zone"], "ZONE2") self.assertEqual(len(messages[1]["temperatures"]), 2) self.assertEqual(messages[1]["temperatures"][0], (step_size, 50.0)) self.assertEqual(messages[1]["temperatures"][1], (step_size * 2, 56.0)) def test_messaging_flushes(self): """ Duplicate message should never be created. If no data is available, a message with an empty C{temperatures} list is expected. """ plugin = Temperature(thermal_zone_path=self.thermal_zone_path, create_time=self.reactor.time) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size) messages = plugin.create_messages() self.assertEqual(len(messages), 1) messages = plugin.create_messages() self.assertEqual(len(messages), 0) def test_never_exchange_empty_messages(self): """ The plugin will only create messages when data is available. If no data is available when an exchange occurs no messages should not be queued. """ self.write_thermal_zone("ZONE2", "50 C") plugin = Temperature(thermal_zone_path=self.thermal_zone_path, create_time=self.reactor.time) self.monitor.add(plugin) self.assertEqual(len(self.mstore.get_pending_messages()), 0) def test_exchange_messages(self): """ The temperature plugin queues message when an exchange happens. Each message should be aligned to a step boundary; messages collected bewteen exchange periods should be delivered in a single message. """ self.write_thermal_zone("ZONE2", "50 C") plugin = Temperature(thermal_zone_path=self.thermal_zone_path, create_time=self.reactor.time) step_size = self.monitor.step_size self.monitor.add(plugin) self.reactor.advance(step_size) self.monitor.exchange() self.assertMessages(self.mstore.get_pending_messages(), [{"type": "temperature", "thermal-zone": "ZONE1", "temperatures": [(step_size, 50.0)]}, {"type": "temperature", "thermal-zone": "ZONE2", "temperatures": [(step_size, 50.0)]}]) def test_no_messages_on_bad_values(self): """ If the temperature is in an unknown format, the plugin won't break and no messages are sent. """ self.write_thermal_zone("ZONE1", "UNKNOWN C") plugin = Temperature(thermal_zone_path=self.thermal_zone_path, create_time=self.reactor.time) step_size = self.monitor.step_size self.monitor.add(plugin) self.reactor.advance(step_size) self.monitor.exchange() self.assertMessages(self.mstore.get_pending_messages(), []) def test_call_on_accepted(self): plugin = Temperature(thermal_zone_path=self.thermal_zone_path, create_time=self.reactor.time) self.monitor.add(plugin) self.reactor.advance(plugin.registry.step_size) remote_broker_mock = self.mocker.replace(self.remote) remote_broker_mock.send_message(ANY, ANY, urgent=True) self.mocker.replay() self.reactor.fire(("message-type-acceptance-changed", "temperature"), True) def test_no_message_if_not_accepted(self): """ Don't add any messages at all if the broker isn't currently accepting their type. 
""" self.mstore.set_accepted_types([]) plugin = Temperature(thermal_zone_path=self.thermal_zone_path, create_time=self.reactor.time) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size * 2) self.monitor.exchange() self.mstore.set_accepted_types(["temperature"]) self.assertMessages(list(self.mstore.get_pending_messages()), []) landscape-client-14.01/landscape/monitor/tests/test_service.py0000644000175000017500000000570312301414317024376 0ustar andreasandreasfrom landscape.tests.helpers import LandscapeTest, FakeBrokerServiceHelper from landscape.reactor import FakeReactor from landscape.monitor.config import MonitorConfiguration, ALL_PLUGINS from landscape.monitor.service import MonitorService from landscape.monitor.computerinfo import ComputerInfo from landscape.monitor.loadaverage import LoadAverage class MonitorServiceTest(LandscapeTest): helpers = [FakeBrokerServiceHelper] def setUp(self): super(MonitorServiceTest, self).setUp() config = MonitorConfiguration() config.load(["-c", self.config_filename]) class FakeMonitorService(MonitorService): reactor_factory = FakeReactor self.service = FakeMonitorService(config) self.log_helper.ignore_errors("Typelib file for namespace") def test_plugins(self): """ By default the L{MonitorService.plugins} list holds an instance of every enabled monitor plugin. """ self.assertEqual(len(self.service.plugins), len(ALL_PLUGINS)) def test_get_plugins(self): """ If the C{--monitor-plugins} command line option is specified, only the given plugins will be enabled. """ self.service.config.load(["--monitor-plugins", "ComputerInfo, LoadAverage"]) plugins = self.service.get_plugins() self.assertTrue(isinstance(plugins[0], ComputerInfo)) self.assertTrue(isinstance(plugins[1], LoadAverage)) def test_start_service(self): """ The L{MonitorService.startService} method connects to the broker, starts the plugins and register the monitor as broker client. It also start listening on its own socket for incoming connections. """ def stop_service(ignored): [connector] = self.broker_service.broker.get_connectors() connector.disconnect() self.service.stopService() self.broker_service.stopService() def assert_broker_connection(ignored): self.assertEqual(len(self.broker_service.broker.get_clients()), 1) self.assertIs(self.service.broker, self.service.monitor.broker) result = self.service.broker.ping() return result.addCallback(stop_service) self.broker_service.startService() started = self.service.startService() return started.addCallback(assert_broker_connection) def test_stop_service(self): """ The L{MonitorService.stopService} method flushes the data before shutting down the monitor, and closes the connection with the broker. 
""" self.service.monitor = self.mocker.mock() self.service.monitor.flush() self.service.connector = self.mocker.mock() self.service.connector.disconnect() self.service.publisher = self.mocker.mock() self.service.publisher.stop() self.mocker.replay() self.service.stopService() landscape-client-14.01/landscape/monitor/tests/test_jujuinfo.py0000644000175000017500000000706112301414317024566 0ustar andreasandreasimport json import os from landscape.monitor.jujuinfo import JujuInfo from landscape.tests.helpers import LandscapeTest, MonitorHelper SAMPLE_JUJU_INFO = json.dumps({"environment-uuid": "DEAD-BEEF", "unit-name": "juju-unit-name", "api-addresses": "10.0.3.1:17070", "private-address": "127.0.0.1"}) class JujuInfoTest(LandscapeTest): helpers = [MonitorHelper] def setUp(self): super(JujuInfoTest, self).setUp() self.mstore.set_accepted_types(["juju-info"]) self.plugin = JujuInfo() self.monitor.add(self.plugin) self.makeFile(SAMPLE_JUJU_INFO, path=self.config.juju_filename) def test_get_sample_juju_info(self): """ Sample data is used to ensure that expected values end up in the Juju data reported by the plugin. """ self.plugin.exchange() message = self.mstore.get_pending_messages()[0] self.assertEqual(message["type"], "juju-info") self.assertEqual(message["environment-uuid"], "DEAD-BEEF") self.assertEqual(message["unit-name"], "juju-unit-name") self.assertEqual(message["api-addresses"], ["10.0.3.1:17070"]) def test_juju_info_reported_only_once(self): """ Juju data shouldn't be reported unless it's changed since the last time it was reported. """ self.plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 1) self.assertEqual(messages[0]["type"], "juju-info") self.plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 1) def test_report_changed_juju_info(self): """ When juju data changes, the new data should be sent to the server. 
""" self.plugin.exchange() message = self.mstore.get_pending_messages()[0] self.assertEqual(message["type"], "juju-info") self.assertEqual(message["environment-uuid"], "DEAD-BEEF") self.assertEqual(message["unit-name"], "juju-unit-name") self.assertEqual(message["api-addresses"], ["10.0.3.1:17070"]) self.makeFile( json.dumps({"environment-uuid": "FEED-BEEF", "unit-name": "changed-unit-name", "api-addresses": "10.0.3.2:17070", "private-address": "127.0.1.1"}), path=self.config.juju_filename) self.plugin.exchange() message = self.mstore.get_pending_messages()[1] self.assertEqual(message["type"], "juju-info") self.assertEqual(message["environment-uuid"], "FEED-BEEF") self.assertEqual(message["unit-name"], "changed-unit-name") self.assertEqual(message["api-addresses"], ["10.0.3.2:17070"]) def test_no_message_with_invalid_json(self): """No Juju message is sent if the JSON file is invalid.""" self.makeFile("barf", path=self.config.juju_filename) self.plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(messages, []) self.log_helper.ignore_errors(ValueError) self.assertTrue( "Error attempting to read JSON from" in self.logfile.getvalue()) def test_no_message_with_missing_file(self): """No Juju message is sent if the JSON file is missing.""" os.remove(self.config.juju_filename) self.plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(messages, []) landscape-client-14.01/landscape/monitor/tests/test_monitor.py0000644000175000017500000000424512301414317024425 0ustar andreasandreasfrom landscape.monitor.monitor import Monitor from landscape.lib.persist import Persist from landscape.tests.helpers import LandscapeTest, MonitorHelper from landscape.broker.client import BrokerClientPlugin class MonitorTest(LandscapeTest): helpers = [MonitorHelper] def test_persist(self): """ A L{Monitor} instance has a C{persist} attribute. """ self.monitor.persist.set("a", 1) self.assertEqual(self.monitor.persist.get("a"), 1) def test_flush_saves_persist(self): """ The L{Monitor.flush} method saves any changes made to the persist database. """ self.monitor.persist.set("a", 1) self.monitor.flush() persist = Persist() persist.load(self.monitor.persist_filename) self.assertEqual(persist.get("a"), 1) def test_flush_after_exchange(self): """ The L{Monitor.exchange} method flushes the monitor after C{exchange} on all plugins has been called. """ plugin = BrokerClientPlugin() plugin.exchange = lambda: self.monitor.persist.set("a", 1) self.monitor.add(plugin) self.monitor.exchange() persist = Persist() persist.load(self.monitor.persist_filename) self.assertEqual(persist.get("a"), 1) def test_flush_every_flush_interval(self): """ The L{Monitor.flush} method gets called every C{flush_interval} seconds, and perists data to the disk. """ self.monitor.persist.save = self.mocker.mock() self.monitor.persist.save(self.monitor.persist_filename) self.mocker.count(3) self.mocker.replay() self.reactor.advance(self.config.flush_interval * 3) def test_creating_loads_persist(self): """ If C{persist_filename} exists, it is loaded by the constructor. 
""" filename = self.makeFile() persist = Persist() persist.set("a", "Hi there!") persist.save(filename) monitor = Monitor(self.reactor, self.config, persist=Persist(), persist_filename=filename) self.assertEqual(monitor.persist.get("a"), "Hi there!") landscape-client-14.01/landscape/monitor/tests/test_cephusage.py0000644000175000017500000002742312301414317024705 0ustar andreasandreasimport os import json from twisted.internet.defer import succeed from landscape.lib.fs import touch_file from landscape.tests.helpers import LandscapeTest, MonitorHelper from landscape.monitor.cephusage import CephUsage SAMPLE_OLD_TEMPLATE = ( " health HEALTH_WARN 6 pgs degraded; 6 pgs stuck " "unclean\n" "monmap e2: 3 mons at {server-269703f4-5217-495a-b7f2-b3b3473c1719=" "10.55.60.238:6789/0,server-3f370698-f3b0-4cbe-8db9-a18e304c952b=" "10.55.60.141:6789/0,server-f635fa07-e36f-453c-b3d5-b4ce86fbc6ff=" "10.55.60.241:6789/0}, election epoch 8, quorum 0,1,2 " "server-269703f4-5217-495a-b7f2-b3b3473c1719," "server-3f370698-f3b0-4cbe-8db9-a18e304c952b," "server-f635fa07-e36f-453c-b3d5-b4ce86fbc6ff\n " "osdmap e9: 3 osds: 3 up, 3 in\n " "pgmap v114: 192 pgs: 186 active+clean, 6 active+degraded; " "0 bytes data, %s MB used, %s MB / %s MB avail\n " "mdsmap e1: 0/0/1 up\n\n") SAMPLE_NEW_TEMPLATE = ( "health HEALTH_OK\n" " monmap e2: 3 mons at {inst-007=192.168.64.139:6789/0," "inst-008=192.168.64.140:6789/0,inst-009=192.168.64.141:6789/0}, " "election epoch 6, quorum 0,1,2 inst-007,inst-008,inst-009\n" " osdmap e28: 3 osds: 3 up, 3 in\n" " pgmap v193861: 208 pgs: 208 active+clean; 5514 MB data, %s MB used, " "%s MB / %s MB avail; 1739KB/s wr, 54op/s\n" " mdsmap e1: 0/0/1 up\n") SAMPLE_OUTPUT = SAMPLE_NEW_TEMPLATE % (4296, 53880, 61248) SAMPLE_OLD_OUTPUT = SAMPLE_OLD_TEMPLATE % (4296, 53880, 61248) SAMPLE_QUORUM = ( '{ "election_epoch": 8,\n' ' "quorum": [\n' ' 0,\n' ' 1,\n' ' 2],\n' ' "monmap": { "epoch": 2,\n' ' "fsid": "%s",\n' ' "modified": "2013-01-13 16:58:00.141737",\n' ' "created": "0.000000",\n' ' "mons": [\n' ' { "rank": 0,\n' ' "name": "server-1be72d64-0ff2-4ac1-ad13-1c06c8201011",\n' ' "addr": "10.55.60.188:6789\/0"},\n' ' { "rank": 1,\n' ' "name": "server-e847f147-ed13-46c2-8e6d-768aa32657ab",\n' ' "addr": "10.55.60.202:6789\/0"},\n' ' { "rank": 2,\n' ' "name": "server-3c831a0b-51d5-43a9-95d5-63644f0965cc",\n' ' "addr": "10.55.60.205:6789\/0"}]}}\n') SAMPLE_QUORUM_OUTPUT = SAMPLE_QUORUM % "ecbb8960-0e21-11e2-b495-83a88f44db01" class CephUsagePluginTest(LandscapeTest): helpers = [MonitorHelper] def setUp(self): super(CephUsagePluginTest, self).setUp() self.mstore = self.broker_service.message_store self.plugin = CephUsage(create_time=self.reactor.time) def test_wb_get_ceph_usage_if_command_not_found(self): """ When the ceph command cannot be found or accessed, the C{_get_ceph_usage} method returns None. """ self.plugin._get_status_command_output = lambda: succeed(None) self.monitor.add(self.plugin) self.assertIs( None, self.successResultOf(self.plugin._get_ceph_usage())) def test_wb_get_ceph_usage(self): """ When the ceph command call returns output, the _get_ceph_usage method returns the percentage of used space. 
""" self.plugin._get_status_command_output = lambda: succeed(SAMPLE_OUTPUT) self.monitor.add(self.plugin) self.assertEqual( 0.12029780564263323, self.successResultOf(self.plugin._get_ceph_usage())) def test_wb_get_ceph_usage_old_format(self): """ The _get_ceph_usage method understands command output in the "old" format (the output changed around version 0.56.1) """ self.plugin._get_status_command_output = ( lambda: succeed(SAMPLE_OLD_OUTPUT)) self.monitor.add(self.plugin) self.assertEqual( 0.12029780564263323, self.successResultOf(self.plugin._get_ceph_usage())) def test_wb_get_ceph_usage_empty_disk(self): """ When the ceph command call returns output for empty disks, the _get_ceph_usage method returns 0.0 . """ self.plugin._get_status_command_output = ( lambda: succeed(SAMPLE_NEW_TEMPLATE % (0, 100, 100))) self.monitor.add(self.plugin) self.assertEqual( 0.0, self.successResultOf(self.plugin._get_ceph_usage())) def test_wb_get_ceph_usage_full_disk(self): """ When the ceph command call returns output for empty disks, the _get_ceph_usage method returns 1.0 . """ self.plugin._get_status_command_output = ( lambda: succeed(SAMPLE_NEW_TEMPLATE % (100, 0, 100))) self.monitor.add(self.plugin) self.assertEqual( 1.0, self.successResultOf(self.plugin._get_ceph_usage())) def test_wb_get_ceph_usage_no_information(self): """ When the ceph command outputs something that does not contain the disk usage information, the _get_ceph_usage method returns None. """ output = "Blah\nblah" error = "Could not parse command output: '%s'" % output self.log_helper.ignore_errors(error) self.plugin._get_status_command_output = lambda: succeed(output) self.monitor.add(self.plugin) self.assertIs( None, self.successResultOf(self.plugin._get_ceph_usage())) def test_never_exchange_empty_messages(self): """ The plugin will create a message with an empty C{ceph-usages} list when no previous data is available. If an empty message is created during exchange, it should not be queued. """ self.mstore.set_accepted_types(["ceph-usage"]) self.monitor.add(self.plugin) self.monitor.exchange() self.assertEqual(0, len(self.mstore.get_pending_messages())) def test_exchange_messages(self): """ The Ceph usage plugin queues message when manager.exchange() is called. """ ring_id = "whatever" self.mstore.set_accepted_types(["ceph-usage"]) self.plugin._ceph_usage_points = [(60, 1.0)] self.plugin._ceph_ring_id = ring_id self.monitor.add(self.plugin) self.monitor.exchange() self.assertMessages(self.mstore.get_pending_messages(), [{"type": "ceph-usage", "ceph-usages": [(60, 1.0)], "ring-id": ring_id}]) def test_create_message(self): """ Calling create_message returns an expected message. """ ring_id = "blah" self.plugin._ceph_usage_points = [] self.plugin._ceph_ring_id = ring_id message = self.plugin.create_message() self.assertIn("type", message) self.assertEqual(message["type"], "ceph-usage") self.assertIn("ceph-usages", message) self.assertEqual(ring_id, message["ring-id"]) ceph_usages = message["ceph-usages"] self.assertEqual(len(ceph_usages), 0) point = (60, 1.0) self.plugin._ceph_usage_points = [point] message = self.plugin.create_message() self.assertIn("type", message) self.assertEqual(message["type"], "ceph-usage") self.assertIn("ceph-usages", message) self.assertEqual(ring_id, message["ring-id"]) ceph_usages = message["ceph-usages"] self.assertEqual(len(ceph_usages), 1) self.assertEqual([point], ceph_usages) def test_no_message_if_not_accepted(self): """ Don't add any messages at all if the broker isn't currently accepting their type. 
""" interval = 30 monitor_interval = 300 plugin = CephUsage( interval=interval, monitor_interval=monitor_interval, create_time=self.reactor.time) self.monitor.add(plugin) self.reactor.advance(monitor_interval * 2) self.monitor.exchange() self.mstore.set_accepted_types(["ceph-usage"]) self.assertMessages(list(self.mstore.get_pending_messages()), []) def test_wb_get_ceph_ring_id(self): """ When given a well formatted command output, the _get_ceph_ring_id() method returns the correct ring_id. """ uuid = "i-am-a-uuid" self.plugin._get_quorum_command_output = ( lambda: succeed(SAMPLE_QUORUM % uuid)) self.assertEqual( uuid, self.successResultOf(self.plugin._get_ceph_ring_id())) def test_wb_get_ceph_ring_id_valid_json_no_information(self): """ When the _get_quorum_command_output method returns something without the ring uuid information present but that is valid JSON, the _get_ceph_ring_id method returns None. """ error = "Could not get ring_id from output: '{\"election_epoch\": 8}'." self.log_helper.ignore_errors(error) def return_output(): # Valid JSON - just without the info we're looking for. data = {"election_epoch": 8} return succeed(json.dumps(data)) self.plugin._get_quorum_command_output = return_output self.assertIs( None, self.successResultOf(self.plugin._get_ceph_ring_id())) def test_wb_get_ceph_ring_id_no_information(self): """ When the _get_quorum_command_output method returns something without the ring uuid information present, the _get_ceph_ring_id method returns None. """ error = "Could not get ring_id from output: 'Blah\nblah'." self.log_helper.ignore_errors(error) self.plugin._get_quorum_command_output = lambda: succeed("Blah\nblah") self.assertIs( None, self.successResultOf(self.plugin._get_ceph_ring_id())) def test_wb_get_ceph_ring_id_command_exception(self): """ When the _get_quorum_command_output method returns None (if an exception happened for example), the _get_ceph_ring_id method returns None and logs no error. """ self.plugin._get_quorum_command_output = lambda: succeed(None) self.assertIs( None, self.successResultOf(self.plugin._get_ceph_ring_id())) def test_plugin_run(self): """ The plugin's run() method fills the _ceph_usage_points with accumulated samples after each C{interval} period. The _ceph_ring_id member of the plugin is also filled with the output of the _get_ceph_ring_id method. """ monitor_interval = 300 interval = monitor_interval plugin = CephUsage( interval=interval, monitor_interval=monitor_interval, create_time=self.reactor.time) uuid = "i-am-a-unique-snowflake" # The config file must be present for the plugin to run. ceph_client_dir = os.path.join(self.config.data_path, "ceph-client") ceph_conf = os.path.join(ceph_client_dir, "ceph.landscape-client.conf") os.mkdir(ceph_client_dir) touch_file(ceph_conf) plugin._ceph_config = ceph_conf plugin._get_quorum_command_output = ( lambda: succeed(SAMPLE_QUORUM % uuid)) plugin._get_status_command_output = ( lambda: succeed(SAMPLE_NEW_TEMPLATE % (100, 0, 100))) self.monitor.add(plugin) self.reactor.advance(monitor_interval * 2) self.assertEqual([(300, 1.0), (600, 1.0)], plugin._ceph_usage_points) self.assertEqual(uuid, plugin._ceph_ring_id) def test_resynchronize_message_calls_reset_method(self): """ If the reactor fires a "resynchronize" even the C{_reset} method on the ceph plugin object is called. 
""" self.called = False def stub_reset(): self.called = True self.plugin._reset = stub_reset self.monitor.add(self.plugin) self.reactor.fire("resynchronize") self.assertTrue(self.called) landscape-client-14.01/landscape/monitor/tests/__init__.py0000644000175000017500000000000012301414317023417 0ustar andreasandreaslandscape-client-14.01/landscape/monitor/tests/test_computerinfo.py0000644000175000017500000004673412301414317025461 0ustar andreasandreasimport os import re from twisted.internet.defer import succeed, fail, inlineCallbacks from landscape.lib.fetch import HTTPCodeError, PyCurlError from landscape.lib.fs import create_file from landscape.monitor.computerinfo import ComputerInfo, METADATA_RETRY_MAX from landscape.tests.helpers import LandscapeTest, MonitorHelper from landscape.tests.mocker import ANY SAMPLE_LSB_RELEASE = "DISTRIB_ID=Ubuntu\n" \ "DISTRIB_RELEASE=6.06\n" \ "DISTRIB_CODENAME=dapper\n" \ "DISTRIB_DESCRIPTION=\"Ubuntu 6.06.1 LTS\"\n" def get_fqdn(): return "ooga.local" class ComputerInfoTest(LandscapeTest): helpers = [MonitorHelper] sample_memory_info = """ MemTotal: 1547072 kB MemFree: 106616 kB Buffers: 267088 kB Cached: 798388 kB SwapCached: 0 kB Active: 728952 kB Inactive: 536512 kB HighTotal: 646016 kB HighFree: 42204 kB LowTotal: 901056 kB LowFree: 64412 kB SwapTotal: 1622524 kB SwapFree: 1622524 kB Dirty: 24 kB Writeback: 0 kB Mapped: 268756 kB Slab: 105492 kB CommitLimit: 2396060 kB Committed_AS: 1166936 kB PageTables: 2748 kB VmallocTotal: 114680 kB VmallocUsed: 6912 kB VmallocChunk: 107432 kB """ def setUp(self): LandscapeTest.setUp(self) self.lsb_release_filename = self.makeFile(SAMPLE_LSB_RELEASE) self.query_results = {} def fetch_stub(url, **kwargs): value = self.query_results[url] if isinstance(value, Exception): return fail(value) else: return succeed(value) self.fetch_func = fetch_stub self.add_query_result("instance-id", "i00001") self.add_query_result("ami-id", "ami-00002") self.add_query_result("instance-type", "hs1.8xlarge") def add_query_result(self, name, value): """ Add a url to self.query_results that is then available through self.fetch_func. 
""" url = "http://169.254.169.254/latest/meta-data/" + name self.query_results[url] = value def test_get_fqdn(self): self.mstore.set_accepted_types(["computer-info"]) plugin = ComputerInfo(get_fqdn=get_fqdn, fetch_async=self.fetch_func) self.monitor.add(plugin) plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 1) self.assertEqual(messages[0]["type"], "computer-info") self.assertEqual(messages[0]["hostname"], "ooga.local") def test_get_real_hostname(self): self.mstore.set_accepted_types(["computer-info"]) plugin = ComputerInfo(fetch_async=self.fetch_func) self.monitor.add(plugin) plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 1) self.assertEqual(messages[0]["type"], "computer-info") self.assertNotEquals(len(messages[0]["hostname"]), 0) self.assertTrue(re.search("\w", messages[0]["hostname"])) def test_only_report_changed_hostnames(self): self.mstore.set_accepted_types(["computer-info"]) plugin = ComputerInfo(get_fqdn=get_fqdn, fetch_async=self.fetch_func) self.monitor.add(plugin) plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 1) plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 1) def test_report_changed_hostnames(self): def hostname_factory(hostnames=["ooga", "wubble", "wubble"]): i = 0 while i < len(hostnames): yield hostnames[i] i = i + 1 self.mstore.set_accepted_types(["computer-info"]) plugin = ComputerInfo(get_fqdn=hostname_factory().next, fetch_async=self.fetch_func) self.monitor.add(plugin) plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 1) self.assertEqual(messages[0]["hostname"], "ooga") plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 2) self.assertEqual(messages[1]["hostname"], "wubble") def test_get_total_memory(self): self.mstore.set_accepted_types(["computer-info"]) meminfo_filename = self.makeFile(self.sample_memory_info) plugin = ComputerInfo(meminfo_filename=meminfo_filename, fetch_async=self.fetch_func) self.monitor.add(plugin) plugin.exchange() messages = self.mstore.get_pending_messages() message = messages[0] self.assertEqual(message["type"], "computer-info") self.assertEqual(message["total-memory"], 1510) self.assertEqual(message["total-swap"], 1584) def test_get_real_total_memory(self): self.mstore.set_accepted_types(["computer-info"]) self.makeFile(self.sample_memory_info) plugin = ComputerInfo(fetch_async=self.fetch_func) self.monitor.add(plugin) plugin.exchange() message = self.mstore.get_pending_messages()[0] self.assertEqual(message["type"], "computer-info") self.assertTrue(isinstance(message["total-memory"], int)) self.assertTrue(isinstance(message["total-swap"], int)) def test_wb_report_changed_total_memory(self): self.mstore.set_accepted_types(["computer-info"]) plugin = ComputerInfo(fetch_async=self.fetch_func) self.monitor.add(plugin) plugin._get_memory_info = lambda: (1510, 1584) plugin.exchange() message = self.mstore.get_pending_messages()[0] self.assertEqual(message["total-memory"], 1510) self.assertTrue("total-swap" in message) plugin._get_memory_info = lambda: (2048, 1584) plugin.exchange() message = self.mstore.get_pending_messages()[1] self.assertEqual(message["total-memory"], 2048) self.assertTrue("total-swap" not in message) def test_wb_report_changed_total_swap(self): self.mstore.set_accepted_types(["computer-info"]) plugin = ComputerInfo(fetch_async=self.fetch_func) 
self.monitor.add(plugin) plugin._get_memory_info = lambda: (1510, 1584) plugin.exchange() message = self.mstore.get_pending_messages()[0] self.assertEqual(message["total-swap"], 1584) self.assertTrue("total-memory" in message) plugin._get_memory_info = lambda: (1510, 2048) plugin.exchange() message = self.mstore.get_pending_messages()[1] self.assertEqual(message["total-swap"], 2048) self.assertTrue("total-memory" not in message) def test_get_distribution(self): """ Various details about the distribution should be reported by the plugin. This test ensures that the right kinds of details end up in messages produced by the plugin. """ self.mstore.set_accepted_types(["distribution-info"]) plugin = ComputerInfo() self.monitor.add(plugin) plugin.exchange() message = self.mstore.get_pending_messages()[0] self.assertEqual(message["type"], "distribution-info") self.assertTrue("distributor-id" in message) self.assertTrue("description" in message) self.assertTrue("release" in message) self.assertTrue("code-name" in message) def test_get_sample_distribution(self): """ Sample data is used to ensure that expected values end up in the distribution data reported by the plugin. """ self.mstore.set_accepted_types(["distribution-info"]) plugin = ComputerInfo(lsb_release_filename=self.lsb_release_filename) self.monitor.add(plugin) plugin.exchange() message = self.mstore.get_pending_messages()[0] self.assertEqual(message["type"], "distribution-info") self.assertEqual(message["distributor-id"], "Ubuntu") self.assertEqual(message["description"], "Ubuntu 6.06.1 LTS") self.assertEqual(message["release"], "6.06") self.assertEqual(message["code-name"], "dapper") def test_distribution_reported_only_once(self): """ Distribution data shouldn't be reported unless it's changed since the last time it was reported. """ self.mstore.set_accepted_types(["distribution-info"]) plugin = ComputerInfo() self.monitor.add(plugin) plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 1) self.assertEqual(messages[0]["type"], "distribution-info") plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 1) def test_wb_report_changed_distribution(self): """ When distribution data changes, the new data should be sent to the server. 
""" self.mstore.set_accepted_types(["distribution-info"]) plugin = ComputerInfo(lsb_release_filename=self.lsb_release_filename) self.monitor.add(plugin) plugin.exchange() message = self.mstore.get_pending_messages()[0] self.assertEqual(message["type"], "distribution-info") self.assertEqual(message["distributor-id"], "Ubuntu") self.assertEqual(message["description"], "Ubuntu 6.06.1 LTS") self.assertEqual(message["release"], "6.06") self.assertEqual(message["code-name"], "dapper") plugin._lsb_release_filename = self.makeFile("""\ DISTRIB_ID=Ubuntu DISTRIB_RELEASE=6.10 DISTRIB_CODENAME=edgy DISTRIB_DESCRIPTION="Ubuntu 6.10" """) plugin.exchange() message = self.mstore.get_pending_messages()[1] self.assertEqual(message["type"], "distribution-info") self.assertEqual(message["distributor-id"], "Ubuntu") self.assertEqual(message["description"], "Ubuntu 6.10") self.assertEqual(message["release"], "6.10") self.assertEqual(message["code-name"], "edgy") def test_unknown_distribution_key(self): self.mstore.set_accepted_types(["distribution-info"]) lsb_release_filename = self.makeFile("""\ DISTRIB_ID=Ubuntu DISTRIB_RELEASE=6.10 DISTRIB_CODENAME=edgy DISTRIB_DESCRIPTION="Ubuntu 6.10" DISTRIB_NEW_UNEXPECTED_KEY=ooga """) plugin = ComputerInfo(lsb_release_filename=lsb_release_filename) self.monitor.add(plugin) plugin.exchange() message = self.mstore.get_pending_messages()[0] self.assertEqual(message["type"], "distribution-info") self.assertEqual(message["distributor-id"], "Ubuntu") self.assertEqual(message["description"], "Ubuntu 6.10") self.assertEqual(message["release"], "6.10") self.assertEqual(message["code-name"], "edgy") def test_resynchronize(self): """ If a reactor event "resynchronize" is received, messages for all computer info should be generated. """ self.mstore.set_accepted_types(["distribution-info", "computer-info"]) meminfo_filename = self.makeFile(self.sample_memory_info) plugin = ComputerInfo(get_fqdn=get_fqdn, meminfo_filename=meminfo_filename, lsb_release_filename=self.lsb_release_filename, root_path=self.makeDir(), fetch_async=self.fetch_func) self.monitor.add(plugin) plugin.exchange() self.reactor.fire("resynchronize", scopes=["computer"]) plugin.exchange() computer_info = {"type": "computer-info", "hostname": "ooga.local", "timestamp": 0, "total-memory": 1510, "total-swap": 1584} dist_info = {"type": "distribution-info", "code-name": "dapper", "description": "Ubuntu 6.06.1 LTS", "distributor-id": "Ubuntu", "release": "6.06"} self.assertMessages(self.mstore.get_pending_messages(), [computer_info, dist_info, computer_info, dist_info]) def test_computer_info_call_on_accepted(self): plugin = ComputerInfo(fetch_async=self.fetch_func) self.monitor.add(plugin) remote_broker_mock = self.mocker.replace(self.remote) remote_broker_mock.send_message(ANY, ANY, urgent=True) self.mocker.replay() self.mstore.set_accepted_types(["computer-info"]) self.reactor.fire(("message-type-acceptance-changed", "computer-info"), True) def test_distribution_info_call_on_accepted(self): plugin = ComputerInfo() self.monitor.add(plugin) remote_broker_mock = self.mocker.replace(self.remote) remote_broker_mock.send_message(ANY, ANY, urgent=True) self.mocker.replay() self.mstore.set_accepted_types(["distribution-info"]) self.reactor.fire(("message-type-acceptance-changed", "distribution-info"), True) def test_message_if_not_accepted(self): """ Don't add any messages at all if the broker isn't currently accepting their type. 
""" plugin = ComputerInfo() self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size * 2) self.monitor.exchange() self.mstore.set_accepted_types(["distribution-info", "computer-info"]) self.assertMessages(list(self.mstore.get_pending_messages()), []) def test_annotations(self): """ L{ComputerInfo} sends extra meta data from the annotations.d directory if it's present. Each file name is used as a key in the meta-data dict and the file's contents are used as values. This allows system administrators to add extra per-computer information into Landscape which makes sense to them. """ annotations_dir = self.monitor.config.annotations_path os.mkdir(annotations_dir) create_file(os.path.join(annotations_dir, "annotation1"), "value1") create_file(os.path.join(annotations_dir, "annotation2"), "value2") self.mstore.set_accepted_types(["computer-info"]) plugin = ComputerInfo() plugin._cloud_meta_data = {} self.monitor.add(plugin) plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(1, len(messages)) meta_data = messages[0]["annotations"] self.assertEqual(2, len(meta_data)) self.assertEqual("value1", meta_data["annotation1"]) self.assertEqual("value2", meta_data["annotation2"]) def test_fetch_cloud_metadata(self): """ Fetch cloud information and insert it in a cloud-instance-metadata message. """ self.config.cloud = True self.mstore.set_accepted_types(["cloud-instance-metadata"]) plugin = ComputerInfo(fetch_async=self.fetch_func) self.monitor.add(plugin) plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(1, len(messages)) self.assertEqual(u"i00001", messages[0]["instance-id"]) self.assertEqual(u"ami-00002", messages[0]["ami-id"]) self.assertEqual(u"hs1.8xlarge", messages[0]["instance-type"]) def test_send_cloud_instance_metadata_only_once(self): """Only send the cloud information once per client restart.""" self.config.cloud = True self.mstore.set_accepted_types(["cloud-instance-metadata"]) plugin = ComputerInfo(fetch_async=self.fetch_func) self.monitor.add(plugin) plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(1, len(messages)) plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(1, len(messages)) def test_no_fetch_ec2_meta_data_when_cloud_retries_is_max(self): """ Do not fetch EC2 info when C{_cloud_retries} is C{METADATA_RETRY_MAX} """ self.config.cloud = True self.mstore.set_accepted_types(["cloud-instance-metadata"]) plugin = ComputerInfo(fetch_async=self.fetch_func) plugin._cloud_retries = METADATA_RETRY_MAX self.monitor.add(plugin) plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(0, len(messages)) @inlineCallbacks def test_fetch_ec2_meta_data(self): """ L{_fetch_ec2_meta_data} retrieves instance information from the EC2 api. """ plugin = ComputerInfo(fetch_async=self.fetch_func) result = yield plugin._fetch_ec2_meta_data() self.assertEqual({"instance-id": u"i00001", "ami-id": u"ami-00002", "instance-type": u"hs1.8xlarge"}, result) self.assertEqual( " INFO: Querying cloud meta-data.\n" " INFO: Acquired cloud meta-data.\n", self.logfile.getvalue()) @inlineCallbacks def test_fetch_ec2_meta_data_no_cloud_api_max_retry(self): """ L{_fetch_ec2_meta_data} returns C{None} when faced with no EC2 cloud API service and reports the specific C{PyCurlError} upon message exchange when L{_cloud_retries} equals C{METADATA_RETRY_MAX}. 
""" self.log_helper.ignore_errors(PyCurlError) self.add_query_result("instance-id", PyCurlError(60, "pycurl error")) plugin = ComputerInfo(fetch_async=self.fetch_func) plugin._cloud_retries = METADATA_RETRY_MAX result = yield plugin._fetch_ec2_meta_data() self.assertIn( "INFO: No cloud meta-data available. " "Error 60: pycurl error\n", self.logfile.getvalue()) self.assertEqual(None, result) @inlineCallbacks def test_fetch_ec2_meta_data_bad_result_max_retry(self): """ L{_fetch_ec2_meta_data} returns C{None} and logs an error when crossing the retry threshold C{METADATA_RETRY_MAX}. """ self.log_helper.ignore_errors(HTTPCodeError) self.add_query_result("ami-id", HTTPCodeError(404, "notfound")) plugin = ComputerInfo(fetch_async=self.fetch_func) plugin._cloud_retries = METADATA_RETRY_MAX result = yield plugin._fetch_ec2_meta_data() self.assertIn( "INFO: No cloud meta-data available. Server returned " "HTTP code 404", self.logfile.getvalue()) self.assertEqual(None, result) @inlineCallbacks def test_fetch_ec2_meta_data_bad_result_retry(self): """ L{_fetch_ec2_meta_data} returns C{None} when faced with spurious errors from the EC2 api. The method increments L{_cloud_retries} counter which allows L{_fetch_ec2_meta_data} to run again next message exchange. """ self.log_helper.ignore_errors(HTTPCodeError) self.add_query_result("ami-id", HTTPCodeError(404, "notfound")) plugin = ComputerInfo(fetch_async=self.fetch_func) result = yield plugin._fetch_ec2_meta_data() self.assertEqual(1, plugin._cloud_retries) self.assertEqual(None, result) # Fix the error condition for the retry. self.add_query_result("ami-id", "ami-00002") result = yield plugin._fetch_ec2_meta_data() self.assertEqual({"instance-id": u"i00001", "ami-id": u"ami-00002", "instance-type": u"hs1.8xlarge"}, result) landscape-client-14.01/landscape/monitor/tests/test_config.py0000644000175000017500000000214012301414317024173 0ustar andreasandreasfrom landscape.tests.helpers import LandscapeTest from landscape.monitor.config import MonitorConfiguration, ALL_PLUGINS class MonitorConfigurationTest(LandscapeTest): def setUp(self): super(MonitorConfigurationTest, self).setUp() self.config = MonitorConfiguration() def test_plugin_factories(self): """ By default all plugins are enabled. """ self.assertEqual(self.config.plugin_factories, ALL_PLUGINS) def test_plugin_factories_with_monitor_plugins(self): """ The C{--monitor-plugins} command line option can be used to specify which plugins should be active. """ self.config.load(["--monitor-plugins", " ComputerInfo, LoadAverage "]) self.assertEqual( self.config.plugin_factories, ["ComputerInfo", "LoadAverage"]) def test_flush_interval(self): """ The C{--flush-interval} command line option can be used to specify the flush interval. 
""" self.config.load(["--flush-interval", "123"]) self.assertEqual(self.config.flush_interval, 123) landscape-client-14.01/landscape/monitor/tests/test_activeprocessinfo.py0000644000175000017500000010150712301414317026463 0ustar andreasandreasimport operator import os import shutil import tempfile import subprocess from twisted.internet.defer import fail from landscape.monitor.activeprocessinfo import ActiveProcessInfo from landscape.tests.helpers import (LandscapeTest, MonitorHelper, ProcessDataBuilder) from landscape.tests.mocker import ANY from landscape import SERVER_API class ActiveProcessInfoTest(LandscapeTest): """Active process info plugin tests.""" helpers = [MonitorHelper] def setUp(self): """Initialize helpers and sample data builder.""" LandscapeTest.setUp(self) self.sample_dir = tempfile.mkdtemp() self.builder = ProcessDataBuilder(self.sample_dir) self.mstore.set_accepted_types(["active-process-info"]) def tearDown(self): """Clean up sample data artifacts.""" shutil.rmtree(self.sample_dir) LandscapeTest.tearDown(self) def test_first_run_includes_kill_message(self): """Test ensures that the first run queues a kill-processes message.""" plugin = ActiveProcessInfo(uptime=10) self.monitor.add(plugin) plugin.exchange() message = self.mstore.get_pending_messages()[0] self.assertEqual(message["type"], "active-process-info") self.assertTrue("kill-all-processes" in message) self.assertEqual(message["kill-all-processes"], True) self.assertTrue("add-processes" in message) def test_only_first_run_includes_kill_message(self): """Test ensures that only the first run queues a kill message.""" self.builder.create_data(672, self.builder.TRACING_STOP, uid=1000, gid=1000, started_after_boot=10, process_name="blarpy") plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=10) self.monitor.add(plugin) self.monitor.exchange() self.builder.create_data(671, self.builder.STOPPED, uid=1000, gid=1000, started_after_boot=15, process_name="blargh") self.monitor.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 2) message = messages[0] self.assertEqual(message["type"], "active-process-info") self.assertTrue("kill-all-processes" in message) self.assertTrue("add-processes" in message) message = messages[1] self.assertEqual(message["type"], "active-process-info") self.assertTrue("add-processes" in message) def test_terminating_process_race(self): """Test that the plugin handles process termination races. There is a potential race in the time between getting a list of process directories in C{/proc} and reading C{/proc//status} or C{/proc//stat}. The process with C{} may terminate and causing status (or stat) to be removed in this window, resulting in an file-not-found IOError. This test simulates race behaviour by creating a directory for a process without a C{status} or C{stat} file. 
""" directory = tempfile.mkdtemp() try: os.mkdir(os.path.join(directory, "42")) plugin = ActiveProcessInfo(proc_dir=directory, uptime=10) self.monitor.add(plugin) plugin.exchange() finally: shutil.rmtree(directory) def test_read_proc(self): """Test reading from /proc.""" plugin = ActiveProcessInfo(uptime=10) self.monitor.add(plugin) plugin.exchange() messages = self.mstore.get_pending_messages() self.assertTrue(len(messages) > 0) self.assertTrue("add-processes" in messages[0]) def test_read_sample_data(self): """Test reading a sample set of process data.""" self.builder.create_data(1, self.builder.RUNNING, uid=0, gid=0, started_after_boot=1030, process_name="init") self.builder.create_data(671, self.builder.STOPPED, uid=1000, gid=1000, started_after_boot=1110, process_name="blargh") self.builder.create_data(672, self.builder.TRACING_STOP, uid=1000, gid=1000, started_after_boot=1120, process_name="blarpy") plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=100, jiffies=10, boot_time=0) self.monitor.add(plugin) plugin.exchange() message = self.mstore.get_pending_messages()[0] self.assertEqual(message["type"], "active-process-info") self.assertTrue("kill-all-processes" in message) self.assertTrue("add-processes" in message) expected_process_0 = {"state": "R", "gid": 0, "pid": 1, "vm-size": 11676, "name": "init", "uid": 0, "start-time": 103, "percent-cpu": 0.0} expected_process_1 = {"state": "T", "gid": 1000, "pid": 671, "vm-size": 11676, "name": "blargh", "uid": 1000, "start-time": 111, "percent-cpu": 0.0} expected_process_2 = {"state": "t", "gid": 1000, "pid": 672, "vm-size": 11676, "name": "blarpy", "uid": 1000, "start-time": 112, "percent-cpu": 0.0} processes = message["add-processes"] processes.sort(key=operator.itemgetter("pid")) self.assertEqual(processes, [expected_process_0, expected_process_1, expected_process_2]) def test_skip_non_numeric_subdirs(self): """Test ensures the plugin doesn't touch non-process dirs in /proc.""" self.builder.create_data(1, self.builder.RUNNING, uid=0, gid=0, started_after_boot=1120, process_name="init") directory = os.path.join(self.sample_dir, "acpi") os.mkdir(directory) self.assertTrue(os.path.isdir(directory)) plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=100, jiffies=10, boot_time=0) self.monitor.add(plugin) plugin.exchange() message = self.mstore.get_pending_messages()[0] self.assertEqual(message["type"], "active-process-info") self.assertTrue("kill-all-processes" in message) self.assertTrue("add-processes" in message) expected_process = {"pid": 1, "state": "R", "name": "init", "vm-size": 11676, "uid": 0, "gid": 0, "start-time": 112, "percent-cpu": 0.0} self.assertEqual(message["add-processes"], [expected_process]) def test_plugin_manager(self): """Test plugin manager integration.""" self.builder.create_data(1, self.builder.RUNNING, uid=0, gid=0, started_after_boot=1100, process_name="init") plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=100, jiffies=10, boot_time=0) self.monitor.add(plugin) self.monitor.exchange() self.assertMessages( self.mstore.get_pending_messages(), [{"type": "active-process-info", "kill-all-processes": True, "add-processes": [{"pid": 1, "state": "R", "name": "init", "vm-size": 11676, "uid": 0, "gid": 0, "start-time": 110, "percent-cpu": 0.0}]}]) def test_process_terminated(self): """Test that the plugin handles process changes in a diff-like way.""" # This test is *too big* self.builder.create_data(1, self.builder.RUNNING, uid=0, gid=0, started_after_boot=1010, process_name="init") 
self.builder.create_data(671, self.builder.STOPPED, uid=1000, gid=1000, started_after_boot=1020, process_name="blargh") self.builder.create_data(672, self.builder.TRACING_STOP, uid=1000, gid=1000, started_after_boot=1040, process_name="blarpy") plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=100, jiffies=10, boot_time=0) self.monitor.add(plugin) plugin.exchange() # Terminate a process and start another. self.builder.remove_data(671) self.builder.create_data(12753, self.builder.RUNNING, uid=0, gid=0, started_after_boot=1070, process_name="wubble") plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 2) # The first time the plugin runs we expect all known processes # to be killed. message = messages[0] self.assertEqual(message["type"], "active-process-info") self.assertTrue("kill-all-processes" in message) self.assertEqual(message["kill-all-processes"], True) self.assertTrue("add-processes" in message) expected_process_0 = {"state": "R", "gid": 0, "pid": 1, "vm-size": 11676, "name": "init", "uid": 0, "start-time": 101, "percent-cpu": 0.0} expected_process_1 = {"state": "T", "gid": 1000, "pid": 671, "vm-size": 11676, "name": "blargh", "uid": 1000, "start-time": 102, "percent-cpu": 0.0} expected_process_2 = {"state": "t", "gid": 1000, "pid": 672, "vm-size": 11676, "name": "blarpy", "uid": 1000, "start-time": 104, "percent-cpu": 0.0} processes = message["add-processes"] processes.sort(key=operator.itemgetter("pid")) self.assertEqual(processes, [expected_process_0, expected_process_1, expected_process_2]) # Report diff-like changes to processes, such as terminated # processes and new processes. message = messages[1] self.assertEqual(message["type"], "active-process-info") self.assertTrue("add-processes" in message) self.assertEqual(len(message["add-processes"]), 1) expected_process = {"state": "R", "gid": 0, "pid": 12753, "vm-size": 11676, "name": "wubble", "uid": 0, "start-time": 107, "percent-cpu": 0.0} self.assertEqual(message["add-processes"], [expected_process]) self.assertTrue("kill-processes" in message) self.assertEqual(len(message["kill-processes"]), 1) self.assertEqual(message["kill-processes"], [671]) def test_only_queue_message_when_process_data_is_available(self): """Test ensures that messages are only queued when data changes.""" self.builder.create_data(672, self.builder.TRACING_STOP, uid=1000, gid=1000, started_after_boot=10, process_name="blarpy") plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=10) self.monitor.add(plugin) plugin.exchange() self.assertEqual(len(self.mstore.get_pending_messages()), 1) plugin.exchange() self.assertEqual(len(self.mstore.get_pending_messages()), 1) def test_only_report_active_processes(self): """Test ensures the plugin only reports active processes.""" self.builder.create_data(672, self.builder.DEAD, uid=1000, gid=1000, started_after_boot=10, process_name="blarpy") self.builder.create_data(673, self.builder.ZOMBIE, uid=1000, gid=1000, started_after_boot=12, process_name="blarpitty") self.builder.create_data(674, self.builder.RUNNING, uid=1000, gid=1000, started_after_boot=13, process_name="blarpie") self.builder.create_data(675, self.builder.STOPPED, uid=1000, gid=1000, started_after_boot=14, process_name="blarping") self.builder.create_data(676, self.builder.TRACING_STOP, uid=1000, gid=1000, started_after_boot=15, process_name="floerp") self.builder.create_data(677, self.builder.DISK_SLEEP, uid=1000, gid=1000, started_after_boot=18, process_name="floerpidity") 
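# A seventh, sleeping process completes the sample set; of the processes created here only the DEAD one (pid 672) should be left out of the message, as the sorted pid list asserted below shows.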
self.builder.create_data(678, self.builder.SLEEPING, uid=1000, gid=1000, started_after_boot=21, process_name="floerpiditting") plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=10) self.monitor.add(plugin) plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 1) message = messages[0] self.assertTrue("kill-all-processes" in message) self.assertTrue("kill-processes" not in message) self.assertTrue("add-processes" in message) pids = [process["pid"] for process in message["add-processes"]] pids.sort() self.assertEqual(pids, [673, 674, 675, 676, 677, 678]) def test_report_interesting_state_changes(self): """Test ensures that interesting state changes are reported.""" self.builder.create_data(672, self.builder.RUNNING, uid=1000, gid=1000, started_after_boot=10, process_name="blarpy") # Report a running process. plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=10) self.monitor.add(plugin) plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 1) message = messages[0] self.assertTrue("kill-all-processes" in message) self.assertTrue("kill-processes" not in message) self.assertTrue("add-processes" in message) self.assertEqual(message["add-processes"][0]["pid"], 672) self.assertEqual(message["add-processes"][0]["state"], u"R") # Convert the process to a zombie and ensure it gets reported. self.builder.remove_data(672) self.builder.create_data(672, self.builder.ZOMBIE, uid=1000, gid=1000, started_after_boot=10, process_name="blarpy") plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 2) message = messages[1] self.assertTrue("kill-all-processes" not in message) self.assertTrue("update-processes" in message) self.assertEqual(message["update-processes"][0]["state"], u"Z") def test_call_on_accepted(self): """ L{MonitorPlugin}-based plugins can provide a callable to call when a message type becomes accepted. """ plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=100, jiffies=10) self.monitor.add(plugin) self.assertEqual(len(self.mstore.get_pending_messages()), 0) result = self.monitor.fire_event( "message-type-acceptance-changed", "active-process-info", True) def assert_messages(ignored): self.assertEqual(len(self.mstore.get_pending_messages()), 1) result.addCallback(assert_messages) return result def test_resynchronize_event(self): """ When a C{resynchronize} event occurs, with 'process' scope, we should clear the information held in memory by the activeprocess monitor. 
""" self.builder.create_data(1, self.builder.RUNNING, uid=0, gid=0, started_after_boot=1030, process_name="init") self.builder.create_data(671, self.builder.STOPPED, uid=1000, gid=1000, started_after_boot=1110, process_name="blargh") self.builder.create_data(672, self.builder.TRACING_STOP, uid=1000, gid=1000, started_after_boot=1120, process_name="blarpy") plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=100, jiffies=10, boot_time=0) self.monitor.add(plugin) plugin.exchange() messages = self.mstore.get_pending_messages() expected_messages = [{"add-processes": [ {"gid": 1000, "name": u"blarpy", "pid": 672, "start-time": 112, "state": "t", "uid": 1000, "vm-size": 11676, "percent-cpu": 0.0}, {"gid": 0, "name": u"init", "pid": 1, "start-time": 103, "state": "R", "uid": 0, "vm-size": 11676, "percent-cpu": 0.0}, {"gid": 1000, "name": u"blargh", "pid": 671, "start-time": 111, "state": "T", "uid": 1000, "vm-size": 11676, "percent-cpu": 0.0}], "kill-all-processes": True, "type": "active-process-info"}] self.assertMessages(messages, expected_messages) plugin.exchange() messages = self.mstore.get_pending_messages() # No new messages should be pending self.assertMessages(messages, expected_messages) process_scope = ["process"] self.reactor.fire("resynchronize", process_scope) plugin.exchange() messages = self.mstore.get_pending_messages() # The resynchronisation should cause the same messages to be generated # again. expected_messages.extend(expected_messages) self.assertMessages(messages, expected_messages) def test_resynchronize_event_resets_session_id(self): """ When a C{resynchronize} event occurs a new session id is acquired so that future messages can be sent. """ plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=100, jiffies=10, boot_time=0) self.monitor.add(plugin) session_id = plugin._session_id plugin.client.broker.message_store.drop_session_ids() self.reactor.fire("resynchronize") plugin.exchange() self.assertNotEqual(session_id, plugin._session_id) def test_resynchronize_event_with_global_scope(self): """ When a C{resynchronize} event occurs the L{_reset} method should be called on L{ActiveProcessInfo}. """ self.builder.create_data(672, self.builder.TRACING_STOP, uid=1000, gid=1000, started_after_boot=1120, process_name="blarpy") plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=100, jiffies=10, boot_time=0) self.monitor.add(plugin) plugin.exchange() messages = self.mstore.get_pending_messages() expected_messages = [{"add-processes": [ {"gid": 1000, "name": u"blarpy", "pid": 672, "start-time": 112, "state": "t", "uid": 1000, "vm-size": 11676, "percent-cpu": 0.0}], "kill-all-processes": True, "type": "active-process-info"}] self.assertMessages(messages, expected_messages) plugin.exchange() messages = self.mstore.get_pending_messages() # No new messages should be pending self.assertMessages(messages, expected_messages) self.reactor.fire("resynchronize") plugin.exchange() messages = self.mstore.get_pending_messages() # The resynchronisation should cause the same messages to be generated # again. expected_messages.extend(expected_messages) self.assertMessages(messages, expected_messages) def test_do_not_resynchronize_with_other_scope(self): """ When a C{resynchronize} event occurs, with an irrelevant scope, we should do nothing. 
""" self.builder.create_data(672, self.builder.TRACING_STOP, uid=1000, gid=1000, started_after_boot=1120, process_name="blarpy") plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=100, jiffies=10, boot_time=0) self.monitor.add(plugin) plugin.exchange() messages = self.mstore.get_pending_messages() expected_messages = [{"add-processes": [ {"gid": 1000, "name": u"blarpy", "pid": 672, "start-time": 112, "state": "t", "uid": 1000, "vm-size": 11676, "percent-cpu": 0.0}], "kill-all-processes": True, "type": "active-process-info"}] self.assertMessages(messages, expected_messages) plugin.exchange() messages = self.mstore.get_pending_messages() # No new messages should be pending self.assertMessages(messages, expected_messages) disk_scope = ["disk"] self.reactor.fire("resynchronize", disk_scope) plugin.exchange() messages = self.mstore.get_pending_messages() # The resynchronisation should not have fired, so we won't see any # additional messages here. self.assertMessages(messages, expected_messages) def test_do_not_persist_changes_when_send_message_fails(self): """ When the plugin is run it persists data that it uses on subsequent checks to calculate the delta to send. It should only persist data when the broker confirms that the message sent by the plugin has been sent. """ class MyException(Exception): pass self.log_helper.ignore_errors(MyException) self.builder.create_data(672, self.builder.RUNNING, uid=1000, gid=1000, started_after_boot=10, process_name="python") plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=10) self.monitor.add(plugin) broker_mock = self.mocker.replace(self.monitor.broker) broker_mock.send_message(ANY, ANY, urgent=ANY) self.mocker.result(fail(MyException())) self.mocker.replay() message = plugin.get_message() def assert_message(message_id): self.assertEqual(message, plugin.get_message()) result = plugin.exchange() result.addCallback(assert_message) return result def test_process_updates(self): """Test updates to processes are successfully reported.""" self.builder.create_data(1, self.builder.RUNNING, uid=0, gid=0, started_after_boot=1100, process_name="init",) plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=100, jiffies=10, boot_time=0) self.monitor.add(plugin) registry_mocker = self.mocker.replace(plugin.registry) registry_mocker.flush() self.mocker.count(2) self.mocker.result(None) self.mocker.replay() plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 1) self.builder.remove_data(1) self.builder.create_data(1, self.builder.RUNNING, uid=0, gid=0, started_after_boot=1100, process_name="init", vmsize=20000) plugin.exchange() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 2) self.assertMessages(messages, [{"timestamp": 0, "api": SERVER_API, "type": "active-process-info", "kill-all-processes": True, "add-processes": [{"start-time": 110, "name": u"init", "pid": 1, "percent-cpu": 0.0, "state": "R", "gid": 0, "vm-size": 11676, "uid": 0}]}, {"timestamp": 0, "api": SERVER_API, "type": "active-process-info", "update-processes": [ {"start-time": 110, "name": u"init", "pid": 1, "percent-cpu": 0.0, "state": "R", "gid": 0, "vm-size": 20000, "uid": 0}]}]) class PluginManagerIntegrationTest(LandscapeTest): helpers = [MonitorHelper] def setUp(self): LandscapeTest.setUp(self) self.sample_dir = self.makeDir() self.builder = ProcessDataBuilder(self.sample_dir) self.mstore.set_accepted_types(["active-process-info", "operation-result"]) def get_missing_pid(self): popen = 
subprocess.Popen(["hostname"], stdout=subprocess.PIPE) popen.wait() return popen.pid def get_active_process(self): return subprocess.Popen(["python", "-c", "raw_input()"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) def test_read_long_process_name(self): """Test reading a process with a long name.""" self.builder.create_data(1, self.builder.RUNNING, uid=0, gid=0, started_after_boot=1030, process_name="NetworkManagerDaemon") plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=2000, jiffies=10, boot_time=0) self.monitor.add(plugin) plugin.exchange() message = self.mstore.get_pending_messages()[0] self.assertEqual(message["type"], "active-process-info") self.assertTrue("kill-all-processes" in message) self.assertTrue("add-processes" in message) expected_process_0 = {"state": "R", "gid": 0, "pid": 1, "vm-size": 11676, "name": "NetworkManagerDaemon", "uid": 0, "start-time": 103, "percent-cpu": 0.0} processes = message["add-processes"] self.assertEqual(processes, [expected_process_0]) def test_strip_command_line_name_whitespace(self): """Whitespace should be stripped from command-line names.""" self.builder.create_data(1, self.builder.RUNNING, uid=0, gid=0, started_after_boot=30, process_name=" postgres: writer process ") plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=100, jiffies=10) self.monitor.add(plugin) plugin.exchange() message = self.mstore.get_pending_messages()[0] self.assertEqual(message["add-processes"][0]["name"], u"postgres: writer process") def test_read_process_with_no_cmdline(self): """Test reading a process without a cmdline file.""" self.builder.create_data(1, self.builder.RUNNING, uid=0, gid=0, started_after_boot=1030, process_name="ProcessWithLongName", generate_cmd_line=False) plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=100, jiffies=10, boot_time=0) self.monitor.add(plugin) plugin.exchange() message = self.mstore.get_pending_messages()[0] self.assertEqual(message["type"], "active-process-info") self.assertTrue("kill-all-processes" in message) self.assertTrue("add-processes" in message) expected_process_0 = {"state": "R", "gid": 0, "pid": 1, "vm-size": 11676, "name": "ProcessWithLong", "uid": 0, "start-time": 103, "percent-cpu": 0.0} processes = message["add-processes"] self.assertEqual(processes, [expected_process_0]) def test_generate_cpu_usage(self): """ Test that we can calculate the CPU usage from system information and the /proc//stat file. """ stat_data = "1 Process S 1 0 0 0 0 0 0 0 " \ "0 0 20 20 0 0 0 0 0 0 3000 0 " \ "0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0" self.builder.create_data(1, self.builder.RUNNING, uid=0, gid=0, started_after_boot=None, process_name="Process", generate_cmd_line=False, stat_data=stat_data) plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=400, jiffies=10, boot_time=0) self.monitor.add(plugin) plugin.exchange() message = self.mstore.get_pending_messages()[0] self.assertEqual(message["type"], "active-process-info") self.assertTrue("kill-all-processes" in message) self.assertTrue("add-processes" in message) processes = message["add-processes"] expected_process_0 = {"state": "R", "gid": 0, "pid": 1, "vm-size": 11676, "name": u"Process", "uid": 0, "start-time": 300, "percent-cpu": 4.00} processes = message["add-processes"] self.assertEqual(processes, [expected_process_0]) def test_generate_cpu_usage_capped(self): """ Test that we can calculate the CPU usage from system information and the /proc//stat file, the CPU usage should be capped at 99%. 
""" stat_data = "1 Process S 1 0 0 0 0 0 0 0 " \ "0 0 500 500 0 0 0 0 0 0 3000 0 " \ "0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0" self.builder.create_data(1, self.builder.RUNNING, uid=0, gid=0, started_after_boot=None, process_name="Process", generate_cmd_line=False, stat_data=stat_data) plugin = ActiveProcessInfo(proc_dir=self.sample_dir, uptime=400, jiffies=10, boot_time=0) self.monitor.add(plugin) plugin.exchange() message = self.mstore.get_pending_messages()[0] self.assertEqual(message["type"], "active-process-info") self.assertTrue("kill-all-processes" in message) self.assertTrue("add-processes" in message) processes = message["add-processes"] expected_process_0 = {"state": "R", "gid": 0, "pid": 1, "vm-size": 11676, "name": u"Process", "uid": 0, "start-time": 300, "percent-cpu": 99.00} processes = message["add-processes"] self.assertEqual(processes, [expected_process_0]) landscape-client-14.01/landscape/monitor/tests/test_computeruptime.py0000644000175000017500000003341212301414317026016 0ustar andreasandreasfrom datetime import datetime import struct from landscape.monitor.computeruptime import (LoginInfo, LoginInfoReader, ComputerUptime, BootTimes, get_uptime) from landscape.tests.helpers import LandscapeTest, MonitorHelper from landscape.tests.mocker import ANY def append_login_data(filename, login_type=0, pid=0, tty_device="/dev/", id="", username="", hostname="", termination_status=0, exit_status=0, session_id=0, entry_time_seconds=0, entry_time_milliseconds=0, remote_ip_address=[0, 0, 0, 0]): """Append binary login data to the specified filename.""" file = open(filename, "ab") try: file.write(struct.pack(LoginInfo.RAW_FORMAT, login_type, pid, tty_device, id, username, hostname, termination_status, exit_status, session_id, entry_time_seconds, entry_time_milliseconds, remote_ip_address[0], remote_ip_address[1], remote_ip_address[2], remote_ip_address[3], "")) finally: file.close() class UptimeTest(LandscapeTest): """Test for parsing /proc/uptime data.""" def test_valid_uptime_file(self): """Test ensures that we can read a valid /proc/uptime file.""" proc_file = self.makeFile("17608.24 16179.25") self.assertEqual("%0.2f" % get_uptime(proc_file), "17608.24") class LoginInfoReaderTest(LandscapeTest): """Tests for login info file reader.""" def test_read_empty_file(self): """Test ensures the reader is resilient to empty files.""" filename = self.makeFile("") file = open(filename, "rb") try: reader = LoginInfoReader(file) self.assertEqual(reader.read_next(), None) finally: file.close() def test_read_login_info(self): """Test ensures the reader can read login info.""" filename = self.makeFile("") append_login_data(filename, login_type=1, pid=100, tty_device="/dev/", id="1", username="jkakar", hostname="localhost", termination_status=0, exit_status=0, session_id=1, entry_time_seconds=105, entry_time_milliseconds=10, remote_ip_address=[192, 168, 42, 102]) append_login_data(filename, login_type=1, pid=101, tty_device="/dev/", id="1", username="root", hostname="localhost", termination_status=0, exit_status=0, session_id=2, entry_time_seconds=235, entry_time_milliseconds=17, remote_ip_address=[192, 168, 42, 102]) file = open(filename, "rb") try: reader = LoginInfoReader(file) info = reader.read_next() self.assertEqual(info.login_type, 1) self.assertEqual(info.pid, 100) self.assertEqual(info.tty_device, "/dev/") self.assertEqual(info.id, "1") self.assertEqual(info.username, "jkakar") self.assertEqual(info.hostname, "localhost") self.assertEqual(info.termination_status, 0) 
self.assertEqual(info.exit_status, 0) self.assertEqual(info.session_id, 1) self.assertEqual(info.entry_time, datetime.utcfromtimestamp(105)) # FIXME Test IP address handling. -jk info = reader.read_next() self.assertEqual(info.login_type, 1) self.assertEqual(info.pid, 101) self.assertEqual(info.tty_device, "/dev/") self.assertEqual(info.id, "1") self.assertEqual(info.username, "root") self.assertEqual(info.hostname, "localhost") self.assertEqual(info.termination_status, 0) self.assertEqual(info.exit_status, 0) self.assertEqual(info.session_id, 2) self.assertEqual(info.entry_time, datetime.utcfromtimestamp(235)) # FIXME Test IP address handling. -jk info = reader.read_next() self.assertEqual(info, None) finally: file.close() def test_login_info_iterator(self): """Test ensures iteration behaves correctly.""" filename = self.makeFile("") append_login_data(filename) append_login_data(filename) file = open(filename, "rb") try: reader = LoginInfoReader(file) count = 0 for info in reader.login_info(): count += 1 self.assertEqual(count, 2) finally: file.close() class ComputerUptimeTest(LandscapeTest): """Tests for the computer-uptime plugin.""" helpers = [MonitorHelper] def setUp(self): LandscapeTest.setUp(self) self.mstore.set_accepted_types(["computer-uptime"]) def test_deliver_message(self): """Test delivering a message with the boot and shutdown times.""" wtmp_filename = self.makeFile("") append_login_data(wtmp_filename, tty_device="~", username="shutdown", entry_time_seconds=535) plugin = ComputerUptime(wtmp_file=wtmp_filename) self.monitor.add(plugin) plugin.run() message = self.mstore.get_pending_messages()[0] self.assertTrue("type" in message) self.assertEqual(message["type"], "computer-uptime") self.assertTrue("shutdown-times" in message) self.assertEqual(message["shutdown-times"], [535]) def test_only_deliver_unique_shutdown_messages(self): """Test that only unique shutdown messages are generated.""" wtmp_filename = self.makeFile("") append_login_data(wtmp_filename, tty_device="~", username="shutdown", entry_time_seconds=535) plugin = ComputerUptime(wtmp_file=wtmp_filename) self.monitor.add(plugin) plugin.run() message = self.mstore.get_pending_messages()[0] self.assertTrue("type" in message) self.assertEqual(message["type"], "computer-uptime") self.assertTrue("shutdown-times" in message) self.assertEqual(message["shutdown-times"], [535]) append_login_data(wtmp_filename, tty_device="~", username="shutdown", entry_time_seconds=3212) plugin.run() message = self.mstore.get_pending_messages()[1] self.assertTrue("type" in message) self.assertEqual(message["type"], "computer-uptime") self.assertTrue("shutdown-times" in message) self.assertEqual(message["shutdown-times"], [3212]) def test_only_queue_messages_with_data(self): """Test ensures that messages without data are not queued.""" wtmp_filename = self.makeFile("") append_login_data(wtmp_filename, tty_device="~", username="reboot", entry_time_seconds=3212) append_login_data(wtmp_filename, tty_device="~", username="shutdown", entry_time_seconds=3562) plugin = ComputerUptime(wtmp_file=wtmp_filename) self.monitor.add(plugin) plugin.run() self.assertEqual(len(self.mstore.get_pending_messages()), 1) plugin.run() self.assertEqual(len(self.mstore.get_pending_messages()), 1) def test_missing_wtmp_file(self): wtmp_filename = self.makeFile() plugin = ComputerUptime(wtmp_file=wtmp_filename) self.monitor.add(plugin) plugin.run() self.assertEqual(len(self.mstore.get_pending_messages()), 0) def test_boot_time_same_as_last_known_startup_time(self): 
"""Ensure one message is queued for duplicate startup times.""" wtmp_filename = self.makeFile("") append_login_data(wtmp_filename, tty_device="~", username="reboot", entry_time_seconds=3212) plugin = ComputerUptime(wtmp_file=wtmp_filename) self.monitor.add(plugin) plugin.run() plugin.run() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 1) self.assertEqual(messages[0]["type"], "computer-uptime") self.assertEqual(messages[0]["startup-times"], [3212]) def test_new_startup_time_replaces_old_startup_time(self): """ Test ensures startup times are not duplicated even across restarts of the client. This is simulated by creating a new instance of the plugin. """ wtmp_filename = self.makeFile("") append_login_data(wtmp_filename, tty_device="~", username="reboot", entry_time_seconds=3212) plugin1 = ComputerUptime(wtmp_file=wtmp_filename) self.monitor.add(plugin1) plugin1.run() append_login_data(wtmp_filename, tty_device="~", username="shutdown", entry_time_seconds=3871) append_login_data(wtmp_filename, tty_device="~", username="reboot", entry_time_seconds=4657) plugin2 = ComputerUptime(wtmp_file=wtmp_filename) self.monitor.add(plugin2) plugin2.run() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 2) self.assertEqual(messages[0]["type"], "computer-uptime") self.assertEqual(messages[0]["startup-times"], [3212]) self.assertEqual(messages[1]["type"], "computer-uptime") self.assertEqual(messages[1]["startup-times"], [4657]) def test_check_last_logrotated_file(self): """Test ensures reading falls back to logrotated files.""" wtmp_filename = self.makeFile("") logrotated_filename = self.makeFile("", path=wtmp_filename + ".1") append_login_data(logrotated_filename, tty_device="~", username="reboot", entry_time_seconds=125) append_login_data(logrotated_filename, tty_device="~", username="shutdown", entry_time_seconds=535) plugin = ComputerUptime(wtmp_file=wtmp_filename) self.monitor.add(plugin) plugin.run() message = self.mstore.get_pending_messages()[0] self.assertTrue("type" in message) self.assertEqual(message["type"], "computer-uptime") self.assertTrue("startup-times" in message) self.assertEqual(message["startup-times"], [125]) self.assertTrue("shutdown-times" in message) self.assertEqual(message["shutdown-times"], [535]) def test_check_logrotate_spillover(self): """Test ensures reading falls back to logrotated files.""" wtmp_filename = self.makeFile("") logrotated_filename = self.makeFile("", path=wtmp_filename + ".1") append_login_data(logrotated_filename, tty_device="~", username="reboot", entry_time_seconds=125) append_login_data(logrotated_filename, tty_device="~", username="shutdown", entry_time_seconds=535) append_login_data(wtmp_filename, tty_device="~", username="reboot", entry_time_seconds=1025) append_login_data(wtmp_filename, tty_device="~", username="shutdown", entry_time_seconds=1150) plugin = ComputerUptime(wtmp_file=wtmp_filename) self.monitor.add(plugin) plugin.run() messages = self.mstore.get_pending_messages() self.assertEqual(len(messages), 2) message = messages[0] self.assertTrue("type" in message) self.assertEqual(message["type"], "computer-uptime") self.assertTrue("startup-times" in message) self.assertEqual(message["startup-times"], [125]) self.assertTrue("shutdown-times" in message) self.assertEqual(message["shutdown-times"], [535]) message = messages[1] self.assertTrue("type" in message) self.assertEqual(message["type"], "computer-uptime") self.assertTrue("startup-times" in message) 
self.assertEqual(message["startup-times"], [1025]) self.assertTrue("shutdown-times" in message) self.assertEqual(message["shutdown-times"], [1150]) def test_call_on_accepted(self): wtmp_filename = self.makeFile("") append_login_data(wtmp_filename, tty_device="~", username="shutdown", entry_time_seconds=535) plugin = ComputerUptime(wtmp_file=wtmp_filename) self.monitor.add(plugin) remote_broker_mock = self.mocker.replace(self.remote) remote_broker_mock.send_message(ANY, ANY, urgent=True) self.mocker.replay() self.reactor.fire(("message-type-acceptance-changed", "computer-uptime"), True) def test_no_message_if_not_accepted(self): """ Don't add any messages at all if the broker isn't currently accepting their type. """ self.mstore.set_accepted_types([]) wtmp_filename = self.makeFile("") append_login_data(wtmp_filename, tty_device="~", username="shutdown", entry_time_seconds=535) plugin = ComputerUptime(wtmp_file=wtmp_filename) self.monitor.add(plugin) plugin.run() self.mstore.set_accepted_types(["computer-uptime"]) self.assertMessages(list(self.mstore.get_pending_messages()), []) class BootTimesTest(LandscapeTest): def test_fallback_to_uptime(self): """ When no data is available in C{/var/log/wtmp} L{BootTimes.get_last_boot_time} falls back to C{/proc/uptime}. """ wtmp_filename = self.makeFile("") append_login_data(wtmp_filename, tty_device="~", username="shutdown", entry_time_seconds=535) self.assertTrue(BootTimes(filename=wtmp_filename).get_last_boot_time()) landscape-client-14.01/landscape/monitor/tests/test_cpuusage.py0000644000175000017500000002314612301414317024553 0ustar andreasandreasfrom landscape.monitor.cpuusage import CPUUsage, LAST_MESURE_KEY from landscape.tests.helpers import LandscapeTest, MonitorHelper class CPUUsagePluginTest(LandscapeTest): helpers = [MonitorHelper] def _write_stat_file(self, contents): statfile = self.makeFile() with open(statfile, "w") as f: f.write(contents) return statfile def test_get_cpu_usage_file_unreadable(self): """ When the file is unreadable or somehow creates an IOError (like when it doesn't exist), the method returns None. """ self.log_helper.ignore_errors("Could not open.*") thefile = "/tmp/whatever/I/do/not/exist" plugin = CPUUsage(create_time=self.reactor.time) self.monitor.add(plugin) result = plugin._get_cpu_usage(stat_file=thefile) self.assertIs(None, result) def test_get_cpu_usage_file_not_changed(self): """ When the stat file did not change between calls, the C{_get_cpu_usage} method returns None. """ contents1 = "cpu 100 100 100 100 100 100 100 0 0 0" thefile = self._write_stat_file(contents1) plugin = CPUUsage(create_time=self.reactor.time) self.monitor.add(plugin) result = plugin._get_cpu_usage(stat_file=thefile) # The first run will return None since we don't have a previous measure # yet. self.assertIs(None, result) result = plugin._get_cpu_usage(stat_file=thefile) self.assertIs(None, result) def test_get_cpu_usage_multiline_files(self): """ The C{_get_cpu_usage} method parses multiline stat files correctly. """ contents1 = "cpu 100 100 100 100 100 100 100 0 0 0\nsome garbage" thefile = self._write_stat_file(contents1) plugin = CPUUsage(create_time=self.reactor.time) self.monitor.add(plugin) result = plugin._get_cpu_usage(stat_file=thefile) # The first run will return None since we don't have a previous measure # yet. 
self.assertIs(None, result) result = plugin._get_cpu_usage(stat_file=thefile) self.assertIs(None, result) def test_get_cpu_usage_100_percent_usage(self): """ When two consecutive calls to C{_get_cpu_usage} show a CPU usage of 100%, the method returns 1. """ contents1 = "cpu 100 100 100 100 100 100 100 0 0 0" contents2 = "cpu 200 100 100 100 100 100 100 0 0 0" thefile = self._write_stat_file(contents1) thefile2 = self._write_stat_file(contents2) plugin = CPUUsage(create_time=self.reactor.time) self.monitor.add(plugin) result = plugin._get_cpu_usage(stat_file=thefile) # The first run will return None since we don't have a previous measure # yet. self.assertIs(None, result) result = plugin._get_cpu_usage(stat_file=thefile2) self.assertEqual(result, 1) def test_get_cpu_usage_0_percent_usage(self): """ When two consecutive calls to C{_get_cpu_usage} show a CPU usage of 0% (all the changes are in the idle column), the method returns 0. """ contents1 = "cpu 100 100 100 100 100 100 100 0 0 0" contents2 = "cpu 100 100 100 200 100 100 100 0 0 0" thefile = self._write_stat_file(contents1) thefile2 = self._write_stat_file(contents2) plugin = CPUUsage(create_time=self.reactor.time) self.monitor.add(plugin) result = plugin._get_cpu_usage(stat_file=thefile) # The first run will return None since we don't have a previous measure # yet. self.assertIs(None, result) result = plugin._get_cpu_usage(stat_file=thefile2) self.assertEqual(result, 0) def test_get_cpu_usage_50_percent_usage(self): """ When two consecutive calls to C{_get_cpu_usage} show a CPU usage of 50% (as much changed in an "active" column as in the idle column), the method returns 0.5. """ contents1 = "cpu 100 100 100 100 100 100 100 0 0 0" contents2 = "cpu 200 100 100 200 100 100 100 0 0 0" thefile = self._write_stat_file(contents1) thefile2 = self._write_stat_file(contents2) plugin = CPUUsage(create_time=self.reactor.time) self.monitor.add(plugin) result = plugin._get_cpu_usage(stat_file=thefile) # The first run will return None since we don't have a previous measure # yet. self.assertIs(None, result) result = plugin._get_cpu_usage(stat_file=thefile2) self.assertEqual(result, 0.5) def test_get_cpu_usage_after_reboot(self): """ When the computer just rebooted, we might have a case where the previous values are larger than the current values (since the kernel counts quantums allocated since boot). In this case, the method should return None. """ contents1 = "cpu 100 100 100 100 100 100 100 0 0 0" measure1 = (700, 100) measure2 = (900, 10) thefile = self._write_stat_file(contents1) plugin = CPUUsage(create_time=self.reactor.time) self.monitor.add(plugin) plugin._persist.set(LAST_MESURE_KEY, measure2) result = plugin._get_cpu_usage(stat_file=thefile) self.assertIs(None, result) self.assertEqual(measure1, plugin._persist.get(LAST_MESURE_KEY)) def test_create_message(self): """ Calling create_message returns an expected message.
""" plugin = CPUUsage(create_time=self.reactor.time) self.monitor.add(plugin) plugin._cpu_usage_points = [] message = plugin.create_message() self.assertIn("type", message) self.assertEqual(message["type"], "cpu-usage") self.assertIn("cpu-usages", message) cpu_usages = message["cpu-usages"] self.assertEqual(len(cpu_usages), 0) point = (60, 1.0) plugin._cpu_usage_points = [point] message = plugin.create_message() self.assertIn("type", message) self.assertEqual(message["type"], "cpu-usage") self.assertIn("cpu-usages", message) cpu_usages = message["cpu-usages"] self.assertEqual(len(cpu_usages), 1) self.assertEqual(point, cpu_usages[0]) def test_never_exchange_empty_messages(self): """ The plugin will create a message with an empty C{cpu-usages} list when no previous data is available. If an empty message is created during exchange, it should not be queued. """ self.mstore.set_accepted_types(["cpu-usage"]) plugin = CPUUsage(create_time=self.reactor.time) self.monitor.add(plugin) self.monitor.exchange() self.assertEqual(len(self.mstore.get_pending_messages()), 0) def test_exchange_messages(self): """ The CPU usage plugin queues message when manager.exchange() is called. """ self.mstore.set_accepted_types(["cpu-usage"]) plugin = CPUUsage(create_time=self.reactor.time) plugin._cpu_usage_points = [(60, 1.0)] self.monitor.add(plugin) self.monitor.exchange() self.assertMessages(self.mstore.get_pending_messages(), [{"type": "cpu-usage", "cpu-usages": [(60, 1.0)]}]) def test_no_message_if_not_accepted(self): """ Don't add any messages at all if the broker isn't currently accepting their type. """ interval = 30 plugin = CPUUsage(create_time=self.reactor.time, interval=interval) self.monitor.add(plugin) self.reactor.advance(self.monitor.step_size * 2) self.monitor.exchange() self.mstore.set_accepted_types(["cpu-usage"]) self.assertMessages(list(self.mstore.get_pending_messages()), []) def test_plugin_run(self): """ The plugin's run() method fills in the _cpu_usage_points with accumulated samples after each C{monitor.step_size} period. """ plugin = CPUUsage(create_time=self.reactor.time) self.monitor.add(plugin) def fake_get_cpu_usage(self): return 1.0 plugin._get_cpu_usage = fake_get_cpu_usage self.reactor.advance(self.monitor.step_size * 2) self.assertNotEqual([], plugin._cpu_usage_points) self.assertEqual([(300, 1.0), (600, 1.0)], plugin._cpu_usage_points) def test_plugin_run_with_None(self): """ The plugin's run() method fills in the _cpu_usage_points with accumulated samples after each C{monitor.step_size} period. Holes in the data (in case of error the method returns None) are handled gracefully, and are filled with averaged data. """ plugin = CPUUsage(create_time=self.reactor.time) self.monitor.add(plugin) def fake_get_cpu_usage(self): return 1.0 def fake_get_cpu_usage_none(self): return None plugin._get_cpu_usage = fake_get_cpu_usage self.reactor.advance(self.monitor.step_size) plugin._get_cpu_usage = fake_get_cpu_usage_none self.reactor.advance(self.monitor.step_size) self.assertNotEqual([], plugin._cpu_usage_points) self.assertEqual([(300, 1.0)], plugin._cpu_usage_points) # If we record values once again the "blank" period will be smoothed # over with the new points. 
plugin._get_cpu_usage = fake_get_cpu_usage self.reactor.advance(self.monitor.step_size) self.assertEqual([(300, 1.0), (600, 1.0), (900, 1.0)], plugin._cpu_usage_points) landscape-client-14.01/landscape/monitor/tests/test_packagemonitor.py0000644000175000017500000003030212301414317025732 0ustar andreasandreasimport os from twisted.internet.defer import Deferred from landscape.package.reporter import find_reporter_command from landscape.package.store import PackageStore from landscape.monitor.packagemonitor import PackageMonitor from landscape.tests.helpers import ( LandscapeTest, EnvironSaverHelper, MonitorHelper) class PackageMonitorTest(LandscapeTest): """Tests for the package monitor plugin.""" helpers = [EnvironSaverHelper, MonitorHelper] def setUp(self): """Initialize test helpers and create a sample package store.""" super(PackageMonitorTest, self).setUp() self.package_store_filename = self.makeFile() self.package_store = PackageStore(self.package_store_filename) self.package_monitor = PackageMonitor(self.package_store_filename) def createReporterTask(self): """ Put a task for the package reporter into the package store. """ message = {"type": "package-ids", "ids": [None], "request-id": 1} return self.package_store.add_task("reporter", message) def assertSingleReporterTask(self, data, task_id): """ Check that we have exactly one task, that it contains the right data and that its ID matches our expectation. """ # The next task should contain the passed data. task = self.package_store.get_next_task("reporter") self.assertEqual(task.data, data) # We want to make sure it has the correct id of 2 so that we # know it's not a new task that the reporter could possibly # remove by accident. self.assertEqual(task.id, task_id) # Let's remove that task and make sure there are no more tasks # in the queue. task.remove() task = self.package_store.get_next_task("reporter") self.assertEqual(task, None) def test_create_default_store_upon_message_handling(self): """ If the package sqlite database file doesn't exist yet, it is created upon message handling. """ filename = os.path.join(self.broker_service.config.data_path, "package/database") package_monitor = PackageMonitor() os.unlink(filename) self.assertFalse(os.path.isfile(filename)) self.monitor.add(package_monitor) package_monitor_mock = self.mocker.patch(package_monitor) package_monitor_mock.spawn_reporter() self.mocker.replay() message = {"type": "package-ids"} self.monitor.dispatch_message(message) self.assertTrue(os.path.isfile(filename)) def test_run_interval(self): """ The C{run_interval} of L{PackageMonitor} can be customized via the C{package_monitor_interval} configuration parameter. """ self.monitor.config.package_monitor_interval = 1234 self.package_monitor.register(self.monitor) self.assertEqual(1234, self.package_monitor.run_interval) def test_dont_spawn_reporter_if_message_not_accepted(self): self.monitor.add(self.package_monitor) package_monitor_mock = self.mocker.patch(self.package_monitor) package_monitor_mock.spawn_reporter() self.mocker.count(0) self.mocker.replay() return self.package_monitor.run() def test_spawn_reporter_on_registration_when_already_accepted(self): package_monitor_mock = self.mocker.patch(self.package_monitor) package_monitor_mock.spawn_reporter() # Slightly tricky as we have to wait for the result of run(), # but we don't have its deferred yet. To handle it, we create # our own deferred, and register a callback for when run() # returns, chaining both deferreds at that point.
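# (Illustrative aside: chainDeferred(deferred) is shorthand for
# inner.addCallbacks(deferred.callback, deferred.errback), so firing
# the inner deferred fires ours, and trial keeps the test alive until
# run() has really completed.)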
deferred = Deferred() def run_has_run(run_result_deferred): return run_result_deferred.chainDeferred(deferred) package_monitor_mock.run() self.mocker.passthrough(run_has_run) self.mocker.replay() self.broker_service.message_store.set_accepted_types(["packages"]) self.monitor.add(self.package_monitor) return deferred def test_spawn_reporter_on_run_if_message_accepted(self): self.broker_service.message_store.set_accepted_types(["packages"]) package_monitor_mock = self.mocker.patch(self.package_monitor) package_monitor_mock.spawn_reporter() self.mocker.count(2) # Once for registration, then again explicitly. self.mocker.replay() self.monitor.add(self.package_monitor) return self.package_monitor.run() def test_package_ids_handling(self): self.monitor.add(self.package_monitor) package_monitor_mock = self.mocker.patch(self.package_monitor) package_monitor_mock.spawn_reporter() self.mocker.replay() message = {"type": "package-ids", "ids": [None], "request-id": 1} self.monitor.dispatch_message(message) task = self.package_store.get_next_task("reporter") self.assertTrue(task) self.assertEqual(task.data, message) def test_spawn_reporter(self): command = self.makeFile("#!/bin/sh\necho 'I am the reporter!' >&2\n") os.chmod(command, 0755) find_command_mock = self.mocker.replace(find_reporter_command) find_command_mock() self.mocker.result(command) self.mocker.replay() package_monitor = PackageMonitor(self.package_store_filename) self.monitor.add(package_monitor) result = package_monitor.spawn_reporter() def got_result(result): log = self.logfile.getvalue() self.assertIn("I am the reporter!", log) self.assertNotIn(command, log) return result.addCallback(got_result) def test_spawn_reporter_without_output(self): find_command_mock = self.mocker.replace(find_reporter_command) find_command_mock() self.mocker.result("/bin/true") self.mocker.replay() package_monitor = PackageMonitor(self.package_store_filename) self.monitor.add(package_monitor) result = package_monitor.spawn_reporter() def got_result(result): log = self.logfile.getvalue() self.assertNotIn("reporter output", log) return result.addCallback(got_result) def test_spawn_reporter_copies_environment(self): command = self.makeFile("#!/bin/sh\necho VAR: $VAR\n") os.chmod(command, 0755) find_command_mock = self.mocker.replace(find_reporter_command) find_command_mock() self.mocker.result(command) self.mocker.replay() package_monitor = PackageMonitor(self.package_store_filename) self.monitor.add(package_monitor) os.environ["VAR"] = "HI!" 
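# (Illustrative aside: spawn_reporter() hands env=os.environ.copy() to
# getProcessOutput, so the VAR just set above is expected to reach the
# child script.)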
result = package_monitor.spawn_reporter() def got_result(result): log = self.logfile.getvalue() self.assertIn("VAR: HI!", log) self.assertNotIn(command, log) return result.addCallback(got_result) def test_spawn_reporter_passes_quiet_option(self): command = self.makeFile("#!/bin/sh\necho OPTIONS: $@\n") os.chmod(command, 0755) find_command_mock = self.mocker.replace(find_reporter_command) find_command_mock() self.mocker.result(command) self.mocker.replay() package_monitor = PackageMonitor(self.package_store_filename) self.monitor.add(package_monitor) result = package_monitor.spawn_reporter() def got_result(result): log = self.logfile.getvalue() self.assertIn("OPTIONS: --quiet", log) self.assertNotIn(command, log) return result.addCallback(got_result) def test_call_on_accepted(self): package_monitor_mock = self.mocker.patch(self.package_monitor) package_monitor_mock.spawn_reporter() self.mocker.replay() self.monitor.add(self.package_monitor) self.monitor.reactor.fire( ("message-type-acceptance-changed", "packages"), True) def test_resynchronize(self): """ If a 'resynchronize' reactor event is fired with 'package' scope, the package monitor should clear all queued tasks and queue a task that tells the reporter to clear out the rest of the package data. """ self.monitor.add(self.package_monitor) self.createReporterTask() # The server doesn't currently send 'package' scope, but we should # support it in case we change that in the future. package_scope = ["package"] self.monitor.reactor.fire("resynchronize", package_scope) self.assertSingleReporterTask({"type": "resynchronize"}, 2) def test_resynchronize_gets_new_session_id(self): """ When a 'resynchronize' reactor event is fired, the C{PackageMonitor} acquires a new session ID (as the old one will be blocked). """ self.monitor.add(self.package_monitor) session_id = self.package_monitor._session_id self.createReporterTask() self.package_monitor.client.broker.message_store.drop_session_ids() self.monitor.reactor.fire("resynchronize") self.assertNotEqual(session_id, self.package_monitor._session_id) def test_resynchronize_on_global_scope(self): """ If a 'resynchronize' reactor event is fired with global scope (the empty list), the package monitor should act as if it were an event with 'package' scope. """ self.monitor.add(self.package_monitor) self.createReporterTask() self.monitor.reactor.fire("resynchronize") # The next task should be the resynchronize message. self.assertSingleReporterTask({"type": "resynchronize"}, 2) def test_not_resynchronize_with_other_scope(self): """ If a 'resynchronize' reactor event is fired with an irrelevant scope, the package monitor should not respond to it. """ self.monitor.add(self.package_monitor) task = self.createReporterTask() disk_scope = ["disk"] self.monitor.reactor.fire("resynchronize", disk_scope) # The next task should *not* be the resynchronize message, but instead # the original task we created.
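# (Illustrative aside: resynchronize scopes act as a filter -- an empty
# or missing scope list is global and matches every plugin, while a
# non-empty list only matches plugins whose `scope` attribute, here
# "package", is included.)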
self.assertSingleReporterTask(task.data, task.id) def test_spawn_reporter_doesnt_chdir(self): command = self.makeFile("#!/bin/sh\necho RUN\n") os.chmod(command, 0755) cwd = os.getcwd() self.addCleanup(os.chdir, cwd) dir = self.makeDir() os.chdir(dir) os.chmod(dir, 0) find_command_mock = self.mocker.replace(find_reporter_command) find_command_mock() self.mocker.result(command) self.mocker.replay() package_monitor = PackageMonitor(self.package_store_filename) self.monitor.add(package_monitor) result = package_monitor.spawn_reporter() def got_result(result): log = self.logfile.getvalue() self.assertIn("RUN", log) # restore permissions to the dir so tearDown can clean it up os.chmod(dir, 0766) return result.addCallback(got_result) def test_changing_server_uuid_clears_hash_ids(self): """ The package hash=>id map is server-specific, so when we change servers, we should reset this map. """ self.package_store.set_hash_ids({"hash1": 1, "hash2": 2}) self.monitor.add(self.package_monitor) self.monitor.reactor.fire("server-uuid-changed", "old", "new") self.assertEqual(self.package_store.get_hash_id("hash1"), None) self.assertEqual(self.package_store.get_hash_id("hash2"), None) def test_changing_server_uuid_wont_clear_hash_ids_with_old_uuid_none(self): """ If the old UUID is unknown, that means the client just started talking to a server that knows how to communicate its UUID, so we don't want to clear the old hashes in this case. """ self.package_store.set_hash_ids({"hash1": 1, "hash2": 2}) self.monitor.add(self.package_monitor) self.monitor.reactor.fire("server-uuid-changed", None, "new-uuid") self.assertEqual(self.package_store.get_hash_id("hash1"), 1) self.assertEqual(self.package_store.get_hash_id("hash2"), 2) landscape-client-14.01/landscape/monitor/packagemonitor.py0000644000175000017500000001403212301414317023533 0ustar andreasandreasimport logging import os from twisted.internet.utils import getProcessOutput from landscape.package.store import PackageStore from landscape.package.reporter import find_reporter_command from landscape.monitor.plugin import MonitorPlugin class PackageMonitor(MonitorPlugin): run_interval = 1800 scope = "package" def __init__(self, package_store_filename=None): super(PackageMonitor, self).__init__() if package_store_filename: self._package_store = PackageStore(package_store_filename) else: self._package_store = None self._reporter_command = find_reporter_command() def register(self, registry): self.config = registry.config self.run_interval = self.config.package_monitor_interval if self.config.clones and self.config.is_clone: # Run clones a bit more frequently in order to catch up self.run_interval = 60 # 300 super(PackageMonitor, self).register(registry) if not self._package_store: filename = os.path.join(registry.config.data_path, "package/database") self._package_store = PackageStore(filename) registry.register_message("package-ids", self._enqueue_message_as_reporter_task) registry.reactor.call_on("server-uuid-changed", self._server_uuid_changed) self.call_on_accepted("packages", self.spawn_reporter) self.run() def _enqueue_message_as_reporter_task(self, message): self._package_store.add_task("reporter", message) self.spawn_reporter() def run(self): result = self.registry.broker.get_accepted_message_types() result.addCallback(self._got_message_types) return result def _got_message_types(self, message_types): if "packages" in message_types: self.spawn_reporter() def _run_fake_reporter(self, args): """Run a fake-reporter in-process.""" class FakeFacade(object): """ 
A fake facade to work around the issue that the AptFacade essentially allows only one instance per process. """ def get_arch(self): arch = os.uname()[-1] result = {"pentium": "i386", "i86pc": "i386", "x86_64": "amd64"}.get(arch) if result: arch = result elif (arch[0] == "i" and arch.endswith("86")): arch = "i386" return arch if getattr(self, "_fake_reporter", None) is None: from landscape.package.reporter import ( FakeReporter, PackageReporterConfiguration) from landscape.package.store import FakePackageStore package_facade = FakeFacade() package_config = PackageReporterConfiguration() package_config.load(args + ["-d", self.config.data_path, "-l", self.config.log_dir]) package_store = FakePackageStore(package_config.store_filename) self._fake_reporter = FakeReporter(package_store, package_facade, self.registry.broker, package_config) self._fake_reporter.global_store_filename = os.path.join( self.config.master_data_path, "package", "database") self._fake_reporter_running = False if self._fake_reporter_running: from twisted.internet.defer import succeed return succeed(None) self._fake_reporter_running = True result = self._fake_reporter.run() def done(passthrough): self._fake_reporter_running = False return passthrough return result.addBoth(done) def spawn_reporter(self): args = ["--quiet"] if self.config.config: args.extend(["-c", self.config.config]) env = os.environ.copy() if self.config.clones > 0: if self.config.is_clone: return self._run_fake_reporter(args) else: env["FAKE_GLOBAL_PACKAGE_STORE"] = "1" # path is set to None so that getProcessOutput does not # chdir to "."; see bug #211373 result = getProcessOutput(self._reporter_command, args=args, env=env, errortoo=1, path=None) result.addCallback(self._got_reporter_output) return result def _got_reporter_output(self, output): if output: logging.warning("Package reporter output:\n%s" % output) def _reset(self): """ Remove all tasks *except* the resynchronize task. This is because if we clear all tasks, then add the resynchronize, it's possible that the reporter may be running a task at this time and when it finishes, it will unknowingly remove the resynchronize task, because sqlite resets its serial primary keys when you delete an entire table. This problem is avoided by adding the resynchronize task first and then removing all the other tasks, which never causes sqlite to reset the serial key. """ task = self._package_store.add_task("reporter", {"type": "resynchronize"}) self._package_store.clear_tasks(except_tasks=(task,)) def _server_uuid_changed(self, old_uuid, new_uuid): """Called when the broker sends a server-uuid-changed event. The package hash=>id map is server-specific, so when we change servers, we should reset this map. """ # If the old_uuid is None, it means we're just starting to # communicate with a server that knows how to report its UUID, # so we don't clear our knowledge. if old_uuid is not None: self._package_store.clear_hash_ids() landscape-client-14.01/landscape/monitor/__init__.py0000644000175000017500000000017312301414317022270 0ustar andreasandreas""" The monitor extracts data about the local machine and sends it in messages to the Landscape server via the broker. """ landscape-client-14.01/landscape/monitor/updatemanager.py0000644000175000017500000000535012301414317023350 0ustar andreasandreasimport ConfigParser import os import logging from landscape.monitor.plugin import MonitorPlugin class UpdateManager(MonitorPlugin): """ Report on changes to the update-manager configuration.
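For illustration (an example added here, not part of the original docstring), the watched file typically contains something like:

    [DEFAULT]
    Prompt=lts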
@param update_manager_filename: the path to the update-manager configuration file. """ # This file is used by the update-manager and may contain a "Prompt" # variable which indicates that users are prompted to upgrade the release # when any new release is available ("normal"); when a new LTS release is # available ("lts"); or never ("never"). update_manager_filename = "/etc/update-manager/release-upgrades" persist_name = "update-manager" scope = "package" run_interval = 3600 # 1 hour run_immediately = True def __init__(self, update_manager_filename=None): if update_manager_filename is not None: self.update_manager_filename = update_manager_filename def _get_prompt(self): """ Retrieve the update-manager upgrade prompt which dictates when we should prompt users to upgrade the release. Current valid values are "normal" (prompt on the availability of any release), "lts" (prompt only when LTS releases are available), and "never". """ if not os.path.exists(self.update_manager_filename): # There is no config, so we just act as if it's set to 'normal' return "normal" config_file = open(self.update_manager_filename) parser = ConfigParser.SafeConfigParser() parser.readfp(config_file) prompt = parser.get("DEFAULT", "Prompt") valid_prompts = ["lts", "never", "normal"] if prompt not in valid_prompts: prompt = "normal" message = ("%s contains an invalid Prompt value. " "Should be one of %s." % ( self.update_manager_filename, valid_prompts)) logging.warning(message) return prompt def send_message(self): """ Send the current upgrade release prompt to the server. """ prompt = self._get_prompt() if prompt == self._persist.get("prompt"): return self._persist.set("prompt", prompt) message = { "type": "update-manager-info", "prompt": prompt} logging.info("Queueing message with updated " "update-manager status.") return self.registry.broker.send_message(message, self._session_id) def run(self): """ Send the update-manager-info messages, if the server accepts them. """ return self.registry.broker.call_if_accepted( "update-manager-info", self.send_message) landscape-client-14.01/landscape/monitor/monitor.py0000644000175000017500000000202612301414317022217 0ustar andreasandreas"""The Landscape monitor plugin system.""" import os from landscape.broker.client import BrokerClient class Monitor(BrokerClient): """The central point of integration in the Landscape monitor.""" name = "monitor" def __init__(self, reactor, config, persist, persist_filename=None, step_size=5 * 60): super(Monitor, self).__init__(reactor) self.reactor = reactor self.config = config self.persist = persist self.persist_filename = persist_filename if persist_filename and os.path.exists(persist_filename): self.persist.load(persist_filename) self._plugins = [] self.step_size = step_size self.reactor.call_every(self.config.flush_interval, self.flush) def flush(self): """Flush data to disk.""" if self.persist_filename: self.persist.save(self.persist_filename) def exchange(self): """Call C{exchange} on all plugins.""" super(Monitor, self).exchange() self.flush() landscape-client-14.01/landscape/monitor/processorinfo.py0000644000175000017500000002224412301414317023427 0ustar andreasandreasimport logging import os import re from landscape.plugin import PluginConfigError from landscape.monitor.plugin import MonitorPlugin class ProcessorInfo(MonitorPlugin): """Plugin that captures information about the processor(s) in this machine. This plugin runs once per client session.
When processor information is retrieved it's compared against the last known processor information, which is saved in persistent storage. A message is only put on the message queue if the latest processor information differs from the last known processor information. The information available from /proc/cpuinfo varies per platform. For example, an Apple PowerMac Dual G5 doesn't contain a vendor ID and provides the processor name in the 'cpu' field, as opposed to the 'model name' field used on x86-based hardware. For reasons such as this, the schema of the data reported by this plugin is flexible. Only 'processor-id' and 'model' are guaranteed to be present. In order to deal with the vagaries of parsing /proc/cpu information on the various platforms we support, message generation is deferred to per-platform message factories. @param delay: Set the starting delay. @param machine_name: The machine name to report. @param source_filename: The filesystem path to read information from. """ persist_name = "processor-info" scope = "cpu" # Prevent the Plugin base-class from scheduling looping calls. run_interval = None def __init__(self, delay=2, machine_name=None, source_filename="/proc/cpuinfo"): self._delay = delay self._source_filename = source_filename if machine_name is None: machine_name = os.uname()[4] self._cpu_info_reader = self._create_cpu_info_reader(machine_name, source_filename) def _create_cpu_info_reader(self, machine_name, source_filename): """Return a message factory suitable for the specified machine name.""" for pair in message_factories: regexp = re.compile(pair[0]) if regexp.match(machine_name): return pair[1](source_filename) raise PluginConfigError("A processor info reader for '%s' is not " "available." % machine_name) def register(self, registry): """Register this plugin with the specified plugin registry.""" super(ProcessorInfo, self).register(registry) self.registry.reactor.call_later(self._delay, self.run) self.call_on_accepted("processor-info", self.send_message, True) def create_message(self): """Retrieve processor information and generate a message.""" return {"type": "processor-info", "processors": self._cpu_info_reader.create_message()} def send_message(self, urgent=False): dirty = False message = self.create_message() for processor in message["processors"]: key = ("processor", str(processor["processor-id"])) cached_processor = self._persist.get(key) if cached_processor is None: cached_processor = {} self._update(cached_processor, processor) dirty = True else: if self._has_changed(cached_processor, processor): self._update(cached_processor, processor) dirty = True if dirty: logging.info("Queueing message with updated processor info.") self.registry.broker.send_message( message, self._session_id, urgent=urgent) def run(self, urgent=False): """Create a message and put it on the message queue.""" self.registry.broker.call_if_accepted("processor-info", self.send_message, urgent) def _has_changed(self, processor, message): """Returns true if processor details changed since the last read.""" if processor["model"] != message["model"]: return True if processor["vendor"] != message.get("vendor", ""): return True if processor["cache_size"] != message.get("cache-size", -1): return True return False def _update(self, processor, message): """Update the processor details with current values.""" processor["id"] = message["processor-id"] processor["model"] = message["model"] processor["cache_size"] = message.get("cache-size", -1) processor["vendor"] = message.get("vendor", "") 
self._persist.set(("processor", str(message["processor-id"])), processor) class PowerPCMessageFactory: """Factory for ppc-based processors provides processor information. @param source_filename: The file name of the data source. """ def __init__(self, source_filename): self._source_filename = source_filename def create_message(self): """Returns a list containing information about each processor.""" processors = [] file = open(self._source_filename) try: current = None for line in file: parts = line.split(":", 1) key = parts[0].strip() if key == "processor": current = {"processor-id": int(parts[1].strip())} processors.append(current) elif key == "cpu": current["model"] = parts[1].strip() finally: file.close() return processors class ARMMessageFactory: """Factory for arm-based processors provides processor information. @param source_filename: The file name of the data source. """ def __init__(self, source_filename): self._source_filename = source_filename def create_message(self): """Returns a list containing information about each processor.""" processors = [] file = open(self._source_filename) try: regexp = re.compile("(?P.*?)\s*:\s*(?P.*)") current = {} for line in file: match = regexp.match(line.strip()) if match: key = match.group("key") value = match.group("value") if key == "Processor": # ARM doesn't support SMP, thus no processor-id in # the cpuinfo current["processor-id"] = 0 current["model"] = value elif key == "Cache size": current["cache-size"] = int(value) if current: processors.append(current) finally: file.close() return processors class SparcMessageFactory: """Factory for sparc-based processors provides processor information. @param source_filename: The file name of the data source. """ def __init__(self, source_filename): self._source_filename = source_filename def create_message(self): """Returns a list containing information about each processor.""" processors = [] model = None file = open(self._source_filename) try: regexp = re.compile("CPU(\d{1})+") for line in file: parts = line.split(":", 1) key = parts[0].strip() if key == "cpu": model = parts[1].strip() elif regexp.match(key): start, end = re.compile("\d+").search(key).span() message = {"processor-id": int(key[start:end]), "model": model} processors.append(message) finally: file.close() return processors class X86MessageFactory: """Factory for x86-based processors provides processor information. @param source_filename: The file name of the data source. 
""" def __init__(self, source_filename): self._source_filename = source_filename def create_message(self): """Returns a list containing information about each processor.""" processors = [] file = open(self._source_filename) try: current = None for line in file: parts = line.split(":", 1) key = parts[0].strip() if key == "processor": current = {"processor-id": int(parts[1].strip())} processors.append(current) elif key == "vendor_id": current["vendor"] = parts[1].strip() elif key == "model name": current["model"] = parts[1].strip() elif key == "cache size": value_parts = parts[1].split() current["cache-size"] = int(value_parts[0].strip()) finally: file.close() return processors message_factories = [("arm*", ARMMessageFactory), ("ppc(64)?", PowerPCMessageFactory), ("sparc[64]", SparcMessageFactory), ("i[3-7]86|x86_64", X86MessageFactory)] landscape-client-14.01/landscape/monitor/memoryinfo.py0000644000175000017500000000523112301414317022715 0ustar andreasandreasimport time from landscape.lib.monitor import CoverageMonitor from landscape.lib.sysstats import MemoryStats from landscape.accumulate import Accumulator from landscape.monitor.plugin import MonitorPlugin class MemoryInfo(MonitorPlugin): """Plugin captures information about free memory and free swap.""" persist_name = "memory-info" scope = "memory" # Prevent the Plugin base-class from scheduling looping calls. run_interval = None def __init__(self, interval=15, monitor_interval=60 * 60, source_filename="/proc/meminfo", create_time=time.time): self._interval = interval self._monitor_interval = monitor_interval self._source_filename = source_filename self._memory_info = [] self._create_time = create_time def register(self, registry): super(MemoryInfo, self).register(registry) self._accumulate = Accumulator(self._persist, self.registry.step_size) self.registry.reactor.call_every(self._interval, self.run) self._monitor = CoverageMonitor(self._interval, 0.8, "memory/swap snapshot", create_time=self._create_time) self.registry.reactor.call_every(self._monitor_interval, self._monitor.log) self.registry.reactor.call_on("stop", self._monitor.log, priority=2000) self.call_on_accepted("memory-info", self.send_message, True) def create_message(self): memory_info = self._memory_info self._memory_info = [] return {"type": "memory-info", "memory-info": memory_info} def send_message(self, urgent=False): message = self.create_message() if len(message["memory-info"]): self.registry.broker.send_message( message, self._session_id, urgent=urgent) def exchange(self, urgent=False): self.registry.broker.call_if_accepted("memory-info", self.send_message, urgent) def run(self): self._monitor.ping() new_timestamp = int(self._create_time()) memstats = MemoryStats(self._source_filename) memory_step_data = self._accumulate( new_timestamp, memstats.free_memory, "accumulate-memory") swap_step_data = self._accumulate( new_timestamp, memstats.free_swap, "accumulate-swap") if memory_step_data and swap_step_data: timestamp = memory_step_data[0] free_memory = int(memory_step_data[1]) free_swap = int(swap_step_data[1]) self._memory_info.append((timestamp, free_memory, free_swap)) landscape-client-14.01/landscape/monitor/networkdevice.py0000644000175000017500000000260212301414317023401 0ustar andreasandreas""" A monitor plugin that collects data on a machine's network devices. 
""" from landscape.monitor.plugin import DataWatcher from landscape.lib.network import get_active_device_info class NetworkDevice(DataWatcher): message_type = "network-device" message_key = "devices" persist_name = message_type scope = "network" def __init__(self, device_info=get_active_device_info): super(NetworkDevice, self).__init__() self._device_info = device_info def register(self, registry): super(NetworkDevice, self).register(registry) self.call_on_accepted(self.message_type, self.exchange, True) def get_message(self): device_data = self._device_info() # Persist if the info is new. if self._persist.get("network-device-data") != device_data: self._persist.set("network-device-data", device_data) # We need to split the message in two top-level keys (see bug) device_speeds = [] for device in device_data: speed_entry = {"interface": device["interface"]} speed_entry["speed"] = device.pop("speed") speed_entry["duplex"] = device.pop("duplex") device_speeds.append(speed_entry) return {"type": self.message_type, "devices": device_data, "device-speeds": device_speeds} landscape-client-14.01/landscape/monitor/temperature.py0000644000175000017500000000570312301414317023072 0ustar andreasandreasimport time from landscape.lib.monitor import CoverageMonitor from landscape.lib.sysstats import get_thermal_zones from landscape.accumulate import Accumulator from landscape.monitor.plugin import MonitorPlugin class Temperature(MonitorPlugin): """Capture thermal zone temperatures and trip point settings.""" persist_name = "temperature" scope = "temperature" # Prevent the Plugin base-class from scheduling looping calls. run_interval = None def __init__(self, interval=30, monitor_interval=60 * 60, thermal_zone_path=None, create_time=time.time): self.thermal_zone_path = thermal_zone_path self._interval = interval self._monitor_interval = monitor_interval self._create_time = create_time self._thermal_zones = [] self._temperatures = {} for thermal_zone in get_thermal_zones(self.thermal_zone_path): self._thermal_zones.append(thermal_zone.name) self._temperatures[thermal_zone.name] = [] def register(self, registry): super(Temperature, self).register(registry) if self._thermal_zones: self._accumulate = Accumulator(self._persist, self.registry.step_size) registry.reactor.call_every(self._interval, self.run) self._monitor = CoverageMonitor(self._interval, 0.8, "temperature snapshot", create_time=self._create_time) registry.reactor.call_every(self._monitor_interval, self._monitor.log) registry.reactor.call_on("stop", self._monitor.log, priority=2000) self.call_on_accepted("temperature", self.exchange, True) def create_messages(self): messages = [] for zone in self._thermal_zones: temperatures = self._temperatures[zone] self._temperatures[zone] = [] if not temperatures: continue messages.append({"type": "temperature", "thermal-zone": zone, "temperatures": temperatures}) return messages def send_messages(self, urgent): for message in self.create_messages(): self.registry.broker.send_message( message, self._session_id, urgent=urgent) def exchange(self, urgent=False): self.registry.broker.call_if_accepted("temperature", self.send_messages, urgent) def run(self): self._monitor.ping() now = int(self._create_time()) for zone in get_thermal_zones(self.thermal_zone_path): if zone.temperature_value is not None: key = ("accumulate", zone.name) step_data = self._accumulate(now, zone.temperature_value, key) if step_data: self._temperatures[zone.name].append(step_data) 
landscape-client-14.01/landscape/deployment.py0000644000175000017500000004412012301414317021222 0ustar andreasandreasimport os import sys from configobj import ConfigObj, ConfigObjError from logging import (getLevelName, getLogger, FileHandler, StreamHandler, Formatter) from optparse import OptionParser, SUPPRESS_HELP from landscape import VERSION from landscape.lib.persist import Persist from landscape.upgraders import UPGRADE_MANAGERS def init_logging(configuration, program_name): """Given a basic configuration, set up logging.""" handlers = [] if not os.path.exists(configuration.log_dir): os.makedirs(configuration.log_dir) log_filename = os.path.join(configuration.log_dir, program_name + ".log") handlers.append(FileHandler(log_filename)) if not configuration.quiet: handlers.append(StreamHandler(sys.stdout)) getLogger().setLevel(getLevelName(configuration.log_level.upper())) for handler in handlers: getLogger().addHandler(handler) format = ("%(asctime)s %(levelname)-8s [%(threadName)-10s] " "%(message)s") handler.setFormatter(Formatter(format)) class ConfigSpecOptionParser(OptionParser): _config_spec_definitions = {} def __init__(self, unsaved_options=None): OptionParser.__init__(self, unsaved_options) def add_option(self, *args, **kwargs): option = OptionParser.add_option(self, *args, **kwargs) print dir(option) print option.get_opt_string() return option class BaseConfiguration(object): """Base class for configuration implementations. @cvar required_options: Optionally, a sequence of key names to require when reading or writing a configuration. @cvar unsaved_options: Optionally, a sequence of key names to never write to the configuration file. This is useful when you want to provide command-line options that should never end up in a configuration file. @cvar default_config_filenames: A sequence of filenames to check when reading or writing a configuration. Default values for supported options are set as in L{make_parser}. """ required_options = () unsaved_options = () default_config_filenames = ["/etc/landscape/client.conf"] if (os.path.dirname(os.path.abspath(sys.argv[0])) == os.path.abspath("scripts")): default_config_filenames.insert(0, "landscape-client.conf") default_config_filenames = tuple(default_config_filenames) config_section = "client" def __init__(self): self._set_options = {} self._command_line_args = [] self._command_line_options = {} self._config_filename = None self._config_file_options = {} self._parser = self.make_parser() self._command_line_defaults = self._parser.defaults.copy() # We don't want them mixed with explicitly given options, # otherwise we can't define the precedence properly. self._parser.defaults.clear() def __getattr__(self, name): """Find and return the value of the given configuration parameter. The following sources will be searched: - The attributes that were explicitly set on this object, - The parameters specified on the command line, - The parameters specified in the configuration file, and - The defaults. If no values are found and the parameter does exist as a possible parameter, C{None} is returned. Otherwise C{AttributeError} is raised. 
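For example (an illustrative note, not in the original docstring): with "--data-path /tmp/x" on the command line and "data_path" also set in the config file, C{config.data_path} yields "/tmp/x", because command-line options take precedence over configuration-file values.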
""" for options in [self._set_options, self._command_line_options, self._config_file_options, self._command_line_defaults]: if name in options: value = options[name] break else: if self._parser.has_option("--" + name.replace("_", "-")): value = None else: raise AttributeError(name) if isinstance(value, basestring): option = self._parser.get_option("--" + name.replace("_", "-")) if option is not None: value = option.convert_value(None, value) return value def clone(self): """ Return a new configuration object, with the same settings as this one. """ config = self.__class__() config._set_options = self._set_options.copy() config._command_line_options = self._command_line_options.copy() config._config_filename = self._config_filename config._config_file_options = self._config_file_options.copy() return config def get(self, name, default=None): """Return the value of the C{name} option or C{default}.""" try: return self.__getattr__(name) except AttributeError: return default def __setattr__(self, name, value): """Set a configuration parameter. If the name begins with C{_}, it will only be set on this object and not stored in the configuration file. """ if name.startswith("_"): super(BaseConfiguration, self).__setattr__(name, value) else: self._set_options[name] = value def reload(self): """Reload options using the configured command line arguments. @see: L{load_command_line} """ self.load(self._command_line_args) def load(self, args, accept_nonexistent_config=False): """ Load configuration data from command line arguments and a config file. @raise: A SystemExit if the arguments are bad. """ self.load_command_line(args) if self.config: config_filenames = [self.config] else: config_filenames = self.default_config_filenames # Parse configuration file, if found. for config_filename in config_filenames: if (os.path.isfile(config_filename) and os.access(config_filename, os.R_OK)): self.load_configuration_file(config_filename) break else: if not accept_nonexistent_config: if len(config_filenames) == 1: message = ( "error: config file %s can't be read" % config_filenames[0]) else: message = "error: no config file could be read" sys.exit(message) self._load_external_options() # Check that all needed options were given. for option in self.required_options: if not getattr(self, option): sys.exit("error: must specify --%s " "or the '%s' directive in the config file." % (option.replace('_', '-'), option)) def _load_external_options(self): """Hook for loading options from elsewhere (e.g. for --import).""" def load_command_line(self, args): """Load configuration data from the given command line.""" self._command_line_args = args values = self._parser.parse_args(args)[0] self._command_line_options = vars(values) def load_configuration_file(self, filename): """Load configuration data from the given file name. If any data has already been set on this configuration object, then the old data will take precedence. """ self._config_filename = filename config_obj = self._get_config_object() try: self._config_file_options = config_obj[self.config_section] except KeyError: pass def _get_config_object(self, alternative_config=None): """Create a L{ConfigObj} consistent with our preferences. @param config_source: Optional readable source to read from instead of the default configuration file. """ config_source = alternative_config or self.get_config_filename() # Setting list_values to False prevents ConfigObj from being "smart" # about lists (it now treats them as strings). See bug #1228301 for # more context. 
# Setting raise_errors to False causes ConfigObj to batch all parsing errors into one ConfigObjError raised at the end of the parse instead of raising the first one and then exiting. This also allows us to recover the good config values in the error handler below. # Setting write_empty_values to True prevents ConfigObj writes # from writing "" as an empty value, which get_plugins interprets as # '""', which searches for a plugin named "". See bug #1241821. try: config_obj = ConfigObj(config_source, list_values=False, raise_errors=False, write_empty_values=True) except ConfigObjError, e: logger = getLogger() logger.warn(str(e)) # Good configuration values are recovered here config_obj = e.config return config_obj def write(self): """Write back configuration to the configuration file. Values which match the default option in the parser won't be saved. Options are considered in the following precedence: 1. Manually set options (C{config.option = value}) 2. Options passed in the command line 3. Previously existing options in the configuration file The filename picked for saving configuration options is the one returned by L{get_config_filename}. """ # The filename we'll write to filename = self.get_config_filename() # Make sure we read the old values from the config file so that we # don't remove *unrelated* values. config_obj = self._get_config_object() if not self.config_section in config_obj: config_obj[self.config_section] = {} all_options = self._config_file_options.copy() all_options.update(self._command_line_options) all_options.update(self._set_options) section = config_obj[self.config_section] for name, value in all_options.items(): if name != "config" and name not in self.unsaved_options: if (value == self._command_line_defaults.get(name) and name not in self._config_file_options and name not in self._command_line_options): # We don't want to write this value to the config file, # as it is the default value and was not present in the # config file to begin with if name in config_obj[self.config_section]: del config_obj[self.config_section][name] else: section[name] = value config_obj[self.config_section] = section config_obj.filename = filename config_obj.write() def make_parser(self): """Parser factory for supported options. @return: An L{OptionParser} preset with options that all landscape-related programs accept. These include - C{config} (C{None}) - C{data_path} (C{"/var/lib/landscape/client/"}) """ parser = OptionParser(version=VERSION) parser.add_option("-c", "--config", metavar="FILE", help="Use config from this file (any command line " "options override settings from the file) " "(default: '/etc/landscape/client.conf').") parser.add_option("-d", "--data-path", metavar="PATH", default="/var/lib/landscape/client/", help="The directory to store data files in " "(default: '/var/lib/landscape/client/').") return parser def get_config_filename(self): """Pick the proper configuration file. The picked filename is: 1. C{self.config}, if defined 2. The last loaded configuration file, if any 3. The first filename in C{self.default_config_filenames} """ if self.config: return self.config if self._config_filename: return self._config_filename if self.default_config_filenames: for potential_config_file in self.default_config_filenames: if os.access(potential_config_file, os.R_OK): return potential_config_file return self.default_config_filenames[0] return None def get_command_line_options(self): """Get currently loaded command line options.
@see: L{load_command_line} """ return self._command_line_options class Configuration(BaseConfiguration): """Configuration data for Landscape client. This contains all simple data, some of it calculated. """ DEFAULT_URL = "https://landscape.canonical.com/message-system" def make_parser(self): """Parser factory for supported options. @return: An L{OptionParser} preset for all options from L{BaseConfiguration.make_parser} plus: - C{quiet} (C{False}) - C{log_dir} (C{"/var/log/landscape"}) - C{log_level} (C{"info"}) - C{url} (C{"https://landscape.canonical.com/message-system"}) - C{ping_url} (C{"http://landscape.canonical.com/ping"}) - C{ssl_public_key} - C{server_autodiscover} (C{"false"}) - C{autodiscover_srv_query_string} (C{"_landscape._tcp.localdomain"}) - C{autodiscover_a_query_string} (C{"landscape.localdomain"}) - C{ignore_sigint} (C{False}) """ parser = super(Configuration, self).make_parser() parser.add_option("-q", "--quiet", default=False, action="store_true", help="Do not log to the standard output.") parser.add_option("-l", "--log-dir", metavar="FILE", help="The directory to write log files to " "(default: '/var/log/landscape').", default="/var/log/landscape") parser.add_option("--log-level", default="info", help="One of debug, info, warning, error or " "critical.") parser.add_option("-u", "--url", default=self.DEFAULT_URL, help="The server URL to connect to.") parser.add_option("--ping-url", help="The URL to perform lightweight exchange " "initiation with.", default="http://landscape.canonical.com/ping") parser.add_option("-k", "--ssl-public-key", help="The public SSL key to verify the server. " "Only used if the given URL is https.") parser.add_option("--server-autodiscover", type="string", default=False, help="Enable server autodiscovery.") parser.add_option("--autodiscover-srv-query-string", type="string", default="_landscape._tcp.localdomain", help="Autodiscovery string for DNS SRV queries.") parser.add_option("--autodiscover-a-query-string", type="string", default="landscape.localdomain", help="Autodiscovery string for DNS A queries.") parser.add_option("--ignore-sigint", action="store_true", default=False, help="Ignore interrupt signals.") parser.add_option("--ignore-sigusr1", action="store_true", default=False, help="Ignore SIGUSR1 signal to " "rotate logs.") parser.add_option("--package-monitor-interval", default=30 * 60, type="int", help="The interval between package monitor runs " "(default: 1800).") parser.add_option("--apt-update-interval", default=6 * 60 * 60, type="int", help="The interval between apt update runs " "(default: 21600).") parser.add_option("--flush-interval", default=5 * 60, type="int", metavar="INTERVAL", help="The number of seconds between flushes to disk " "for persistent data.") # Hidden options, used for load-testing to run in-process clones parser.add_option("--clones", default=0, type=int, help=SUPPRESS_HELP) parser.add_option("--start-clones-over", default=25 * 60, type=int, help=SUPPRESS_HELP) return parser def load(self, args, accept_nonexistent_config=False): """ Load configuration data from command line arguments and a config file.
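In addition to the base behaviour, this coerces C{server_autodiscover} to a bool, so the strings "true" and "false" coming from a config file behave like the corresponding boolean values (see the code below; this note is added for illustration).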
""" super(Configuration, self).load( args, accept_nonexistent_config=accept_nonexistent_config) if not isinstance(self.server_autodiscover, bool): autodiscover = str(self.server_autodiscover).lower() self.server_autodiscover = (autodiscover == "true") @property def sockets_path(self): """Return the path to the directory where Unix sockets are created.""" return os.path.join(self.data_path, "sockets") @property def annotations_path(self): """ Return the path to the directory where additional annotation files can be stored. """ return os.path.join(self.data_path, "annotations.d") @property def juju_filename(self): """Get the path to the Juju JSON file.""" return os.path.join(self.data_path, "juju-info.json") def get_versioned_persist(service): """Get a L{Persist} database with upgrade rules applied. Load a L{Persist} database for the given C{service} and upgrade or mark as current, as necessary. """ persist = Persist(filename=service.persist_filename) upgrade_manager = UPGRADE_MANAGERS[service.service_name] if os.path.exists(service.persist_filename): upgrade_manager.apply(persist) else: upgrade_manager.initialize(persist) persist.save(service.persist_filename) return persist landscape-client-14.01/landscape/tests/0000755000175000017500000000000012301414317017631 5ustar andreasandreaslandscape-client-14.01/landscape/tests/helpers.py0000644000175000017500000005125712301414317021657 0ustar andreasandreasfrom cStringIO import StringIO from ConfigParser import ConfigParser import logging import shutil import pprint import re import os import sys import unittest from logging import Handler, ERROR, Formatter from twisted.trial.unittest import TestCase from twisted.python.failure import Failure from twisted.internet.defer import Deferred from landscape.tests.subunit import run_isolated from landscape.tests.mocker import MockerTestCase from landscape.watchdog import bootstrap_list from landscape.lib.persist import Persist from landscape.reactor import FakeReactor from landscape.deployment import BaseConfiguration from landscape.broker.config import BrokerConfiguration from landscape.broker.transport import FakeTransport from landscape.monitor.config import MonitorConfiguration from landscape.monitor.monitor import Monitor from landscape.manager.manager import Manager from landscape.broker.service import BrokerService from landscape.broker.amp import FakeRemoteBroker, RemoteBrokerConnector from landscape.manager.config import ManagerConfiguration DEFAULT_ACCEPTED_TYPES = [ "accepted-types", "registration", "resynchronize", "set-id", "set-intervals", "unknown-id"] class HelperTestCase(unittest.TestCase): helpers = [] def setUp(self): self._helper_instances = [] if LogKeeperHelper not in self.helpers: self.helpers.insert(0, LogKeeperHelper) result = None for helper_factory in self.helpers: helper = helper_factory() if hasattr(helper, "set_up"): result = helper.set_up(self) self._helper_instances.append(helper) # Return the return value of the last helper, which # might be a deferred return result def tearDown(self): for helper in reversed(self._helper_instances): if hasattr(helper, "tear_down"): helper.tear_down(self) class MessageTestCase(unittest.TestCase): def assertMessage(self, obtained, expected): obtained = obtained.copy() for key in ["api", "timestamp"]: if key not in expected and key in obtained: obtained.pop(key) if obtained != expected: raise self.failureException("Messages don't match.\n" "Expected:\n%s\nObtained:\n%s\n" % (pprint.pformat(expected), pprint.pformat(obtained))) def 
assertMessages(self, obtained, expected): self.assertEqual(type(obtained), list) self.assertEqual(type(expected), list) for obtained_message, expected_message in zip(obtained, expected): self.assertMessage(obtained_message, expected_message) obtained_len = len(obtained) expected_len = len(expected) diff = abs(expected_len - obtained_len) if obtained_len < expected_len: extra = pprint.pformat(expected[-diff:]) raise self.failureException("Expected the following %d additional " "messages:\n%s" % (diff, extra)) elif expected_len < obtained_len: extra = pprint.pformat(obtained[-diff:]) raise self.failureException("Got %d more messages than expected:\n" "%s" % (diff, extra)) class LandscapeTest(MessageTestCase, MockerTestCase, HelperTestCase, TestCase): def setUp(self): self._old_config_filenames = BaseConfiguration.default_config_filenames BaseConfiguration.default_config_filenames = [self.makeFile("")] MockerTestCase.setUp(self) TestCase.setUp(self) return HelperTestCase.setUp(self) def tearDown(self): BaseConfiguration.default_config_filenames = self._old_config_filenames TestCase.tearDown(self) HelperTestCase.tearDown(self) MockerTestCase.tearDown(self) def successResultOf(self, deferred): """See C{twisted.trial._synctest._Assertions.successResultOf}. This is a copy of the original method, which is available only since Twisted 12.3.0 (from 2012-12-20). """ result = [] deferred.addBoth(result.append) if not result: self.fail( "Success result expected on %r, found no result instead" % ( deferred,)) elif isinstance(result[0], Failure): self.fail( "Success result expected on %r, " "found failure result (%r) instead" % (deferred, result[0])) else: return result[0] def failureResultOf(self, deferred): """See C{twisted.trial._synctest._Assertions.failureResultOf}. This is a copy of the original method, which is available only since Twisted 12.3.0 (from 2012-12-20). """ result = [] deferred.addBoth(result.append) if not result: self.fail( "Failure result expected on %r, found no result instead" % ( deferred,)) elif not isinstance(result[0], Failure): self.fail( "Failure result expected on %r, " "found success result (%r) instead" % (deferred, result[0])) else: return result[0] def assertNoResult(self, deferred): """See C{twisted.trial._synctest._Assertions.assertNoResult}. This is a copy of the original method, which is available only since Twisted 12.3.0 (from 2012-12-20). """ result = [] deferred.addBoth(result.append) if result: self.fail( "No result expected on %r, found %r instead" % ( deferred, result[0])) def assertDeferredSucceeded(self, deferred): self.assertTrue(isinstance(deferred, Deferred)) called = [] def callback(result): called.append(True) deferred.addCallback(callback) self.assertTrue(called) def assertSuccess(self, deferred, result=None): """ Assert that the given C{deferred} results in the given C{result}. """ self.assertTrue(isinstance(deferred, Deferred)) return deferred.addCallback(self.assertEqual, result) def assertFileContent(self, filename, expected_content): fd = open(filename) actual_content = fd.read() fd.close() self.assertEqual(expected_content, actual_content) def assertConfigEqual(self, first, second): """ Compare two configuration files for equality. The order of parameters and comments may be different but the actual parameters and sections must be the same. 
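For instance (an illustrative example): "[client]\nurl = x\n" compares equal to "# a comment\n[client]\nurl = x\n", while a differing value for any key fails the assertion.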
""" first_fp = StringIO(first) first_parser = ConfigParser() first_parser.readfp(first_fp) second_fp = StringIO(second) second_parser = ConfigParser() second_parser.readfp(second_fp) self.assertEqual(set(first_parser.sections()), set(second_parser.sections())) for section in first_parser.sections(): self.assertEqual(dict(first_parser.items(section)), dict(second_parser.items(section))) def makePersistFile(self, *args, **kwargs): """Return a temporary filename to be used by a L{Persist} object. The possible .old persist file is cleaned up after the test. @see: L{MockerTestCase.makeFile} """ persist_filename = self.makeFile(*args, **kwargs) def remove_saved_persist(): try: os.remove(persist_filename + ".old") except OSError: pass self.addCleanup(remove_saved_persist) return persist_filename class LandscapeIsolatedTest(LandscapeTest): """TestCase that also runs all test methods in a subprocess.""" def run(self, result): if not getattr(LandscapeTest, "_cleanup_patch", False): run_method = LandscapeTest.run def run_wrapper(oself, *args, **kwargs): try: return run_method(oself, *args, **kwargs) finally: MockerTestCase._MockerTestCase__cleanup(oself) LandscapeTest.run = run_wrapper LandscapeTest._cleanup_patch = True run_isolated(LandscapeTest, self, result) class ErrorHandler(Handler): def __init__(self, *args, **kwargs): Handler.__init__(self, *args, **kwargs) self.errors = [] def emit(self, record): if record.levelno >= ERROR: self.errors.append(record) class LoggedErrorsError(Exception): def __str__(self): out = "The following errors were logged\n" formatter = Formatter() for error in self.args[0]: out += formatter.format(error) + "\n" return out class LogKeeperHelper(object): """Record logging information. Puts a 'logfile' attribute on your test case, which is a StringIO containing all log output. 
""" def set_up(self, test_case): self.ignored_exception_regexes = [] self.ignored_exception_types = [] self.error_handler = ErrorHandler() test_case.log_helper = self test_case.logger = logger = logging.getLogger() test_case.logfile = StringIO() handler = logging.StreamHandler(test_case.logfile) format = ("%(levelname)8s: %(message)s") handler.setFormatter(logging.Formatter(format)) self.old_handlers = logger.handlers self.old_level = logger.level logger.handlers = [handler, self.error_handler] logger.setLevel(logging.NOTSET) def tear_down(self, test_case): logger = logging.getLogger() logger.setLevel(self.old_level) logger.handlers = self.old_handlers errors = [] for record in self.error_handler.errors: for ignored_type in self.ignored_exception_types: if (record.exc_info and record.exc_info[0] and issubclass(record.exc_info[0], ignored_type)): break else: for ignored_regex in self.ignored_exception_regexes: if ignored_regex.match(record.message): break else: errors.append(record) if errors: raise LoggedErrorsError(errors) def ignore_errors(self, type_or_regex): if isinstance(type_or_regex, basestring): self.ignored_exception_regexes.append(re.compile(type_or_regex)) else: self.ignored_exception_types.append(type_or_regex) class EnvironSnapshot(object): def __init__(self): self._snapshot = os.environ.copy() def restore(self): os.environ.update(self._snapshot) for key in list(os.environ): if key not in self._snapshot: del os.environ[key] class EnvironSaverHelper(object): def set_up(self, test_case): self._snapshot = EnvironSnapshot() def tear_down(self, test_case): self._snapshot.restore() class FakeBrokerServiceHelper(object): """ The following attributes will be set in your test case: - broker_service: A C{BrokerService}. - remote: A C{FakeRemoteBroker} behaving like a L{RemoteBroker} connected to the broker serivice but performing all operation synchronously. """ def set_up(self, test_case): test_case.data_path = test_case.makeDir() log_dir = test_case.makeDir() test_case.config_filename = test_case.makeFile( "[client]\n" "url = http://localhost:91919\n" "computer_title = Some Computer\n" "account_name = some_account\n" "ping_url = http://localhost:91910\n" "data_path = %s\n" "log_dir = %s\n" % (test_case.data_path, log_dir)) bootstrap_list.bootstrap(data_path=test_case.data_path, log_dir=log_dir) config = BrokerConfiguration() config.load(["-c", test_case.config_filename]) class FakeBrokerService(BrokerService): reactor_factory = FakeReactor transport_factory = FakeTransport test_case.broker_service = FakeBrokerService(config) test_case.remote = FakeRemoteBroker( test_case.broker_service.exchanger, test_case.broker_service.message_store, test_case.broker_service.broker) class BrokerServiceHelper(FakeBrokerServiceHelper): """ Provides what L{FakeBrokerServiceHelper} does, and makes it a 'live' service using a real L{RemoteBroker} connected over AMP. This adds the following attributes to your test case: - remote: A connected L{RemoteBroker}. 
""" def set_up(self, test_case): super(BrokerServiceHelper, self).set_up(test_case) test_case.broker_service.startService() # Use different reactor to simulate separate processes self._connector = RemoteBrokerConnector( FakeReactor(), test_case.broker_service.config) deferred = self._connector.connect() test_case.remote = test_case.successResultOf(deferred) def tear_down(self, test_case): self._connector.disconnect() test_case.broker_service.stopService() class MonitorHelper(FakeBrokerServiceHelper): """ Provides everything that L{FakeBrokerServiceHelper} does plus a L{Monitor} instance. """ def set_up(self, test_case): super(MonitorHelper, self).set_up(test_case) persist = Persist() persist_filename = test_case.makePersistFile() test_case.config = MonitorConfiguration() test_case.config.load(["-c", test_case.config_filename]) test_case.reactor = FakeReactor() test_case.monitor = Monitor( test_case.reactor, test_case.config, persist, persist_filename) test_case.monitor.broker = test_case.remote test_case.mstore = test_case.broker_service.message_store class ManagerHelper(FakeBrokerServiceHelper): """ Provides everything that L{FakeBrokerServiceHelper} does plus a L{Manager} instance. """ def set_up(self, test_case): super(ManagerHelper, self).set_up(test_case) test_case.config = ManagerConfiguration() test_case.config.load(["-c", test_case.config_filename]) test_case.reactor = FakeReactor() test_case.manager = Manager(test_case.reactor, test_case.config) test_case.manager.broker = test_case.remote class MockPopen(object): def __init__(self, output, return_codes=None): self.output = output self.stdout = StringIO(output) self.popen_inputs = [] self.return_codes = return_codes def __call__(self, args, stdout=None, stderr=None): return self.popen(args, stdout=stdout, stderr=stderr) def popen(self, args, stdout=None, stderr=None): self.popen_inputs.append(args) return self def wait(self): if self.return_codes is None: return 0 return self.return_codes.pop(0) class StandardIOHelper(object): def set_up(self, test_case): from StringIO import StringIO test_case.old_stdout = sys.stdout test_case.old_stdin = sys.stdin test_case.stdout = sys.stdout = StringIO() test_case.stdin = sys.stdin = StringIO() test_case.stdin.encoding = "UTF-8" def tear_down(self, test_case): sys.stdout = test_case.old_stdout sys.stdin = test_case.old_stdin class MockCoverageMonitor(object): def __init__(self, count=None, expected_count=None, percent=None, since_reset=None, warn=None): self.count = count or 0 self.expected_count = expected_count or 0 self.percent = percent or 0.0 self.since_reset_value = since_reset or 0 self.warn_value = bool(warn) def since_reset(self): return self.since_reset_value def warn(self): return self.warn_value def reset(self): pass class MockFrequencyMonitor(object): def __init__(self, count=None, expected_count=None, warn=None): self.count = count or 0 self.expected_count = expected_count or 0 self.warn_value = bool(warn) def warn(self): return self.warn_value def reset(self): pass def mock_counter(i=0): """Generator starts at zero and yields integers that grow by one.""" while True: yield i i += 1 def mock_time(): """Generator starts at 100 and yields int timestamps that grow by one.""" return mock_counter(100) class StubProcessFactory(object): """ A L{IReactorProcess} provider which records L{spawnProcess} calls and allows tests to get at the protocol. 
""" def __init__(self): self.spawns = [] def spawnProcess(self, protocol, executable, args=(), env={}, path=None, uid=None, gid=None, usePTY=0, childFDs=None): self.spawns.append((protocol, executable, args, env, path, uid, gid, usePTY, childFDs)) class DummyProcess(object): """A process (transport) that doesn't do anything.""" def __init__(self): self.signals = [] def signalProcess(self, signal): self.signals.append(signal) def closeChildFD(self, fd): pass class ProcessDataBuilder(object): """Builder creates sample data for the process info plugin to consume. @param sample_dir: The directory for sample data. """ RUNNING = "R (running)" STOPPED = "T (stopped)" TRACING_STOP = "T (tracing stop)" DISK_SLEEP = "D (disk sleep)" SLEEPING = "S (sleeping)" DEAD = "X (dead)" ZOMBIE = "Z (zombie)" def __init__(self, sample_dir): self._sample_dir = sample_dir def create_data(self, process_id, state, uid, gid, started_after_boot=0, process_name=None, generate_cmd_line=True, stat_data=None, vmsize=11676): """Creates sample data for a process. @param started_after_boot: The amount of time, in jiffies, between the system uptime and start of the process. @param process_name: Used to generate the process name that appears in /proc/%(pid)s/status @param generate_cmd_line: If true, place the process_name in /proc/%(pid)s/cmdline, otherwise leave it empty (this simulates a kernel process) @param stat_data: Array of items to write to the /proc//stat file. """ sample_data = """ Name: %(process_name)s State: %(state)s Tgid: 24759 Pid: 24759 PPid: 17238 TracerPid: 0 Uid: %(uid)d 0 0 0 Gid: %(gid)d 0 0 0 FDSize: 256 Groups: 4 20 24 25 29 30 44 46 106 110 112 1000 VmPeak: 11680 kB VmSize: %(vmsize)d kB VmLck: 0 kB VmHWM: 6928 kB VmRSS: 6924 kB VmData: 1636 kB VmStk: 196 kB VmExe: 1332 kB VmLib: 4240 kB VmPTE: 20 kB Threads: 1 SigQ: 0/4294967295 SigPnd: 0000000000000000 ShdPnd: 0000000000000000 SigBlk: 0000000000000000 SigIgn: 0000000000000000 SigCgt: 0000000059816eff CapInh: 0000000000000000 CapPrm: 0000000000000000 CapEff: 0000000000000000 """ % ({"process_name": process_name[:15], "state": state, "uid": uid, "gid": gid, "vmsize": vmsize}) process_dir = os.path.join(self._sample_dir, str(process_id)) os.mkdir(process_dir) filename = os.path.join(process_dir, "status") file = open(filename, "w+") try: file.write(sample_data) finally: file.close() if stat_data is None: stat_data = """\ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 %d\ """ % (started_after_boot,) filename = os.path.join(process_dir, "stat") file = open(filename, "w+") try: file.write(stat_data) finally: file.close() if generate_cmd_line: sample_data = """\ /usr/sbin/%(process_name)s\0--pid-file\0/var/run/%(process_name)s.pid\0 """ % {"process_name": process_name} else: sample_data = "" filename = os.path.join(process_dir, "cmdline") file = open(filename, "w+") try: file.write(sample_data) finally: file.close() def remove_data(self, process_id): """Remove sample data for the process that matches C{process_id}.""" process_dir = os.path.join(self._sample_dir, str(process_id)) shutil.rmtree(process_dir) class FakePersist(object): """ Incompletely fake a C{landscape.lib.Persist} to simplify higher level tests that result in an attempt to clear down persisted data. 
""" def __init__(self): self.called = False def remove(self, key): self.called = True landscape-client-14.01/landscape/tests/test_log.py0000644000175000017500000000500612301414317022024 0ustar andreasandreasimport logging from landscape.log import (format_object, format_delta, format_percent, rotate_logs) from landscape.tests.helpers import LandscapeTest def function(): pass class FormatObjectTest(LandscapeTest): def test_format_instance(self): self.assertEqual(format_object(self), "landscape.tests.test_log.FormatObjectTest") def method(self): pass def test_format_method(self): self.assertEqual(format_object(self.method), "landscape.tests.test_log.FormatObjectTest.method()") def test_format_function(self): self.assertEqual(format_object(function), "landscape.tests.test_log.function()") # FIXME Write tests to make sure that inner functions render # usefully. class FormatDeltaTest(LandscapeTest): def test_format_float(self): self.assertEqual(format_delta(0.0), "0.00s") self.assertEqual(format_delta(47.16374), "47.16s") self.assertEqual(format_delta(100.0), "100.00s") def test_format_int(self): self.assertEqual(format_delta(0), "0.00s") self.assertEqual(format_delta(47), "47.00s") self.assertEqual(format_delta(100), "100.00s") def test_format_none(self): self.assertEqual(format_delta(None), "0.00s") class FormatPercentTest(LandscapeTest): def test_format_float(self): self.assertEqual(format_percent(0.0), "0.00%") self.assertEqual(format_percent(47.16374), "47.16%") self.assertEqual(format_percent(100.0), "100.00%") def test_format_int(self): self.assertEqual(format_percent(0), "0.00%") self.assertEqual(format_percent(47), "47.00%") self.assertEqual(format_percent(100), "100.00%") def test_format_none(self): self.assertEqual(format_percent(None), "0.00%") class RotateLogsTest(LandscapeTest): def test_log_rotation(self): logging.getLogger().addHandler(logging.FileHandler(self.makeFile())) # Store the initial set of handlers original_streams = [handler.stream for handler in logging.getLogger().handlers if isinstance(handler, logging.FileHandler)] rotate_logs() new_streams = [handler.stream for handler in logging.getLogger().handlers if isinstance(handler, logging.FileHandler)] for stream in new_streams: self.assertTrue(stream not in original_streams) landscape-client-14.01/landscape/tests/test_sysvconfig.py0000644000175000017500000000624512301414317023443 0ustar andreasandreasfrom landscape.tests.helpers import LandscapeTest from landscape.sysvconfig import SysVConfig, ProcessError class SysVConfigTest(LandscapeTest): def test_set_to_run_on_boot(self): filename = self.makeFile("RUN=0\n") sysvconfig = SysVConfig(filename) sysvconfig.set_start_on_boot(True) self.assertEqual(file(filename, "r").read(), "RUN=1\n") def test_set_to_not_run_on_boot(self): filename = self.makeFile("RUN=1\n") sysvconfig = SysVConfig(filename) sysvconfig.set_start_on_boot(False) self.assertEqual(file(filename, "r").read(), "RUN=0\n") def test_configured_to_run(self): filename = self.makeFile("RUN=1\n") sysvconfig = SysVConfig(filename) self.assertTrue(sysvconfig.is_configured_to_run()) def test_not_configured_to_run(self): filename = self.makeFile("RUN=0\n") sysvconfig = SysVConfig(filename) self.assertFalse(sysvconfig.is_configured_to_run()) def test_blank_line(self): filename = self.makeFile("RUN=1\n\n") sysvconfig = SysVConfig(filename) self.assertTrue(sysvconfig.is_configured_to_run()) def test_spaces(self): filename = self.makeFile(" RUN = 1 \n") sysvconfig = SysVConfig(filename) 
self.assertFalse(sysvconfig.is_configured_to_run()) def test_leading_and_trailing_spaces(self): filename = self.makeFile(" RUN=1 \n") sysvconfig = SysVConfig(filename) self.assertTrue(sysvconfig.is_configured_to_run()) def test_spaces_in_value(self): filename = self.makeFile(" RUN= 1 \n") sysvconfig = SysVConfig(filename) self.assertFalse(sysvconfig.is_configured_to_run()) def test_non_integer_run(self): filename = self.makeFile("RUN=yesplease") sysvconfig = SysVConfig(filename) self.assertTrue(sysvconfig.is_configured_to_run()) def test_run_landscape(self): system = self.mocker.replace("os.system") system("/etc/init.d/landscape-client restart") self.mocker.replay() filename = self.makeFile("RUN=1\n") sysvconfig = SysVConfig(filename) sysvconfig.restart_landscape() def test_run_landscape_with_error(self): system = self.mocker.replace("os.system") system("/etc/init.d/landscape-client restart") self.mocker.result(-1) self.mocker.replay() filename = self.makeFile("RUN=1\n") sysvconfig = SysVConfig(filename) self.assertRaises(ProcessError, sysvconfig.restart_landscape) def test_stop_landscape(self): system = self.mocker.replace("os.system") system("/etc/init.d/landscape-client stop") self.mocker.replay() filename = self.makeFile("RUN=1\n") sysvconfig = SysVConfig(filename) sysvconfig.stop_landscape() def test_stop_landscape_with_error(self): system = self.mocker.replace("os.system") system("/etc/init.d/landscape-client stop") self.mocker.result(-1) self.mocker.replay() filename = self.makeFile("RUN=1\n") sysvconfig = SysVConfig(filename) self.assertRaises(ProcessError, sysvconfig.stop_landscape) landscape-client-14.01/landscape/tests/clock.py0000644000175000017500000002142412301414317021301 0ustar andreasandreas# Copyright (c) 2001-2007 Twisted Matrix Laboratories. # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ Copies of certain classes from Twisted 2.5, so that we can use the functionality they provide with Twisted 2.2 and up. These should really only be used from the test suite for now. Currently: * L{twisted.internet.task.Clock}, which is new in Twisted 2.5. * L{twisted.internet.base.DelayedCall}, which didn't grow its C{seconds} argument until after Twisted 2.2. """ from twisted.internet import error from twisted.python.runtime import seconds as runtimeSeconds from twisted.python import reflect import traceback class Clock: """ Provide a deterministic, easily-controlled implementation of L{IReactorTime.callLater}. 
This is commonly useful for writing deterministic unit tests for code which schedules events using this API. """ rightNow = 0.0 def __init__(self): self.calls = [] def seconds(self): """ Pretend to be time.time(). This is used internally when an operation such as L{IDelayedCall.reset} needs to determine a time value relative to the current time. @rtype: C{float} @return: The time which should be considered the current time. """ return self.rightNow def callLater(self, when, what, *a, **kw): """ See L{twisted.internet.interfaces.IReactorTime.callLater}. """ self.calls.append( DelayedCall(self.seconds() + when, what, a, kw, self.calls.remove, lambda c: None, self.seconds)) self.calls.sort(lambda a, b: cmp(a.getTime(), b.getTime())) return self.calls[-1] def advance(self, amount): """ Move time on this clock forward by the given amount and run whatever pending calls should be run. @type amount: C{float} @param amount: The number of seconds by which to advance this clock's time. """ self.rightNow += amount while self.calls and self.calls[0].getTime() <= self.seconds(): call = self.calls.pop(0) call.called = 1 call.func(*call.args, **call.kw) def pump(self, timings): """ Advance incrementally by the given set of times. @type timings: iterable of C{float} """ for amount in timings: self.advance(amount) class DelayedCall: # enable .debug to record creator call stack, and it will be logged if # an exception occurs while the function is being run debug = False _str = None def __init__(self, time, func, args, kw, cancel, reset, seconds=runtimeSeconds): """ @param time: Seconds from the epoch at which to call C{func}. @param func: The callable to call. @param args: The positional arguments to pass to the callable. @param kw: The keyword arguments to pass to the callable. @param cancel: A callable which will be called with this DelayedCall before cancellation. @param reset: A callable which will be called with this DelayedCall after changing this DelayedCall's scheduled execution time. The callable should adjust any necessary scheduling details to ensure this DelayedCall is invoked at the new appropriate time. @param seconds: If provided, a no-argument callable which will be used to determine the current time any time that information is needed. """ self.time, self.func, self.args, self.kw = time, func, args, kw self.resetter = reset self.canceller = cancel self.seconds = seconds self.cancelled = self.called = 0 self.delayed_time = 0 if self.debug: self.creator = traceback.format_stack()[:-2] def getTime(self): """Return the time at which this call will fire @rtype: C{float} @return: The number of seconds after the epoch at which this call is scheduled to be made. """ return self.time + self.delayed_time def cancel(self): """Unschedule this call @raise AlreadyCancelled: Raised if this call has already been unscheduled. @raise AlreadyCalled: Raised if this call has already been made. """ if self.cancelled: raise error.AlreadyCancelled elif self.called: raise error.AlreadyCalled else: self.canceller(self) self.cancelled = 1 if self.debug: self._str = str(self) del self.func, self.args, self.kw def reset(self, secondsFromNow): """Reschedule this call for a different time @type secondsFromNow: C{float} @param secondsFromNow: The number of seconds from the time of the C{reset} call at which this call will be scheduled. @raise AlreadyCancelled: Raised if this call has been cancelled. @raise AlreadyCalled: Raised if this call has already been made.
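A short sketch using the L{Clock} above:

    clock = Clock()
    call = clock.callLater(2, lambda: None)
    call.reset(5)
    assert call.getTime() == 5  # now due 5 seconds from the reset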
""" if self.cancelled: raise error.AlreadyCancelled elif self.called: raise error.AlreadyCalled else: newTime = self.seconds() + secondsFromNow if newTime < self.time: self.delayed_time = 0 self.time = newTime self.resetter(self) else: self.delayed_time = newTime - self.time def delay(self, secondsLater): """Reschedule this call for a later time @type secondsLater: C{float} @param secondsLater: The number of seconds after the originally scheduled time for which to reschedule this call. @raise AlreadyCancelled: Raised if this call has been cancelled. @raise AlreadyCalled: Raised if this call has already been made. """ if self.cancelled: raise error.AlreadyCancelled elif self.called: raise error.AlreadyCalled else: self.delayed_time += secondsLater if self.delayed_time < 0: self.activate_delay() self.resetter(self) def activate_delay(self): self.time += self.delayed_time self.delayed_time = 0 def active(self): """Determine whether this call is still pending @rtype: C{bool} @return: True if this call has not yet been made or cancelled, False otherwise. """ return not (self.cancelled or self.called) def __le__(self, other): return self.time <= other.time def __str__(self): if self._str is not None: return self._str if hasattr(self, 'func'): if hasattr(self.func, 'func_name'): func = self.func.func_name if hasattr(self.func, 'im_class'): func = self.func.im_class.__name__ + '.' + func else: func = reflect.safe_repr(self.func) else: func = None now = self.seconds() L = ["') return "".join(L) landscape-client-14.01/landscape/tests/test_plugin.py0000644000175000017500000000321212301414317022536 0ustar andreasandreasfrom twisted.internet.defer import Deferred from landscape.tests.helpers import LandscapeTest from landscape.plugin import PluginRegistry class SamplePlugin(object): plugin_name = "sample" def __init__(self): self.registered = [] def register(self, monitor): self.registered.append(monitor) class ExchangePlugin(SamplePlugin): """A plugin which records exchange notification events.""" def __init__(self): super(ExchangePlugin, self).__init__() self.exchanged = 0 self.waiter = None def wait_for_exchange(self): self.waiter = Deferred() return self.waiter def exchange(self): self.exchanged += 1 if self.waiter is not None: self.waiter.callback(None) class PluginTest(LandscapeTest): def setUp(self): super(PluginTest, self).setUp() self.registry = PluginRegistry() def test_register_plugin(self): sample_plugin = SamplePlugin() self.registry.add(sample_plugin) self.assertEqual(sample_plugin.registered, [self.registry]) def test_get_plugins(self): plugin1 = SamplePlugin() plugin2 = SamplePlugin() self.registry.add(plugin1) self.registry.add(plugin2) self.assertEqual(self.registry.get_plugins()[-2:], [plugin1, plugin2]) def test_get_named_plugin(self): """ If a plugin has a C{plugin_name} attribute, it is possible to look it up by name after adding it to the L{Monitor}. 
""" plugin = SamplePlugin() self.registry.add(plugin) self.assertEqual(self.registry.get_plugin("sample"), plugin) landscape-client-14.01/landscape/tests/test_reactor.py0000644000175000017500000003112612301414317022704 0ustar andreasandreasimport thread import types import time from landscape.reactor import FakeReactor, LandscapeReactor from landscape.tests.helpers import LandscapeTest class ReactorTestMixin(object): def test_call_later(self): reactor = self.get_reactor() called = [] def dummy(): called.append("Hello!") reactor.stop() reactor.call_later(0, dummy) reactor.run() self.assertEqual(called, ["Hello!"]) def test_call_later_with_args(self): reactor = self.get_reactor() called = [] def dummy(a, b=3): called.append((a, b)) reactor.stop() reactor.call_later(0, dummy, "a", b="b") reactor.run() self.assertEqual(called, [("a", "b")]) def test_call_later_only_calls_once(self): reactor = self.get_reactor() called = [] def append(): called.append("Hey!") return True reactor.call_later(0, append) reactor.call_later(0.3, reactor.stop) reactor.run() self.assertEqual(len(called), 1) def test_cancel_call(self): reactor = self.get_reactor() called = [] id = reactor.call_later(0, called.append, "hi") reactor.cancel_call(id) reactor.call_later(0.3, reactor.stop) reactor.run() self.assertEqual(len(called), 0) def test_call_every(self): reactor = self.get_reactor() called = [] reactor.call_every(0.01, called.append, "hi") reactor.call_later(0.5, reactor.stop) reactor.run() self.failUnless(5 < len(called) < 100, len(called)) def test_cancel_call_every(self): reactor = self.get_reactor() called = [] id = reactor.call_every(0, called.append, "hi") reactor.cancel_call(id) reactor.call_later(0.3, reactor.stop) reactor.run() self.assertEqual(len(called), 0) def test_cancel_call_every_after_first_call(self): reactor = self.get_reactor() called = [] def cancel_call(): reactor.cancel_call(id) called.append("hi") id = reactor.call_every(0, cancel_call) reactor.call_later(0.1, reactor.stop) reactor.run() self.assertEqual(len(called), 1) def test_cancel_later_called(self): reactor = self.get_reactor() id = reactor.call_later(0, lambda: None) reactor.call_later(0.3, reactor.stop) reactor.run() reactor.cancel_call(id) def test_cancel_call_twice(self): """ Multiple cancellations of a call will not raise any exceptions. """ reactor = self.get_reactor() id = reactor.call_later(3, lambda: None) reactor.cancel_call(id) reactor.cancel_call(id) def test_reactor_doesnt_leak(self): reactor = self.get_reactor() called = [] reactor.call_later(0, called.append, "hi") reactor = self.get_reactor() reactor.call_later(0.01, reactor.stop) reactor.run() self.assertEqual(called, []) def test_event(self): reactor = self.get_reactor() called = [] def handle_foobar(): called.append(True) reactor.call_on("foobar", handle_foobar) reactor.fire("foobar") self.assertEqual(called, [True]) def test_event_multiple_fire(self): """ Once an event handler is registered, it's called all times that event type is fired. 
""" reactor = self.get_reactor() called = [] def handle_foobar(): called.append(True) reactor.call_on("foobar", handle_foobar) reactor.fire("foobar") reactor.fire("foobar") self.assertEqual(called, [True, True]) def test_event_with_args(self): reactor = self.get_reactor() called = [] def handle_foobar(a, b=3): called.append((a, b)) reactor.call_on("foobar", handle_foobar) reactor.fire("foobar", "a", b=6) self.assertEqual(called, [("a", 6)]) def test_events(self): reactor = self.get_reactor() called = [] reactor.call_on("foobar", called.append) reactor.call_on("foobar", called.append) reactor.fire("foobar", "a") self.assertEqual(called, ["a", "a"]) def test_events_result(self): reactor = self.get_reactor() generator = iter([1, 2, 3]).next reactor.call_on("foobar", generator) reactor.call_on("foobar", generator) reactor.call_on("foobar", generator) self.assertEqual(reactor.fire("foobar"), [1, 2, 3]) def test_event_priority(self): """ Event callbacks should be able to be scheduled with a priority which specifies the order they are run in. """ reactor = self.get_reactor() called = [] reactor.call_on("foobar", lambda: called.append(5), priority=5) reactor.call_on("foobar", lambda: called.append(3), priority=3) reactor.call_on("foobar", lambda: called.append(4), priority=4) reactor.fire("foobar") self.assertEqual(called, [3, 4, 5]) def test_default_priority(self): """ The default priority of an event callback should be 0. """ reactor = self.get_reactor() called = [] reactor.call_on("foobar", lambda: called.append(1), 1) reactor.call_on("foobar", lambda: called.append(0)) reactor.call_on("foobar", lambda: called.append(-1), -1) reactor.fire("foobar") self.assertEqual(called, [-1, 0, 1]) def test_exploding_event_handler(self): self.log_helper.ignore_errors(ZeroDivisionError) reactor = self.get_reactor() called = [] def handle_one(): called.append(1) def handle_two(): 1 / 0 def handle_three(): called.append(3) reactor.call_on("foobar", handle_one) reactor.call_on("foobar", handle_two) reactor.call_on("foobar", handle_three) reactor.fire("foobar") self.assertTrue(1 in called) self.assertTrue(3 in called) self.assertTrue("handle_two" in self.logfile.getvalue()) self.assertTrue("ZeroDivisionError" in self.logfile.getvalue(), self.logfile.getvalue()) def test_weird_event_type(self): #This can be useful for "namespaced" event types reactor = self.get_reactor() called = [] reactor.call_on(("message", "foobar"), called.append) reactor.fire(("message", "foobar"), "namespaced!") self.assertEqual(called, ["namespaced!"]) def test_nonexistent_event_type(self): reactor = self.get_reactor() reactor.fire("Blat!") def test_cancel_event(self): reactor = self.get_reactor() called = [] id = reactor.call_on("foobar", called.append) reactor.cancel_call(id) reactor.fire("foobar") self.assertEqual(called, []) def test_run_stop_events(self): reactor = self.get_reactor() called = [] called_copy = [] reactor.call_on("run", lambda: called.append("run")) reactor.call_on("stop", lambda: called.append("stop")) reactor.call_later(0.0, lambda: called_copy.extend(called)) reactor.call_later(0.5, reactor.stop) reactor.run() self.assertEqual(called, ["run", "stop"]) self.assertEqual(called_copy, ["run"]) def test_call_in_thread(self): reactor = self.get_reactor() called = [] def f(a, b, c): called.append((a, b, c)) called.append(thread.get_ident()) reactor.call_in_thread(None, None, f, 1, 2, c=3) reactor.call_later(0.7, reactor.stop) reactor.run() self.assertEqual(len(called), 2) self.assertEqual(called[0], (1, 2, 3)) if not 
isinstance(reactor, FakeReactor): self.assertNotEquals(called[1], thread.get_ident()) def test_call_in_thread_with_callback(self): reactor = self.get_reactor() called = [] def f(): called.append("f") return 32 def callback(result): called.append("callback") called.append(result) def errback(type, value, traceback): called.append("errback") called.append((type, value, traceback)) reactor.call_in_thread(callback, errback, f) reactor.call_later(0.7, reactor.stop) reactor.run() self.assertEqual(called, ["f", "callback", 32]) def test_call_in_thread_with_errback(self): reactor = self.get_reactor() called = [] def f(): called.append("f") 1 / 0 def callback(result): called.append("callback") called.append(result) def errback(*args): called.append("errback") called.append(args) reactor.call_in_thread(callback, errback, f) reactor.call_later(0.7, reactor.stop) reactor.run() self.assertEqual(called[:2], ["f", "errback"]) self.assertEqual(len(called), 3) self.assertEqual(called[2][0], ZeroDivisionError) self.assertTrue(isinstance(called[2][1], ZeroDivisionError)) self.assertTrue(isinstance(called[2][2], types.TracebackType)) def test_call_in_thread_with_error_but_no_errback(self): self.log_helper.ignore_errors(ZeroDivisionError) reactor = self.get_reactor() called = [] def f(): called.append("f") 1 / 0 def callback(result): called.append("callback") called.append(result) reactor.call_in_thread(callback, None, f) reactor.call_later(0.7, reactor.stop) reactor.run() self.assertEqual(called, ["f"]) self.assertTrue("ZeroDivisionError" in self.logfile.getvalue(), self.logfile.getvalue()) def test_call_in_main(self): reactor = self.get_reactor() called = [] def f(): called.append("f") called.append(thread.get_ident()) reactor.call_in_main(g, 1, 2, c=3) def g(a, b, c): called.append("g") called.append(thread.get_ident()) reactor.call_in_thread(None, None, f) reactor.call_later(0.7, reactor.stop) reactor.run() self.assertEqual(len(called), 4) self.assertEqual(called[0], "f") if not isinstance(reactor, FakeReactor): self.assertNotEquals(called[1], thread.get_ident()) self.assertEqual(called[2], "g") self.assertEqual(called[3], thread.get_ident()) def test_cancelling_handlers(self): """ If a handler modifies the list of handlers in-flight, the initial list of handlers is still used (and all handlers are executed). """ reactor = self.get_reactor() calls = [] def handler_1(): reactor.cancel_call(event_id) def handler_2(): calls.append(True) # This call cancels itself. event_id = reactor.call_on("foobar", handler_1) reactor.call_on("foobar", handler_2) reactor.fire("foobar") self.assertEqual([True], calls) class FakeReactorTest(LandscapeTest, ReactorTestMixin): def get_reactor(self): return FakeReactor() def test_incremental_advance(self): reactor = self.get_reactor() called = [] def callback(): called.append(True) reactor.call_later(2, callback) self.assertFalse(called) reactor.advance(1) self.assertFalse(called) reactor.advance(1) self.assertTrue(called) def test_time(self): """ The time method of FakeReactor should return the current simulated time. 
""" reactor = self.get_reactor() self.assertEqual(reactor.time(), 0) reactor.advance(10.5) self.assertEqual(reactor.time(), 10.5) reactor.advance(3) self.assertEqual(reactor.time(), 13.5) class LandscapeReactorTest(LandscapeTest, ReactorTestMixin): def get_reactor(self): reactor = LandscapeReactor() # It's not possible to stop the reactor in a Trial test, calling # reactor.crash instead saved_stop = reactor._reactor.stop reactor._reactor.stop = reactor._reactor.crash self.addCleanup(lambda: setattr(reactor._reactor, "stop", saved_stop)) return reactor def test_real_time(self): reactor = self.get_reactor() self.assertTrue(reactor.time() - time.time() < 3) landscape-client-14.01/landscape/tests/test_accumulate.py0000644000175000017500000001257412301414317023376 0ustar andreasandreasfrom landscape.lib.persist import Persist from landscape.accumulate import Accumulator, accumulate from landscape.tests.helpers import LandscapeTest class AccumulateTest(LandscapeTest): """Test for the accumulate function that implements accumulation logic.""" def test_accumulate(self): """ step: 0 5 --|--+--+--+--+--|-- value: 0 4 """ accumulated_value, step_data = accumulate(0, 0, 5, 4, 5) self.assertEqual(accumulated_value, 0) self.assertEqual(step_data, (5, 4)) def test_accumulate_non_zero_accumulated_value(self): """ step: 5 10 15 --|--+--+--+--+--|--+--+--+--+--|-- value: 4 3 """ accumulated_value, step_data = accumulate(7, 8, 13, 3, 5) self.assertEqual(accumulated_value, 9) self.assertEqual(step_data, (10, float((2 * 4) + (3 * 3)) / 5)) def test_accumulate_skipped_step(self): """ step: 0 5 10 15 --|--+--+--+--+--|--+--+--+--+--|--+--+--+--+--|-- value: 0 4 """ accumulated_value, step_data = accumulate(0, 0, 12, 4, 5) self.assertEqual(accumulated_value, 8) self.assertEqual(step_data, None) def test_accumulate_within_step(self): """ step: 0 5 --|--+--+--+--+--|-- value: 0 4 """ accumulated_value, step_data = accumulate(0, 0, 2, 4, 5) self.assertEqual(accumulated_value, 8) self.assertEqual(step_data, None) def test_accumulate_within_step_with_nonzero_start_accumulated_value(self): """ step: 0 5 --|--+--+--+--+--|-- value: 0 3 4 """ accumulated_value, step_data = accumulate(2, 6, 4, 4, 5) self.assertEqual(accumulated_value, 14) self.assertEqual(step_data, None) def test_accumulate_with_first_value_on_step_boundary(self): """ step: 0 5 --|--+--+--+--+--|-- value: 14 """ accumulated_value, step_data = accumulate(0, 0, 0, 14, 5) self.assertEqual(accumulated_value, 0) self.assertEqual(step_data, None) class AccumulatorTest(LandscapeTest): """Tests for the Accumulator plugin helper class.""" def test_accumulate(self): """ step: 0 5 --|--+--+--+--+--|-- value: 0 4 """ persist = Persist() accumulate = Accumulator(persist, 5) self.assertEqual(persist.get("key"), None) step_data = accumulate(5, 4, "key") self.assertEqual(step_data, (5, 4)) self.assertEqual(persist.get("key"), (5, 0)) def test_accumulate_non_zero_accumulated_value(self): """ step: 5 10 15 --|--+--+--+--+--|--+--+--+--+--|-- value: 4 3 """ persist = Persist() accumulate = Accumulator(persist, 5) # Persist data that would have been stored when # accumulate(7, 4, "key") was called. 
persist.set("key", (7, 8)) step_data = accumulate(13, 3, "key") self.assertEqual(step_data, (10, float((2 * 4) + (3 * 3)) / 5)) self.assertEqual(persist.get("key"), (13, 9)) def test_accumulate_skipped_step(self): """ step: 0 5 10 15 --|--+--+--+--+--|--+--+--+--+--|--+--+--+--+--|-- value: 0 4 """ persist = Persist() accumulate = Accumulator(persist, 5) self.assertEqual(persist.get("key"), None) step_data = accumulate(12, 4, "key") self.assertEqual(step_data, None) self.assertEqual(persist.get("key"), (12, 8)) def test_accumulate_within_step(self): """ step: 0 5 --|--+--+--+--+--|-- value: 0 4 """ persist = Persist() accumulate = Accumulator(persist, 5) self.assertEqual(persist.get("key"), None) step_data = accumulate(2, 4, "key") self.assertEqual(step_data, None) self.assertEqual(persist.get("key"), (2, 8)) def test_accumulate_within_step_with_nonzero_start_accumulated_value(self): """ step: 0 5 --|--+--+--+--+--|-- value: 0 3 4 """ persist = Persist() accumulate = Accumulator(persist, 5) # Persist data that would have been stored when # accumulate(2, 3, "key") was called. persist.set("key", (2, 6)) step_data = accumulate(4, 4, "key") self.assertEqual(step_data, None) self.assertEqual(persist.get("key"), (4, 14)) def test_accumulate_with_first_value_on_step_boundary(self): """ step: 0 5 --|--+--+--+--+--|-- value: 14 """ persist = Persist() accumulate = Accumulator(persist, 5) self.assertEqual(persist.get("key"), None) step_data = accumulate(0, 14, "key") self.assertEqual(step_data, None) self.assertEqual(persist.get("key"), (0, 0)) landscape-client-14.01/landscape/tests/test_configuration.py0000644000175000017500000024647312301414317024131 0ustar andreasandreasimport os import sys from getpass import getpass from ConfigParser import ConfigParser from cStringIO import StringIO from twisted.internet.defer import succeed, fail from twisted.internet.task import Clock from landscape.lib.amp import MethodCallSender from landscape.reactor import LandscapeReactor, FakeReactor from landscape.lib.fetch import HTTPCodeError, PyCurlError from landscape.configuration import ( print_text, LandscapeSetupScript, LandscapeSetupConfiguration, register, setup, main, setup_init_script_and_start_client, stop_client_and_disable_init_script, ConfigurationError, ImportOptionError, store_public_key_data, fetch_base64_ssl_public_certificate, bootstrap_tree) from landscape.broker.registration import InvalidCredentialsError from landscape.sysvconfig import SysVConfig, ProcessError from landscape.tests.helpers import ( LandscapeTest, BrokerServiceHelper, EnvironSaverHelper) from landscape.tests.mocker import ARGS, ANY, MATCH, CONTAINS, expect from landscape.broker.amp import RemoteBroker class LandscapeConfigurationTest(LandscapeTest): def get_config(self, args, data_path=None): if data_path is None: data_path = os.path.join(self.makeDir(), "client") if "--config" not in args and "-c" not in args: filename = self.makeFile(""" [client] url = https://landscape.canonical.com/message-system """) args.extend(["--config", filename, "--data-path", data_path]) config = LandscapeSetupConfiguration() config.load(args) return config class PrintTextTest(LandscapeTest): def test_default(self): stdout_mock = self.mocker.replace("sys.stdout") self.mocker.order() stdout_mock.write("Hi!\n") stdout_mock.flush() self.mocker.unorder() # Trial likes to flush things inside run(). 
stdout_mock.flush() self.mocker.count(0, None) self.mocker.replay() print_text("Hi!") def test_error(self): stderr_mock = self.mocker.replace("sys.stderr") self.mocker.order() stderr_mock.write("Hi!\n") stderr_mock.flush() self.mocker.unorder() # Trial likes to flush things inside run(). stderr_mock.flush() self.mocker.count(0, None) self.mocker.replay() print_text("Hi!", error=True) def test_end(self): stdout_mock = self.mocker.replace("sys.stdout") self.mocker.order() stdout_mock.write("Hi!END") stdout_mock.flush() self.mocker.unorder() # Trial likes to flush things inside run(). stdout_mock.flush() self.mocker.count(0, None) self.mocker.replay() print_text("Hi!", "END") class LandscapeSetupScriptTest(LandscapeTest): def setUp(self): super(LandscapeSetupScriptTest, self).setUp() self.config_filename = self.makeFile() class MyLandscapeSetupConfiguration(LandscapeSetupConfiguration): default_config_filenames = [self.config_filename] self.config = MyLandscapeSetupConfiguration() self.script = LandscapeSetupScript(self.config) def test_show_help(self): print_text_mock = self.mocker.replace(print_text) print_text_mock("\nHello\n\nworld!\n") print_text_mock(ANY) self.mocker.count(0) self.mocker.replay() self.script.show_help("\n\n \n Hello \n \n world! \n \n\n") def test_prompt_simple(self): mock = self.mocker.replace(raw_input, passthrough=False) mock("Message: ") self.mocker.result("Desktop") self.mocker.replay() self.script.prompt("computer_title", "Message") self.assertEqual(self.config.computer_title, "Desktop") def test_prompt_with_default(self): mock = self.mocker.replace(raw_input, passthrough=False) mock("Message [default]: ") self.mocker.result("") self.mocker.replay() self.config.computer_title = "default" self.script.prompt("computer_title", "Message") self.assertEqual(self.config.computer_title, "default") def test_prompt_with_required(self): self.mocker.order() raw_input_mock = self.mocker.replace(raw_input, passthrough=False) script_mock = self.mocker.patch(self.script) raw_input_mock("Message: ") self.mocker.result("") script_mock.show_help("This option is required to " "configure Landscape.") raw_input_mock("Message: ") self.mocker.result("Desktop") self.mocker.replay() self.script.prompt("computer_title", "Message", True) self.assertEqual(self.config.computer_title, "Desktop") def test_prompt_with_required_and_default(self): self.mocker.order() raw_input_mock = self.mocker.replace(raw_input, passthrough=False) raw_input_mock("Message [Desktop]: ") self.mocker.result("") self.mocker.replay() self.config.computer_title = "Desktop" self.script.prompt("computer_title", "Message", True) self.assertEqual(self.config.computer_title, "Desktop") def test_prompt_for_unknown_variable(self): """ It should be possible to prompt() defining a variable that doesn't 'exist' in the configuration, and still have it set there. 
""" self.mocker.order() raw_input_mock = self.mocker.replace(raw_input, passthrough=False) self.assertFalse(hasattr(self.config, "variable")) self.expect(raw_input_mock("Variable: ")).result("Yay") self.mocker.replay() self.script.prompt("variable", "Variable") self.assertEqual(self.config.variable, "Yay") def test_password_prompt_simple_matching(self): mock = self.mocker.replace(getpass, passthrough=False) mock("Password: ") self.mocker.result("password") mock("Please confirm: ") self.mocker.result("password") self.mocker.replay() self.script.password_prompt("registration_key", "Password") self.assertEqual(self.config.registration_key, "password") def test_password_prompt_simple_non_matching(self): mock = self.mocker.replace(getpass, passthrough=False) mock("Password: ") self.mocker.result("password") script_mock = self.mocker.patch(self.script) script_mock.show_help("Keys must match.") mock("Please confirm: ") self.mocker.result("") mock("Password: ") self.mocker.result("password") mock("Please confirm: ") self.mocker.result("password") self.mocker.replay() self.script.password_prompt("registration_key", "Password") self.assertEqual(self.config.registration_key, "password") def test_password_prompt_simple_matching_required(self): mock = self.mocker.replace(getpass, passthrough=False) mock("Password: ") self.mocker.result("") script_mock = self.mocker.patch(self.script) script_mock.show_help("This option is required to " "configure Landscape.") mock("Password: ") self.mocker.result("password") mock("Please confirm: ") self.mocker.result("password") self.mocker.replay() self.script.password_prompt("registration_key", "Password", True) self.assertEqual(self.config.registration_key, "password") def test_prompt_yes_no(self): comparisons = [("Y", True), ("y", True), ("yEs", True), ("YES", True), ("n", False), ("N", False), ("No", False), ("no", False), ("", True)] self.mocker.order() raw_input_mock = self.mocker.replace(raw_input, passthrough=False) for comparison in comparisons: self.expect(raw_input_mock("Foo [Y/n]")).result(comparison[0]) self.mocker.replay() for comparison in comparisons: self.assertEqual(self.script.prompt_yes_no("Foo"), comparison[1]) def test_prompt_yes_no_default(self): self.mocker.order() raw_input_mock = self.mocker.replace(raw_input, passthrough=False) self.expect(raw_input_mock("Foo [y/N]")).result("") self.mocker.replay() self.assertFalse(self.script.prompt_yes_no("Foo", default=False)) def test_prompt_yes_no_invalid(self): self.mocker.order() raw_input_mock = self.mocker.replace(raw_input, passthrough=False) script_mock = self.mocker.patch(self.script) self.expect(raw_input_mock("Foo [Y/n]")).result("x") script_mock.show_help("Invalid input.") self.expect(raw_input_mock("Foo [Y/n]")).result("n") self.mocker.replay() self.assertFalse(self.script.prompt_yes_no("Foo")) def get_matcher(self, help_snippet): def match_help(help): return help.strip().startswith(help_snippet) return MATCH(match_help) def test_query_computer_title(self): help_snippet = "The computer title you" self.mocker.order() script_mock = self.mocker.patch(self.script) script_mock.show_help(self.get_matcher(help_snippet)) script_mock.prompt("computer_title", "This computer's title", True) self.mocker.replay() self.script.query_computer_title() def test_query_computer_title_defined_on_command_line(self): raw_input_mock = self.mocker.replace(raw_input, passthrough=False) self.expect(raw_input_mock(ANY)).count(0) self.mocker.replay() self.config.load_command_line(["-t", "Computer title"]) 
self.script.query_computer_title() def test_query_account_name(self): help_snippet = "You must now specify the name of the Landscape account" self.mocker.order() script_mock = self.mocker.patch(self.script) script_mock.show_help(self.get_matcher(help_snippet)) script_mock.prompt("account_name", "Account name", True) self.mocker.replay() self.script.query_account_name() def test_query_account_name_defined_on_command_line(self): raw_input_mock = self.mocker.replace(raw_input, passthrough=False) self.expect(raw_input_mock(ANY)).count(0) self.mocker.replay() self.config.load_command_line(["-a", "Account name"]) self.script.query_account_name() def test_query_registration_key(self): help_snippet = "A registration key may be" self.mocker.order() script_mock = self.mocker.patch(self.script) script_mock.show_help(self.get_matcher(help_snippet)) script_mock.password_prompt("registration_key", "Account registration key") self.mocker.replay() self.script.query_registration_key() def test_query_registration_key_defined_on_command_line(self): getpass_mock = self.mocker.replace("getpass.getpass", passthrough=False) self.expect(getpass_mock(ANY)).count(0) self.mocker.replay() self.config.load_command_line(["-p", "shared-secret"]) self.script.query_registration_key() def test_query_proxies(self): help_snippet = "The Landscape client communicates" self.mocker.order() script_mock = self.mocker.patch(self.script) script_mock.show_help(self.get_matcher(help_snippet)) script_mock.prompt("http_proxy", "HTTP proxy URL") script_mock.prompt("https_proxy", "HTTPS proxy URL") self.mocker.replay() self.script.query_proxies() def test_query_proxies_defined_on_command_line(self): raw_input_mock = self.mocker.replace(raw_input, passthrough=False) self.expect(raw_input_mock(ANY)).count(0) self.mocker.replay() self.config.load_command_line(["--http-proxy", "localhost:8080", "--https-proxy", "localhost:8443"]) self.script.query_proxies() def test_query_http_proxy_defined_on_command_line(self): help_snippet = "The Landscape client communicates" self.mocker.order() script_mock = self.mocker.patch(self.script) script_mock.show_help(self.get_matcher(help_snippet)) script_mock.prompt("https_proxy", "HTTPS proxy URL") self.mocker.replay() self.config.load_command_line(["--http-proxy", "localhost:8080"]) self.script.query_proxies() def test_query_https_proxy_defined_on_command_line(self): help_snippet = "The Landscape client communicates" self.mocker.order() script_mock = self.mocker.patch(self.script) script_mock.show_help(self.get_matcher(help_snippet)) script_mock.prompt("http_proxy", "HTTP proxy URL") self.mocker.replay() self.config.load_command_line(["--https-proxy", "localhost:8443"]) self.script.query_proxies() def test_query_script_plugin_no(self): help_snippet = "Landscape has a feature which enables administrators" self.mocker.order() script_mock = self.mocker.patch(self.script) script_mock.show_help(self.get_matcher(help_snippet)) script_mock.prompt_yes_no("Enable script execution?", default=False) self.mocker.result(False) self.mocker.replay() self.script.query_script_plugin() self.assertEqual(self.config.include_manager_plugins, "") def test_query_script_plugin_yes(self): """ If the user *does* want script execution, then the script asks which users to enable it for. 
""" help_snippet = "Landscape has a feature which enables administrators" self.mocker.order() script_mock = self.mocker.patch(self.script) script_mock.show_help(self.get_matcher(help_snippet)) script_mock.prompt_yes_no("Enable script execution?", default=False) self.mocker.result(True) script_mock.show_help( self.get_matcher("By default, scripts are restricted")) script_mock.prompt("script_users", "Script users") self.mocker.replay() self.script.query_script_plugin() self.assertEqual(self.config.include_manager_plugins, "ScriptExecution") def test_disable_script_plugin(self): """ Answering NO to enabling the script plugin while it's already enabled will disable it. """ self.config.include_manager_plugins = "ScriptExecution" help_snippet = "Landscape has a feature which enables administrators" self.mocker.order() script_mock = self.mocker.patch(self.script) script_mock.show_help(self.get_matcher(help_snippet)) script_mock.prompt_yes_no("Enable script execution?", default=True) self.mocker.result(False) self.mocker.replay() self.script.query_script_plugin() self.assertEqual(self.config.include_manager_plugins, "") def test_disabling_script_plugin_leaves_existing_inclusions(self): """ Disabling the script execution plugin doesn't remove other included plugins. """ self.config.include_manager_plugins = "FooPlugin, ScriptExecution" self.mocker.order() script_mock = self.mocker.patch(self.script) script_mock.show_help(ANY) script_mock.prompt_yes_no("Enable script execution?", default=True) self.mocker.result(False) self.mocker.replay() self.script.query_script_plugin() self.assertEqual(self.config.include_manager_plugins, "FooPlugin") def test_enabling_script_plugin_leaves_existing_inclusions(self): """ Enabling the script execution plugin doesn't remove other included plugins. """ self.config.include_manager_plugins = "FooPlugin" self.mocker.order() script_mock = self.mocker.patch(self.script) script_mock.show_help(ANY) script_mock.prompt_yes_no("Enable script execution?", default=False) self.mocker.result(True) script_mock.show_help(ANY) script_mock.prompt("script_users", "Script users") self.mocker.replay() self.script.query_script_plugin() self.assertEqual(self.config.include_manager_plugins, "FooPlugin, ScriptExecution") def test_query_script_plugin_defined_on_command_line(self): raw_input_mock = self.mocker.replace(raw_input, passthrough=False) self.expect(raw_input_mock(ANY)).count(0) self.mocker.replay() self.config.load_command_line( ["--include-manager-plugins", "ScriptExecution", "--script-users", "root, nobody"]) self.script.query_script_plugin() self.assertEqual(self.config.include_manager_plugins, "ScriptExecution") self.assertEqual(self.config.script_users, "root, nobody") def test_query_script_manager_plugins_defined_on_command_line(self): self.config.include_manager_plugins = "FooPlugin" self.mocker.order() script_mock = self.mocker.patch(self.script) script_mock.show_help(ANY) script_mock.prompt_yes_no("Enable script execution?", default=False) self.mocker.result(True) script_mock.show_help(ANY) script_mock.prompt("script_users", "Script users") self.mocker.replay() self.config.load_command_line( ["--include-manager-plugins", "FooPlugin, ScriptExecution"]) self.script.query_script_plugin() self.assertEqual(self.config.include_manager_plugins, "FooPlugin, ScriptExecution") def test_query_script_users_defined_on_command_line(self): """ Confirm with the user for users specified for the ScriptPlugin. 
""" pwnam_mock = self.mocker.replace("pwd.getpwnam") pwnam_mock("landscape") self.mocker.result(None) self.config.include_manager_plugins = "FooPlugin" self.mocker.order() script_mock = self.mocker.patch(self.script) script_mock.show_help(ANY) script_mock.prompt_yes_no("Enable script execution?", default=False) self.mocker.result(True) script_mock.show_help(ANY) script_mock.prompt_get_input( "Script users [root, nobody, landscape]: ", False) self.mocker.replay() self.config.load_command_line( ["--script-users", "root, nobody, landscape"]) self.script.query_script_plugin() self.assertEqual(self.config.script_users, "root, nobody, landscape") def test_query_script_users_on_command_line_with_unknown_user(self): """ If several users are provided on the command line, we verify the users and raise a ConfigurationError if any are unknown on this system. """ pwnam_mock = self.mocker.replace("pwd.getpwnam") pwnam_mock("root") self.mocker.result(None) pwnam_mock("nobody") self.mocker.result(None) pwnam_mock("landscape") self.mocker.result(None) pwnam_mock("unknown") self.mocker.throw(KeyError()) self.mocker.replay() self.config.load_command_line( ["--script-users", "root, nobody, landscape, unknown", "--include-manager-plugins", "ScriptPlugin"]) self.assertRaises(ConfigurationError, self.script.query_script_plugin) def test_query_script_users_defined_on_command_line_with_all_user(self): """ We shouldn't accept all as a synonym for ALL """ self.config.load_command_line( ["--script-users", "all", "--include-manager-plugins", "ScriptPlugin"]) self.assertRaises(ConfigurationError, self.script.query_script_plugin) def test_query_script_users_defined_on_command_line_with_ALL_user(self): """ ALL is the special marker for all users. """ self.config.load_command_line( ["--script-users", "ALL", "--include-manager-plugins", "ScriptPlugin"]) self.script.query_script_plugin() self.assertEqual(self.config.script_users, "ALL") def test_query_script_users_command_line_with_ALL_and_extra_user(self): """ If ALL and additional users are provided as the users on the command line, this should raise an appropriate ConfigurationError. """ self.config.load_command_line( ["--script-users", "ALL, kevin", "--include-manager-plugins", "ScriptPlugin"]) self.assertRaises(ConfigurationError, self.script.query_script_plugin) def test_invalid_user_entered_by_user(self): """ If an invalid user is entered on the command line the user should be informed and prompted again. """ help_snippet = "Landscape has a feature which enables administrators" self.mocker.order() script_mock = self.mocker.patch(self.script) script_mock.show_help(self.get_matcher(help_snippet)) script_mock.prompt_yes_no("Enable script execution?", default=False) self.mocker.result(True) script_mock.show_help( self.get_matcher("By default, scripts are restricted")) script_mock.prompt_get_input("Script users: ", False) self.mocker.result(u"nonexistent") script_mock.show_help("Unknown system users: nonexistent") script_mock.prompt_get_input("Script users: ", False) self.mocker.result(u"root") self.mocker.replay() self.script.query_script_plugin() self.assertEqual(self.config.script_users, "root") def test_tags_not_defined_on_command_line(self): """ If tags are not provided, the user should be prompted for them. """ self.mocker.order() script_mock = self.mocker.patch(self.script) script_mock.show_help("You may provide tags for this computer e.g. 
" "server,precise.") script_mock.prompt("tags", "Tags", False) self.mocker.replay() self.script.query_tags() def test_invalid_tags_entered_by_user(self): """ If tags are not provided, the user should be prompted for them, and they should be valid tags, if not the user should be prompted for them again. """ script_mock = self.mocker.patch(self.script) script_mock.show_help("You may provide tags for this computer e.g. " "server,precise.") script_mock.prompt_get_input("Tags: ", False) self.mocker.result(u"") script_mock.show_help("Tag names may only contain alphanumeric " "characters.") script_mock.prompt_get_input("Tags: ", False) self.mocker.result(u"london") self.mocker.replay() self.script.query_tags() def test_tags_defined_on_command_line(self): """ Tags defined on the command line can be verified by the user. """ raw_input_mock = self.mocker.replace(raw_input, passthrough=False) self.expect(raw_input_mock(ANY)).count(0) self.mocker.replay() self.config.load_command_line(["--tags", u"server,london"]) self.script.query_tags() self.assertEqual(self.config.tags, u"server,london") def test_invalid_tags_defined_on_command_line_raises_error(self): """ Invalid tags on the command line raises a ConfigurationError. """ raw_input_mock = self.mocker.replace(raw_input, passthrough=False) self.expect(raw_input_mock(ANY)).count(0) self.mocker.replay() self.config.load_command_line(["--tags", u""]) self.assertRaises(ConfigurationError, self.script.query_tags) def test_access_group_not_defined_on_command_line(self): """ If an access group is not provided, the user should be prompted for it. """ self.mocker.order() script_mock = self.mocker.patch(self.script) script_mock.show_help("You may provide an access group for this " "computer e.g. webservers.") script_mock.prompt("access_group", "Access group", False) self.mocker.replay() self.script.query_access_group() def test_access_group_defined_on_command_line(self): """ An access group defined on the command line can be verified by the user. """ raw_input_mock = self.mocker.replace(raw_input, passthrough=False) self.expect(raw_input_mock(ANY)).count(0) self.mocker.replay() self.config.load_command_line(["--access-group", u"webservers"]) self.script.query_access_group() self.assertEqual(self.config.access_group, u"webservers") def test_show_header(self): help_snippet = "This script will" script_mock = self.mocker.patch(self.script) script_mock.show_help(self.get_matcher(help_snippet)) self.mocker.replay() self.script.show_header() def test_run(self): script_mock = self.mocker.patch(self.script) script_mock.show_header() script_mock.query_computer_title() script_mock.query_account_name() script_mock.query_registration_key() script_mock.query_proxies() script_mock.query_script_plugin() script_mock.query_access_group() script_mock.query_tags() self.mocker.replay() self.script.run() class BootstrapTreeTest(LandscapeConfigurationTest): def test_bootstrap_tree(self): """ The L{bootstrap_tree} function creates the client dir and /annotations.d under it with the correct permissions. 
""" client_path = self.makeDir() annotations_path = os.path.join(client_path, "annotations.d") mock_chmod = self.mocker.replace("os.chmod") mock_chmod(client_path, 0755) mock_chmod(annotations_path, 0755) self.mocker.replay() config = self.get_config([], data_path=client_path) bootstrap_tree(config) self.assertTrue(os.path.isdir(client_path)) self.assertTrue(os.path.isdir(annotations_path)) class ConfigurationFunctionsTest(LandscapeConfigurationTest): helpers = [EnvironSaverHelper] def setUp(self): super(ConfigurationFunctionsTest, self).setUp() self.mocker.replace("os.getuid")() self.mocker.count(0, None) self.mocker.result(0) # Make bootstrap_tree a no-op as a a non-root user can't change # ownership. self.mocker.replace("landscape.configuration.bootstrap_tree")(ANY) self.mocker.count(0, None) def get_content(self, config): """Write C{config} to a file and return it's contents as a string.""" config_file = self.makeFile("") original_config = config.config try: config.config = config_file config.write() return open(config.config, "r").read().strip() + "\n" finally: config.config = original_config def test_setup(self): filename = self.makeFile("[client]\n" "computer_title = Old Title\n" "account_name = Old Name\n" "registration_key = Old Password\n" "http_proxy = http://old.proxy\n" "https_proxy = https://old.proxy\n" "url = http://url\n" "include_manager_plugins = ScriptExecution\n" "access_group = webservers\n" "tags = london, server") raw_input = self.mocker.replace("__builtin__.raw_input", name="raw_input") getpass = self.mocker.replace("getpass.getpass") C = CONTAINS expect(raw_input(C("[Old Title]"))).result("New Title") expect(raw_input(C("[Old Name]"))).result("New Name") expect(getpass(C("Account registration key:"))).result("New Password") expect(getpass(C("Please confirm:"))).result("New Password") expect(raw_input(C("[http://old.proxy]"))).result("http://new.proxy") expect(raw_input(C("[https://old.proxy]"))).result("https://new.proxy") expect(raw_input(C("Enable script execution? [Y/n]"))).result("n") expect(raw_input(C("Access group [webservers]: "))).result( u"databases") expect(raw_input(C("Tags [london, server]: "))).result( u"glasgow, laptop") # Negative assertion. We don't want it called in any other way. expect(raw_input(ANY)).count(0) # We don't care about these here, but don't show any output please. print_text_mock = self.mocker.replace(print_text) expect(print_text_mock(ANY)).count(0, None) self.mocker.replay() config = self.get_config(["--no-start", "--config", filename]) setup(config) self.assertEqual(type(config), LandscapeSetupConfiguration) # Reload it to ensure it was written down. config.reload() self.assertEqual(config.computer_title, "New Title") self.assertEqual(config.account_name, "New Name") self.assertEqual(config.registration_key, "New Password") self.assertEqual(config.http_proxy, "http://new.proxy") self.assertEqual(config.https_proxy, "https://new.proxy") self.assertEqual(config.include_manager_plugins, "") self.assertEqual(config.access_group, u"databases") self.assertEqual(config.tags, u"glasgow, laptop") def test_silent_setup(self): """ Only command-line options are used in silent mode and registration is attempted. 
""" sysvconfig_mock = self.mocker.patch(SysVConfig) sysvconfig_mock.set_start_on_boot(True) sysvconfig_mock.restart_landscape() self.mocker.replay() config = self.get_config(["--silent", "-a", "account", "-t", "rex"]) setup(config) self.assertConfigEqual(self.get_content(config), """\ [client] computer_title = rex data_path = %s account_name = account url = https://landscape.canonical.com/message-system """ % config.data_path) def test_silent_setup_no_register(self): """ Called with command line options to write a config file but no registration or validation of parameters is attempted. """ # Make sure no sysvconfig modifications are attempted self.mocker.patch(SysVConfig) self.mocker.replay() config = self.get_config(["--silent", "--no-start"]) setup(config) self.assertConfigEqual(self.get_content(config), """\ [client] data_path = %s url = https://landscape.canonical.com/message-system """ % config.data_path) def test_silent_setup_no_register_with_default_preseed_params(self): """ Make sure that the configuration can be used to write the configuration file after a fresh install. """ # Make sure no sysvconfig modifications are attempted self.mocker.patch(SysVConfig) self.mocker.replay() args = ["--silent", "--no-start", "--computer-title", "", "--account-name", "", "--registration-key", "", "--url", "https://landscape.canonical.com/message-system", "--exchange-interval", "900", "--urgent-exchange-interval", "60", "--ping-url", "http://landscape.canonical.com/ping", "--ping-interval", "30", "--http-proxy", "", "--https-proxy", "", "--otp", "", "--tags", "", "--provisioning-otp", ""] config = self.get_config(args) setup(config) self.assertConfigEqual( self.get_content(config), "[client]\n" "http_proxy = \n" "tags = \n" "data_path = %s\n" "registration_key = \n" "account_name = \n" "computer_title = \n" "https_proxy = \n" "url = https://landscape.canonical.com/message-system\n" "exchange_interval = 900\n" "otp = \n" "ping_interval = 30\n" "ping_url = http://landscape.canonical.com/ping\n" "provisioning_otp = \n" "urgent_exchange_interval = 60\n" % config.data_path) def test_silent_setup_without_computer_title(self): """A computer title is required.""" sysvconfig_mock = self.mocker.patch(SysVConfig) sysvconfig_mock.set_start_on_boot(True) self.mocker.replay() config = self.get_config(["--silent", "-a", "account"]) self.assertRaises(ConfigurationError, setup, config) def test_silent_setup_without_account_name(self): """An account name is required.""" sysvconfig_mock = self.mocker.patch(SysVConfig) sysvconfig_mock.set_start_on_boot(True) self.mocker.replay() config = self.get_config(["--silent", "-t", "rex"]) self.assertRaises(ConfigurationError, setup, config) def test_silent_setup_with_otp(self): """ If the OTP is specified, there is no need to pass the account name and the computer title. """ sysvconfig_mock = self.mocker.patch(SysVConfig) sysvconfig_mock.set_start_on_boot(True) self.mocker.replay() config = self.get_config(["--silent", "--otp", "otp1"]) setup(config) self.assertEqual("otp1", config.otp) def test_silent_setup_with_provisioning_otp(self): """ If the provisioning OTP is specified, there is no need to pass the account name and the computer title. 
""" sysvconfig_mock = self.mocker.patch(SysVConfig) sysvconfig_mock.set_start_on_boot(True) sysvconfig_mock.restart_landscape() self.mocker.replay() config = self.get_config(["--silent", "--provisioning-otp", "otp1"]) setup(config) self.assertEqual("otp1", config.provisioning_otp) def test_silent_script_users_imply_script_execution_plugin(self): """ If C{--script-users} is specified, without C{ScriptExecution} in the list of manager plugins, it will be automatically added. """ sysvconfig_mock = self.mocker.patch(SysVConfig) sysvconfig_mock.set_start_on_boot(True) sysvconfig_mock.restart_landscape() self.mocker.result(True) raw_input_mock = self.mocker.replace(raw_input, passthrough=False) self.expect(raw_input_mock(ANY)).count(0) self.mocker.replay() filename = self.makeFile(""" [client] url = https://localhost:8080/message-system bus = session """) config = self.get_config(["--config", filename, "--silent", "-a", "account", "-t", "rex", "--script-users", "root, nobody"]) setup(config) parser = ConfigParser() parser.read(filename) self.assertEqual( {"url": "https://localhost:8080/message-system", "bus": "session", "computer_title": "rex", "include_manager_plugins": "ScriptExecution", "script_users": "root, nobody", "account_name": "account"}, dict(parser.items("client"))) def test_silent_script_users_with_all_user(self): """ In silent mode, we shouldn't accept invalid users, it should raise a configuration error. """ sysvconfig_mock = self.mocker.patch(SysVConfig) sysvconfig_mock.set_start_on_boot(True) self.mocker.replay() config = self.get_config( ["--script-users", "all", "--include-manager-plugins", "ScriptPlugin", "-a", "account", "-t", "rex", "--silent"]) self.assertRaises(ConfigurationError, setup, config) def test_silent_setup_with_ping_url(self): sysvconfig_mock = self.mocker.patch(SysVConfig) sysvconfig_mock.set_start_on_boot(True) sysvconfig_mock.restart_landscape() self.mocker.result(True) self.mocker.replay() filename = self.makeFile(""" [client] ping_url = http://landscape.canonical.com/ping registration_key = shared-secret log_level = debug random_key = random_value """) config = self.get_config(["--config", filename, "--silent", "-a", "account", "-t", "rex", "--ping-url", "http://localhost/ping"]) setup(config) parser = ConfigParser() parser.read(filename) self.assertEqual( {"log_level": "debug", "registration_key": "shared-secret", "ping_url": "http://localhost/ping", "random_key": "random_value", "computer_title": "rex", "account_name": "account"}, dict(parser.items("client"))) def test_setup_with_proxies_from_environment(self): os.environ["http_proxy"] = "http://environ" os.environ["https_proxy"] = "https://environ" script_mock = self.mocker.patch(LandscapeSetupScript) script_mock.run() filename = self.makeFile("[client]\n" "url = http://url\n") self.mocker.replay() config = self.get_config(["--no-start", "--config", filename]) setup(config) # Reload it to ensure it was written down. config.reload() self.assertEqual(config.http_proxy, "http://environ") self.assertEqual(config.https_proxy, "https://environ") def test_silent_setup_with_proxies_from_environment(self): """ Only command-line options are used in silent mode and registration is attempted. 
""" os.environ["http_proxy"] = "http://environ" os.environ["https_proxy"] = "https://environ" sysvconfig_mock = self.mocker.patch(SysVConfig) sysvconfig_mock.set_start_on_boot(True) sysvconfig_mock.restart_landscape() self.mocker.replay() filename = self.makeFile(""" [client] registration_key = shared-secret """) config = self.get_config(["--config", filename, "--silent", "-a", "account", "-t", "rex"]) setup(config) parser = ConfigParser() parser.read(filename) self.assertEqual( {"registration_key": "shared-secret", "http_proxy": "http://environ", "https_proxy": "https://environ", "computer_title": "rex", "account_name": "account"}, dict(parser.items("client"))) def test_setup_prefers_proxies_from_config_over_environment(self): os.environ["http_proxy"] = "http://environ" os.environ["https_proxy"] = "https://environ" script_mock = self.mocker.patch(LandscapeSetupScript) script_mock.run() filename = self.makeFile("[client]\n" "http_proxy = http://config\n" "https_proxy = https://config\n" "url = http://url\n") self.mocker.replay() config = self.get_config(["--no-start", "--config", filename]) setup(config) # Reload it to enusre it was written down. config.reload() self.assertEqual(config.http_proxy, "http://config") self.assertEqual(config.https_proxy, "https://config") def test_main_no_registration(self): setup_mock = self.mocker.replace(setup) setup_mock(ANY) raw_input_mock = self.mocker.replace(raw_input) raw_input_mock("\nRequest a new registration for " "this computer now? (Y/n): ") self.mocker.result("n") # This must not be called. register_mock = self.mocker.replace(register, passthrough=False) register_mock(ANY) self.mocker.count(0) self.mocker.replay() main(["-c", self.make_working_config()]) def test_main_silent(self): """ In silent mode, the client should register when the registration details are changed/set. """ setup_mock = self.mocker.replace(setup) setup_mock(ANY) register_mock = self.mocker.replace(register, passthrough=False) register_mock(ANY) self.mocker.count(1) self.mocker.replay() config_filename = self.makeFile( "[client]\n" "computer_title = Old Title\n" "account_name = Old Name\n" "registration_key = Old Password\n" ) main(["-c", config_filename, "--silent"]) def make_working_config(self): return self.makeFile("[client]\n" "computer_title = Old Title\n" "account_name = Old Name\n" "registration_key = Old Password\n" "http_proxy = http://old.proxy\n" "https_proxy = https://old.proxy\n" "url = http://url\n") def test_register(self): sysvconfig_mock = self.mocker.patch(SysVConfig) sysvconfig_mock.is_configured_to_run() self.mocker.result(False) sysvconfig_mock.set_start_on_boot(True) sysvconfig_mock.restart_landscape() script_mock = self.mocker.patch(LandscapeSetupScript) script_mock.run() raw_input_mock = self.mocker.replace(raw_input) raw_input_mock("\nRequest a new registration for " "this computer now? (Y/n): ") self.mocker.result("") raw_input_mock("\nThe Landscape client must be started " "on boot to operate correctly.\n\n" "Start Landscape client on boot? (Y/n): ") self.mocker.result("") register_mock = self.mocker.replace(register, passthrough=False) register_mock(ANY) self.mocker.replay() main(["--config", self.make_working_config()]) def test_errors_from_restart_landscape(self): """ If a ProcessError exception is raised from restart_landscape (because the client failed to be restarted), an informative message is printed and the script exits. 
""" sysvconfig_mock = self.mocker.patch(SysVConfig) print_text_mock = self.mocker.replace(print_text) sysvconfig_mock.set_start_on_boot(True) sysvconfig_mock.restart_landscape() self.mocker.throw(ProcessError) print_text_mock("Couldn't restart the Landscape client.", error=True) print_text_mock(CONTAINS("This machine will be registered"), error=True) self.mocker.replay() config = self.get_config(["--silent", "-a", "account", "-t", "rex"]) system_exit = self.assertRaises(SystemExit, setup, config) self.assertEqual(system_exit.code, 2) def test_errors_from_restart_landscape_ok_no_register(self): """ Exit code 0 will be returned if the client fails to be restarted and --ok-no-register was passed. """ sysvconfig_mock = self.mocker.patch(SysVConfig) print_text_mock = self.mocker.replace(print_text) sysvconfig_mock.set_start_on_boot(True) sysvconfig_mock.restart_landscape() self.mocker.throw(ProcessError) print_text_mock("Couldn't restart the Landscape client.", error=True) print_text_mock(CONTAINS("This machine will be registered"), error=True) self.mocker.replay() config = self.get_config(["--silent", "-a", "account", "-t", "rex", "--ok-no-register"]) system_exit = self.assertRaises(SystemExit, setup, config) self.assertEqual(system_exit.code, 0) def test_main_with_register(self): setup_mock = self.mocker.replace(setup) setup_mock(ANY) raw_input_mock = self.mocker.replace(raw_input) raw_input_mock("\nRequest a new registration for " "this computer now? (Y/n): ") self.mocker.result("") register_mock = self.mocker.replace(register, passthrough=False) register_mock(ANY) self.mocker.replay() main(["-c", self.make_working_config()]) def test_setup_init_script_and_start_client(self): sysvconfig_mock = self.mocker.patch(SysVConfig) sysvconfig_mock.set_start_on_boot(True) self.mocker.replay() setup_init_script_and_start_client() def test_setup_init_script_and_start_client_silent(self): sysvconfig_mock = self.mocker.patch(SysVConfig) sysvconfig_mock.set_start_on_boot(True) raw_input_mock = self.mocker.replace(raw_input, passthrough=False) raw_input_mock(ANY) self.mocker.count(0) self.mocker.replay() setup_init_script_and_start_client() def test_register_silent(self): """ Silent registration uses specified configuration to attempt a registration with the server. """ setup_mock = self.mocker.replace(setup) setup_mock(ANY) # No interaction should be requested. raw_input_mock = self.mocker.replace(raw_input) raw_input_mock(ANY) self.mocker.count(0) # The registration logic should be called and passed the configuration # file. register_mock = self.mocker.replace(register, passthrough=False) register_mock(ANY) self.mocker.replay() main(["--silent", "-c", self.make_working_config()]) def test_disable(self): stop_client_and_disable_init_script_mock = self.mocker.replace( stop_client_and_disable_init_script) stop_client_and_disable_init_script_mock() # No interaction should be requested. raw_input_mock = self.mocker.replace(raw_input) raw_input_mock(ANY) self.mocker.count(0) # Registration logic should not be invoked. 
register_mock = self.mocker.replace(register, passthrough=False) register_mock(ANY, ANY, ANY) self.mocker.count(0) self.mocker.replay() main(["--disable", "-c", self.make_working_config()]) def test_stop_client_and_disable_init_scripts(self): sysvconfig_mock = self.mocker.patch(SysVConfig) sysvconfig_mock.set_start_on_boot(False) sysvconfig_mock.stop_landscape() self.mocker.replay() main(["--disable", "-c", self.make_working_config()]) def test_non_root(self): self.mocker.reset() # Forget the thing done in setUp self.mocker.replace("os.getuid")() self.mocker.result(1000) self.mocker.replay() sys_exit = self.assertRaises(SystemExit, main, ["-c", self.make_working_config()]) self.assertIn("landscape-config must be run as root", str(sys_exit)) def test_main_with_help_and_non_root(self): """It's possible to call 'landscape-config --help' as normal user.""" self.mocker.reset() # Forget the thing done in setUp output = StringIO() self.mocker.replace("sys.stdout").write(ANY) self.mocker.call(output.write) self.mocker.replay() self.assertRaises(SystemExit, main, ["--help"]) self.assertIn("show this help message and exit", output.getvalue()) def test_main_with_help_and_non_root_short(self): """It's possible to call 'landscape-config -h' as normal user.""" self.mocker.reset() # Forget the thing done in setUp output = StringIO() self.mocker.replace("sys.stdout").write(ANY) self.mocker.call(output.write) self.mocker.replay() self.assertRaises(SystemExit, main, ["-h"]) self.assertIn("show this help message and exit", output.getvalue()) def test_import_from_file(self): sysvconfig_mock = self.mocker.patch(SysVConfig) sysvconfig_mock.set_start_on_boot(True) sysvconfig_mock.restart_landscape() self.mocker.result(True) self.mocker.replay() configuration = ( "[client]\n" "computer_title = New Title\n" "account_name = New Name\n" "registration_key = New Password\n" "http_proxy = http://new.proxy\n" "https_proxy = https://new.proxy\n" "url = http://new.url\n") import_filename = self.makeFile(configuration, basename="import_config") config_filename = self.makeFile("", basename="final_config") config = self.get_config(["--config", config_filename, "--silent", "--import", import_filename]) setup(config) options = ConfigParser() options.read(config_filename) self.assertEqual(dict(options.items("client")), {"computer_title": "New Title", "account_name": "New Name", "registration_key": "New Password", "http_proxy": "http://new.proxy", "https_proxy": "https://new.proxy", "url": "http://new.url"}) def test_import_from_empty_file(self): self.mocker.replay() config_filename = self.makeFile("", basename="final_config") import_filename = self.makeFile("", basename="import_config") # Use a command line option as well to test the precedence. try: self.get_config(["--config", config_filename, "--silent", "--import", import_filename]) except ImportOptionError, error: self.assertEqual(str(error), "Nothing to import at %s." % import_filename) else: self.fail("ImportOptionError not raised") def test_import_from_non_existent_file(self): self.mocker.replay() config_filename = self.makeFile("", basename="final_config") import_filename = self.makeFile(basename="import_config") # Use a command line option as well to test the precedence. try: self.get_config(["--config", config_filename, "--silent", "--import", import_filename]) except ImportOptionError, error: self.assertEqual(str(error), "File %s doesn't exist." 
% import_filename) else: self.fail("ImportOptionError not raised") def test_import_from_file_with_empty_client_section(self): self.mocker.replay() old_configuration = "[client]\n" config_filename = self.makeFile("", old_configuration, basename="final_config") import_filename = self.makeFile("", basename="import_config") # Use a command line option as well to test the precedence. try: self.get_config(["--config", config_filename, "--silent", "--import", import_filename]) except ImportOptionError, error: self.assertEqual(str(error), "Nothing to import at %s." % import_filename) else: self.fail("ImportOptionError not raised") def test_import_from_bogus_file(self): self.mocker.replay() config_filename = self.makeFile("", basename="final_config") import_filename = self.makeFile("BOGUS!", basename="import_config") # Use a command line option as well to test the precedence. try: self.get_config(["--config", config_filename, "--silent", "--import", import_filename]) except ImportOptionError, error: self.assertIn("Nothing to import at %s" % import_filename, str(error)) else: self.fail("ImportOptionError not raised") def test_import_from_unreadable_file(self): """ An error is raised when unable to read configuration from the specified file. """ self.mocker.replay() import_filename = self.makeFile( "[client]\nfoo=bar", basename="import_config") # Remove read permissions os.chmod(import_filename, os.stat(import_filename).st_mode - 0444) error = self.assertRaises( ImportOptionError, self.get_config, ["--import", import_filename]) expected_message = ("Couldn't read configuration from %s." % import_filename) self.assertEqual(str(error), expected_message) def test_import_from_file_preserves_old_options(self): sysvconfig_mock = self.mocker.patch(SysVConfig) sysvconfig_mock.set_start_on_boot(True) sysvconfig_mock.restart_landscape() self.mocker.result(True) self.mocker.replay() old_configuration = ( "[client]\n" "computer_title = Old Title\n" "account_name = Old Name\n" "registration_key = Old Password\n" "http_proxy = http://old.proxy\n" "https_proxy = https://old.proxy\n" "url = http://old.url\n") new_configuration = ( "[client]\n" "account_name = New Name\n" "registration_key = New Password\n" "url = http://new.url\n") config_filename = self.makeFile(old_configuration, basename="final_config") import_filename = self.makeFile(new_configuration, basename="import_config") # Use a command line option as well to test the precedence. config = self.get_config(["--config", config_filename, "--silent", "--import", import_filename, "-p", "Command Line Password"]) setup(config) options = ConfigParser() options.read(config_filename) self.assertEqual(dict(options.items("client")), {"computer_title": "Old Title", "account_name": "New Name", "registration_key": "Command Line Password", "http_proxy": "http://old.proxy", "https_proxy": "https://old.proxy", "url": "http://new.url"}) def test_import_from_file_may_reset_old_options(self): """ This test ensures that setting an empty option in an imported configuration file will actually set the local value to empty too, rather than being ignored. 
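        For example, importing a file containing just:

            [client]
            registration_key =

        must blank out a registration_key inherited from the existing
        configuration, as the assertion below checks.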
""" sysvconfig_mock = self.mocker.patch(SysVConfig) sysvconfig_mock.set_start_on_boot(True) sysvconfig_mock.restart_landscape() self.mocker.result(True) self.mocker.replay() old_configuration = ( "[client]\n" "computer_title = Old Title\n" "account_name = Old Name\n" "registration_key = Old Password\n" "url = http://old.url\n") new_configuration = ( "[client]\n" "registration_key =\n") config_filename = self.makeFile(old_configuration, basename="final_config") import_filename = self.makeFile(new_configuration, basename="import_config") config = self.get_config(["--config", config_filename, "--silent", "--import", import_filename]) setup(config) options = ConfigParser() options.read(config_filename) self.assertEqual(dict(options.items("client")), {"computer_title": "Old Title", "account_name": "Old Name", "registration_key": "", # <== "url": "http://old.url"}) def test_import_from_url(self): sysvconfig_mock = self.mocker.patch(SysVConfig) sysvconfig_mock.set_start_on_boot(True) sysvconfig_mock.restart_landscape() self.mocker.result(True) configuration = ( "[client]\n" "computer_title = New Title\n" "account_name = New Name\n" "registration_key = New Password\n" "http_proxy = http://new.proxy\n" "https_proxy = https://new.proxy\n" "url = http://new.url\n") fetch_mock = self.mocker.replace("landscape.lib.fetch.fetch") fetch_mock("https://config.url") self.mocker.result(configuration) print_text_mock = self.mocker.replace(print_text) print_text_mock("Fetching configuration from https://config.url...") self.mocker.replay() config_filename = self.makeFile("", basename="final_config") config = self.get_config(["--config", config_filename, "--silent", "--import", "https://config.url"]) setup(config) options = ConfigParser() options.read(config_filename) self.assertEqual(dict(options.items("client")), {"computer_title": "New Title", "account_name": "New Name", "registration_key": "New Password", "http_proxy": "http://new.proxy", "https_proxy": "https://new.proxy", "url": "http://new.url"}) def test_import_from_url_with_http_code_fetch_error(self): fetch_mock = self.mocker.replace("landscape.lib.fetch.fetch") fetch_mock("https://config.url") self.mocker.throw(HTTPCodeError(501, "")) print_text_mock = self.mocker.replace(print_text) print_text_mock("Fetching configuration from https://config.url...") self.mocker.replay() config_filename = self.makeFile("", basename="final_config") try: self.get_config(["--config", config_filename, "--silent", "--import", "https://config.url"]) except ImportOptionError, error: self.assertEqual(str(error), "Couldn't download configuration from " "https://config.url: Server " "returned HTTP code 501") else: self.fail("ImportOptionError not raised") def test_import_from_url_with_pycurl_error(self): fetch_mock = self.mocker.replace("landscape.lib.fetch.fetch") fetch_mock("https://config.url") self.mocker.throw(PyCurlError(60, "pycurl message")) print_text_mock = self.mocker.replace(print_text) print_text_mock("Fetching configuration from https://config.url...") self.mocker.replay() config_filename = self.makeFile("", basename="final_config") try: self.get_config(["--config", config_filename, "--silent", "--import", "https://config.url"]) except ImportOptionError, error: self.assertEqual(str(error), "Couldn't download configuration from " "https://config.url: Error 60: pycurl message") else: self.fail("ImportOptionError not raised") def test_import_from_url_with_empty_content(self): fetch_mock = self.mocker.replace("landscape.lib.fetch.fetch") 
fetch_mock("https://config.url") self.mocker.result("") print_text_mock = self.mocker.replace(print_text) print_text_mock("Fetching configuration from https://config.url...") self.mocker.replay() # Use a command line option as well to test the precedence. try: self.get_config(["--silent", "--import", "https://config.url"]) except ImportOptionError, error: self.assertEqual(str(error), "Nothing to import at https://config.url.") else: self.fail("ImportOptionError not raised") def test_import_from_url_with_bogus_content(self): fetch_mock = self.mocker.replace("landscape.lib.fetch.fetch") fetch_mock("https://config.url") self.mocker.result("BOGUS!") print_text_mock = self.mocker.replace(print_text) print_text_mock("Fetching configuration from https://config.url...") self.mocker.replay() # Use a command line option as well to test the precedence. try: self.get_config(["--silent", "--import", "https://config.url"]) except ImportOptionError, error: self.assertEqual("Nothing to import at https://config.url.", str(error)) else: self.fail("ImportOptionError not raised") def test_import_error_is_handled_nicely_by_main(self): fetch_mock = self.mocker.replace("landscape.lib.fetch.fetch") fetch_mock("https://config.url") self.mocker.throw(HTTPCodeError(404, "")) print_text_mock = self.mocker.replace(print_text) print_text_mock("Fetching configuration from https://config.url...") print_text_mock(CONTAINS("Server returned HTTP code 404"), error=True) self.mocker.replay() system_exit = self.assertRaises( SystemExit, main, ["--import", "https://config.url"]) self.assertEqual(system_exit.code, 1) def test_base64_ssl_public_key_is_exported_to_file(self): sysvconfig_mock = self.mocker.patch(SysVConfig) sysvconfig_mock.set_start_on_boot(True) sysvconfig_mock.restart_landscape() self.mocker.result(True) data_path = self.makeDir() config_filename = self.makeFile("[client]\ndata_path=%s" % data_path) key_filename = os.path.join(data_path, os.path.basename(config_filename) + ".ssl_public_key") print_text_mock = self.mocker.replace(print_text) print_text_mock("Writing SSL CA certificate to %s..." % key_filename) self.mocker.replay() config = self.get_config(["--silent", "-c", config_filename, "-u", "url", "-a", "account", "-t", "title", "--ssl-public-key", "base64:SGkgdGhlcmUh"]) config.data_path = data_path setup(config) self.assertEqual("Hi there!", open(key_filename, "r").read()) options = ConfigParser() options.read(config_filename) self.assertEqual(options.get("client", "ssl_public_key"), key_filename) def test_normal_ssl_public_key_is_not_exported_to_file(self): sysvconfig_mock = self.mocker.patch(SysVConfig) sysvconfig_mock.set_start_on_boot(True) sysvconfig_mock.restart_landscape() self.mocker.result(True) self.mocker.replay() config_filename = self.makeFile("") config = self.get_config(["--silent", "-c", config_filename, "-u", "url", "-a", "account", "-t", "title", "--ssl-public-key", "/some/filename"]) setup(config) key_filename = config_filename + ".ssl_public_key" self.assertFalse(os.path.isfile(key_filename)) options = ConfigParser() options.read(config_filename) self.assertEqual(options.get("client", "ssl_public_key"), "/some/filename") # We test them individually since they must work individually. 
def test_import_from_url_honors_http_proxy(self): self.ensure_import_from_url_honors_proxy_options("http_proxy") def test_import_from_url_honors_https_proxy(self): self.ensure_import_from_url_honors_proxy_options("https_proxy") def ensure_import_from_url_honors_proxy_options(self, proxy_option): def check_proxy(url): self.assertEqual(os.environ.get(proxy_option), "http://proxy") fetch_mock = self.mocker.replace("landscape.lib.fetch.fetch") fetch_mock("https://config.url") self.mocker.call(check_proxy) # Doesn't matter. We just want to check the context around it. self.mocker.result("") print_text_mock = self.mocker.replace(print_text) print_text_mock("Fetching configuration from https://config.url...") self.mocker.replay() config_filename = self.makeFile("", basename="final_config") try: self.get_config(["--config", config_filename, "--silent", "--" + proxy_option.replace("_", "-"), "http://proxy", "--import", "https://config.url"]) except ImportOptionError: pass # The returned content is empty. We don't really # care for this test. Mocker will ensure the tests # we care about are done. class RegisterFunctionTest(LandscapeConfigurationTest): helpers = [BrokerServiceHelper] def setUp(self): super(RegisterFunctionTest, self).setUp() self.config = LandscapeSetupConfiguration() self.config.load(["-c", self.config_filename]) def test_register_success(self): service = self.broker_service registration_mock = self.mocker.replace(service.registration) config_mock = self.mocker.replace(service.config) print_text_mock = self.mocker.replace(print_text) reactor_mock = self.mocker.patch(FakeReactor) # This must necessarily happen in the following order. self.mocker.order() # This very informative message is printed out. print_text_mock("Please wait... ", "") time_mock = self.mocker.replace("time") time_mock.sleep(ANY) self.mocker.count(1) # After a nice dance the configuration is reloaded. config_mock.reload() # The register() method is called. We fire the "registration-done" # event after it's done, so that it cascades into a deferred callback. def register_done(): service.reactor.fire("registration-done") registration_mock.register() self.mocker.call(register_done) # The deferred callback finally prints out this message. print_text_mock("System successfully registered.") reactor_mock.stop() # This is actually called after everything else since all deferreds # are synchronous and callbacks will be executed immediately. reactor_mock.run() # Nothing else is printed! print_text_mock(ANY) self.mocker.count(0) self.mocker.replay() # DO IT! return register(self.config, print_text, sys.exit, reactor=FakeReactor()) def test_register_failure(self): """ When registration fails because of invalid credentials, a message will be printed to the console and the program will exit. """ service = self.broker_service self.log_helper.ignore_errors(InvalidCredentialsError) registration_mock = self.mocker.replace(service.registration) config_mock = self.mocker.replace(service.config) print_text_mock = self.mocker.replace(print_text) reactor_mock = self.mocker.patch(FakeReactor) # This must necessarily happen in the following order. self.mocker.order() # This very informative message is printed out. print_text_mock("Please wait... ", "") time_mock = self.mocker.replace("time") time_mock.sleep(ANY) self.mocker.count(1) # After a nice dance the configuration is reloaded. config_mock.reload() # The register() method is called. 
We fire the "registration-failed" # event after it's done, so that it cascades into a deferred errback. def register_done(): service.reactor.fire("registration-failed") registration_mock.register() self.mocker.call(register_done) # The deferred errback finally prints out this message. print_text_mock("Invalid account name or registration key.", error=True) reactor_mock.stop() # This is actually called after everything else since all deferreds # are synchronous and callbacks will be executed immediately. reactor_mock.run() # Nothing else is printed! print_text_mock(ANY) self.mocker.count(0) self.mocker.replay() # DO IT! exit = [] register(self.config, print_text, exit.append, reactor=FakeReactor()) self.assertEqual([2], exit) def test_register_exchange_failure(self): """ When registration fails because the server couldn't be contacted, a message is printed and the program quits. """ service = self.broker_service registration_mock = self.mocker.replace(service.registration) config_mock = self.mocker.replace(service.config) print_text_mock = self.mocker.replace(print_text) reactor_mock = self.mocker.patch(FakeReactor) # This must necessarily happen in the following order. self.mocker.order() # This very informative message is printed out. print_text_mock("Please wait... ", "") time_mock = self.mocker.replace("time") time_mock.sleep(ANY) self.mocker.count(1) # After a nice dance the configuration is reloaded. config_mock.reload() def register_done(): service.reactor.fire("exchange-failed") registration_mock.register() self.mocker.call(register_done) # The deferred errback finally prints out this message. print_text_mock("We were unable to contact the server. " "Your internet connection may be down. " "The landscape client will continue to try and " "contact the server periodically.", error=True) reactor_mock.stop() # This is actually called after everything else since all deferreds # are synchronous and callbacks will be executed immediately. reactor_mock.run() # Nothing else is printed! print_text_mock(ANY) self.mocker.count(0) self.mocker.replay() # DO IT! exit = [] register(self.config, print_text, exit.append, reactor=FakeReactor()) self.assertEqual([2], exit) def test_register_timeout_failure(self): service = self.broker_service registration_mock = self.mocker.replace(service.registration) config_mock = self.mocker.replace(service.config) print_text_mock = self.mocker.replace(print_text) reactor_mock = self.mocker.patch(FakeReactor) remote_mock = self.mocker.patch(RemoteBroker) protocol_mock = self.mocker.patch(MethodCallSender) protocol_mock.timeout self.mocker.result(0.1) self.mocker.count(0, None) # This must necessarily happen in the following order. self.mocker.order() # This very informative message is printed out. print_text_mock("Please wait... ", "") time_mock = self.mocker.replace("time") time_mock.sleep(ANY) self.mocker.count(1) # After a nice dance the configuration is reloaded. config_mock.reload() remote_mock.call_on_event(ANY) self.mocker.result(succeed(None)) registration_mock.register() self.mocker.passthrough() # This is actually called after everything else since all deferreds # are synchronous and callbacks will be executed immediately. reactor_mock.run() # Nothing else is printed! print_text_mock(ANY) self.mocker.count(0) self.mocker.replay() # DO IT! 
fake_reactor = FakeReactor() fake_reactor._reactor = Clock() deferred = register(self.config, print_text, sys.exit, reactor=fake_reactor) fake_reactor._reactor.advance(100) return deferred def test_register_bus_connection_failure(self): """ If the socket can't be connected to, landscape-config will print an explanatory message and exit cleanly. """ # This will make the RemoteBrokerConnector.connect call fail print_text_mock = self.mocker.replace(print_text) time_mock = self.mocker.replace("time") sys_mock = self.mocker.replace("sys") reactor_mock = self.mocker.patch(LandscapeReactor) connector_factory = self.mocker.replace( "landscape.broker.amp.RemoteBrokerConnector", passthrough=False) connector = connector_factory(ANY, ANY) connector.connect(max_retries=0, quiet=True) self.mocker.result(fail(ZeroDivisionError)) print_text_mock(ARGS) time_mock.sleep(ANY) reactor_mock.run() print_text_mock( CONTAINS("There was an error communicating with the " "Landscape client"), error=True) print_text_mock(CONTAINS("This machine will be registered"), error=True) sys_mock.exit(2) connector.disconnect() reactor_mock.stop() self.mocker.replay() config = self.get_config(["-a", "accountname", "--silent"]) return register(config, print_text, sys.exit, max_retries=0) def test_register_bus_connection_failure_ok_no_register(self): """ Exit code 0 will be returned if we can't contact Landscape via DBus and --ok-no-register was passed. """ print_text_mock = self.mocker.replace(print_text) time_mock = self.mocker.replace("time") reactor_mock = self.mocker.patch(LandscapeReactor) print_text_mock(ARGS) time_mock.sleep(ANY) reactor_mock.run() reactor_mock.stop() print_text_mock( CONTAINS("There was an error communicating with the " "Landscape client"), error=True) print_text_mock(CONTAINS("This machine will be registered"), error=True) self.mocker.replay() config = self.get_config( ["-a", "accountname", "--silent", "--ok-no-register"]) return self.assertSuccess(register(config, print_text, sys.exit, max_retries=0)) class RegisterFunctionRetryTest(LandscapeConfigurationTest): helpers = [BrokerServiceHelper] def setUp(self): super(RegisterFunctionRetryTest, self).setUp() self.config = LandscapeSetupConfiguration() self.config.load(["-c", self.config_filename]) def test_register_with_retry_parameters(self): """ Retry parameters are passed to the L{connect} method of the connector. """ print_text_mock = self.mocker.replace(print_text) time_mock = self.mocker.replace("time") sys_mock = self.mocker.replace("sys") reactor_mock = self.mocker.patch(LandscapeReactor) connector_factory = self.mocker.replace( "landscape.broker.amp.RemoteBrokerConnector", passthrough=False) connector = connector_factory(ANY, ANY) connector.connect(quiet=True, max_retries=12) self.mocker.result(succeed(None)) print_text_mock(ARGS) time_mock.sleep(ANY) reactor_mock.run() print_text_mock( CONTAINS("There was an error communicating with the " "Landscape client"), error=True) print_text_mock(CONTAINS("This machine will be registered"), error=True) sys_mock.exit(2) connector.disconnect() reactor_mock.stop() self.mocker.replay() config = self.get_config(["-a", "accountname", "--silent"]) return register(config, print_text, sys.exit, max_retries=12) def test_register_with_default_retry_parameters(self): """ max_retries has reasonable default behavior - retry 14 times which will result in a wait of about 60 seconds, until the broker has time to start on heavily loaded systems. 
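        The retry delays form a geometric series; with the connector's
        parameters (the values this test assumes), the total comes to
        roughly a minute: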
initialDelay = 0.05 factor = 1.62 maxDelay = 30 max_retries = 14 0.05 * (1 - 1.62 ** 14) / (1 - 1.62) = 69 seconds """ print_text_mock = self.mocker.replace(print_text) time_mock = self.mocker.replace("time") sys_mock = self.mocker.replace("sys") reactor_mock = self.mocker.patch(LandscapeReactor) connector_factory = self.mocker.replace( "landscape.broker.amp.RemoteBrokerConnector", passthrough=False) connector = connector_factory(ANY, ANY) connector.connect(quiet=True, max_retries=14) self.mocker.result(succeed(None)) print_text_mock(ARGS) time_mock.sleep(ANY) reactor_mock.run() print_text_mock( CONTAINS("There was an error communicating with the " "Landscape client"), error=True) print_text_mock(CONTAINS("This machine will be registered"), error=True) sys_mock.exit(2) connector.disconnect() reactor_mock.stop() self.mocker.replay() config = self.get_config(["-a", "accountname", "--silent"]) return register(config, print_text, sys.exit) class RegisterFunctionNoServiceTest(LandscapeTest): def test_register_unknown_error(self): """ When registration fails because of an unknown error, a message is printed and the program exits. """ configuration = LandscapeSetupConfiguration() # We'll just mock the remote here to have it raise an exception. connector_factory = self.mocker.replace( "landscape.broker.amp.RemoteBrokerConnector", passthrough=False) remote_broker = self.mocker.mock() print_text_mock = self.mocker.replace(print_text) reactor_mock = self.mocker.patch(FakeReactor) # This is unordered. It's just way too much of a pain. print_text_mock("Please wait... ", "") time_mock = self.mocker.replace("time") time_mock.sleep(ANY) self.mocker.count(1) # SNORE connector = connector_factory(ANY, configuration) connector.connect(max_retries=0, quiet=True) self.mocker.result(succeed(remote_broker)) remote_broker.reload_configuration() self.mocker.result(succeed(None)) remote_broker.call_on_event(ANY) self.mocker.result(succeed(None)) # here it is! remote_broker.register() self.mocker.result(fail(ZeroDivisionError)) print_text_mock(ANY, error=True) def check_logged_failure(text, error): self.assertTrue("ZeroDivisionError" in text) self.mocker.call(check_logged_failure) print_text_mock("Unknown error occurred.", error=True) # WHOAH DUDE. This waits for callLater(0, reactor.stop). connector.disconnect() reactor_mock.stop() reactor_mock.run() self.mocker.replay() exit = [] register(configuration, print_text, exit.append, max_retries=0, reactor=FakeReactor()) self.assertEqual(exit, [2]) class SSLCertificateDataTest(LandscapeConfigurationTest): def test_store_public_key_data(self): """ L{store_public_key_data} writes the SSL CA supplied by the server to a file for later use, this file is called after the name of the configuration file with .ssl_public_key. """ config = self.get_config([]) os.mkdir(config.data_path) key_filename = os.path.join( config.data_path, os.path.basename(config.get_config_filename()) + ".ssl_public_key") print_text_mock = self.mocker.replace(print_text) print_text_mock("Writing SSL CA certificate to %s..." % key_filename) self.mocker.replay() self.assertEqual(key_filename, store_public_key_data(config, "123456789")) self.assertEqual("123456789", open(key_filename, "r").read()) def test_fetch_base64_ssl(self): """ L{fetch_base64_ssl_public_certificate} should pull a JSON object from http://providedhostname/get-ca-cert. And return the custom_ca_cert data if it exists. 
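        The endpoint is expected to answer with a JSON payload of the form
        (matching the canned response used below):

            {"custom_ca_cert": "base64:<base64 data>"}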
""" base64_cert = "base64: MTIzNDU2Nzg5MA==" # encoded woo hoo fetch_mock = self.mocker.replace("landscape.lib.fetch.fetch") fetch_mock("http://fakehost/get-ca-cert", insecure=True) self.mocker.result( "{\"custom_ca_cert\": \"%s\"}" % base64_cert) self.mocker.replay() def check_info(info): self.assertEqual( "Fetching CA certificate from fakehost if available...", str(info)) content = fetch_base64_ssl_public_certificate("fakehost", on_info=check_info) self.assertEqual(base64_cert, content) def test_fetch_base64_ssl_no_custom_ca(self): """ L{fetch_base64_ssl_public_certificate} should pull a JSON object from http://providedhostname/get-ca-cert. And return the custom_ca_cert data if it exists, otherwise it should return an empty string."" """ fetch_mock = self.mocker.replace("landscape.lib.fetch.fetch") fetch_mock("http://fakehost/get-ca-cert", insecure=True) self.mocker.result("{}") print_text_mock = self.mocker.replace(print_text) print_text_mock( "Fetching CA certificate from fakehost if available...") print_text_mock("No custom CA certificate available for fakehost.") self.mocker.replay() content = fetch_base64_ssl_public_certificate("fakehost", on_info=print_text) self.assertEqual("", content) def test_fetch_base64_ssl_with_http_code_fetch_error(self): fetch_mock = self.mocker.replace("landscape.lib.fetch.fetch") fetch_mock("http://fakehost/get-ca-cert", insecure=True) self.mocker.throw(HTTPCodeError(404, "")) print_text_mock = self.mocker.replace(print_text) print_text_mock( "Fetching CA certificate from fakehost if available...") self.mocker.replay() def check_error(error): self.assertEqual("Unable to fetch CA certificate from discovered " "server fakehost: Server does not support client " "auto-registation.", str(error)) content = fetch_base64_ssl_public_certificate("fakehost", on_info=print_text, on_error=check_error) self.assertEquals("", content) def test_fetch_base64_ssl_with_pycurl_error(self): fetch_mock = self.mocker.replace("landscape.lib.fetch.fetch") fetch_mock("http://fakehost/get-ca-cert", insecure=True) self.mocker.throw(PyCurlError(60, "pycurl message")) print_text_mock = self.mocker.replace(print_text) print_text_mock( "Fetching CA certificate from fakehost if available...") self.mocker.replay() def check_error(error): self.assertEqual("Unable to fetch CA certificate from fakehost: " "Error 60: pycurl message", str(error)) content = fetch_base64_ssl_public_certificate("fakehost", on_info=print_text, on_error=check_error) self.assertEquals("", content) landscape-client-14.01/landscape/tests/test_deployment.py0000644000175000017500000007054412301414317023434 0ustar andreasandreasimport sys import os from optparse import OptionParser from StringIO import StringIO from textwrap import dedent from landscape.deployment import Configuration, get_versioned_persist from landscape.manager.config import ManagerConfiguration from landscape.tests.helpers import LandscapeTest, LogKeeperHelper from landscape.tests.mocker import ANY class BabbleConfiguration(Configuration): config_section = "babble" default_config_filenames = [] def make_parser(self): parser = super(BabbleConfiguration, self).make_parser() parser.add_option("--whatever", metavar="STUFF") return parser class ConfigurationTest(LandscapeTest): helpers = [LogKeeperHelper] def setUp(self): super(ConfigurationTest, self).setUp() self.reset_config() def reset_config(self, configuration_class=None): if not configuration_class: class MyConfiguration(ManagerConfiguration): default_config_filenames = [] configuration_class = 
MyConfiguration self.config_class = configuration_class self.config = configuration_class() self.parser = self.config.make_parser() def test_get(self): self.write_config_file(log_level="file") self.config.load([]) self.assertEqual(self.config.get("log_level"), "file") self.assertEqual(self.config.get("random_key"), None) def test_get_config_object(self): """ Calling L{get_config_object} returns a L{ConfigObj} bound to the correct file and with its options set in the manner we expect. """ config_obj = self.config._get_config_object() self.assertEqual(self.config.get_config_filename(), config_obj.filename) self.assertFalse(config_obj.list_values) def test_get_config_object_with_alternative_config(self): """ Calling L{get_config_object} with the L{alternative_config} parameter set, this source is used instead of calling through to L{get_config_filename}. """ config_obj = self.config._get_config_object( alternative_config=StringIO("[client]\nlog_level = error\n")) self.assertEqual(None, config_obj.filename) def write_config_file(self, **kwargs): section_name = kwargs.pop("section_name", "client") config = "\n".join(["[%s]" % (section_name,)] + ["%s = %s" % pair for pair in kwargs.items()]) self.config_filename = self.makeFile(config) self.config.default_config_filenames[:] = [self.config_filename] def test_command_line_has_precedence(self): self.write_config_file(log_level="file") self.config.load(["--log-level", "command line"]) self.assertEqual(self.config.log_level, "command line") def test_command_line_option_without_default(self): class MyConfiguration(Configuration): def make_parser(self): parser = OptionParser() # Keep the dash in the option name to ensure it works. parser.add_option("--foo-bar") return parser self.assertEqual(MyConfiguration().foo_bar, None) def test_command_line_with_required_options(self): class MyConfiguration(Configuration): required_options = ("foo_bar",) config = None def make_parser(self): parser = super(MyConfiguration, self).make_parser() # Keep the dash in the option name to ensure it works. parser.add_option("--foo-bar", metavar="NAME") return parser self.reset_config(configuration_class=MyConfiguration) self.write_config_file() sys_exit_mock = self.mocker.replace(sys.exit) sys_exit_mock(ANY) self.mocker.count(1) self.mocker.replay() self.config.load([]) # This will call our mocked sys.exit. self.config.load(["--foo-bar", "ooga"]) self.assertEqual(self.config.foo_bar, "ooga") def test_command_line_with_unsaved_options(self): class MyConfiguration(Configuration): unsaved_options = ("foo_bar",) config = None def make_parser(self): parser = super(MyConfiguration, self).make_parser() # Keep the dash in the option name to ensure it works. 
parser.add_option("--foo-bar", metavar="NAME") return parser self.reset_config(configuration_class=MyConfiguration) self.write_config_file() self.config.load(["--foo-bar", "ooga"]) self.assertEqual(self.config.foo_bar, "ooga") self.config.write() self.config.load([]) self.assertEqual(self.config.foo_bar, None) def test_config_file_has_precedence_over_default(self): self.write_config_file(log_level="file") self.config.load([]) self.assertEqual(self.config.log_level, "file") def test_different_config_file_section(self): self.reset_config(configuration_class=BabbleConfiguration) self.write_config_file(section_name="babble", whatever="yay") self.config.load([]) self.assertEqual(self.config.whatever, "yay") def test_no_section_available(self): config_filename = self.makeFile("") class MyConfiguration(Configuration): config_section = "nonexistent" default_config_filenames = (config_filename,) self.reset_config(configuration_class=MyConfiguration) self.config.load([]) def test_write_configuration(self): self.write_config_file(log_level="debug") self.config.log_level = "warning" self.config.write() data = open(self.config_filename).read() self.assertConfigEqual(data, "[client]\nlog_level = warning") def test_write_configuration_with_section(self): self.reset_config(configuration_class=BabbleConfiguration) self.write_config_file(section_name="babble", whatever="yay") self.config.whatever = "boo" self.config.write() data = open(self.config_filename).read() self.assertConfigEqual(data, "[babble]\nwhatever = boo") def test_write_unrelated_configuration_back(self): """ If a configuration file has a section that isn't processed by a particular configuration object, that unrelated configuration section will be maintained even when written back. """ self.reset_config(configuration_class=BabbleConfiguration) config = "[babble]\nwhatever = zoot\n[goojy]\nunrelated = yes" config_filename = self.makeFile(config) self.config.load_configuration_file(config_filename) self.config.whatever = "boo" self.config.write() data = open(config_filename).read() self.assertConfigEqual( data, "[babble]\nwhatever = boo\n\n[goojy]\nunrelated = yes") def test_write_on_the_right_default_config_file(self): self.write_config_file(log_level="debug") config_class = self.config_class config_class.default_config_filenames.insert(0, "/non/existent") self.config.load([]) self.config.log_level = "warning" self.config.write() data = open(self.config_filename).read() self.assertConfigEqual(data, "[client]\nlog_level = warning\n") def test_write_empty_list_values_instead_of_double_quotes(self): """ Since list values are strings, an empty string such as C{""} will be written to the config file as an option with a empty value instead of C{""}. """ self.write_config_file(include_manager_plugins="ScriptExecution") self.config.load([]) self.config.include_manager_plugins = "" self.config.write() data = open(self.config_filename).read() self.assertConfigEqual(data, "[client]\ninclude_manager_plugins = \n") def test_dont_write_config_specified_default_options(self): """ Don't write options to the file if the value exactly matches the default and the value already existed in the original config file. 
""" self.write_config_file(log_level="debug") self.config.log_level = "info" self.config.write() data = open(self.config_filename).read() self.assertConfigEqual(data, "[client]") def test_dont_write_unspecified_default_options(self): """ Don't write options to the file if the value exactly matches the default and the value did not exist in the original config file. """ self.write_config_file() self.config.log_level = "info" self.config.write() data = open(self.config_filename).read() self.assertConfigEqual(data, "[client]") def test_dont_write_client_section_default_options(self): """ Don't write options to the file if they exactly match the default and didn't already exist in the file. """ self.write_config_file(log_level="debug") self.config.log_level = "info" self.config.write() data = open(self.config_filename).read() self.assertConfigEqual(data, "[client]") def test_do_write_preexisting_default_options(self): """ If the value of an option matches the default, but the option was already written in the file, then write it back to the file. """ config = "[client]\nlog_level = info\n" config_filename = self.makeFile(config) self.config.load_configuration_file(config_filename) self.config.log_level = "info" self.config.write() data = open(config_filename).read() self.assertConfigEqual(data, "[client]\nlog_level = info\n") def test_dont_delete_explicitly_set_default_options(self): """ If the user explicitly sets a configuration option to its default value, we shouldn't delete that option from the conf file when we write it, just to be nice. """ self.write_config_file(log_level="info") self.config.write() data = open(self.config_filename).read() self.assertConfigEqual(data, "[client]\nlog_level = info") def test_dont_write_config_option(self): self.write_config_file() self.config.config = self.config_filename self.config.write() data = open(self.config_filename).read() self.assertConfigEqual(data, "[client]") def test_write_command_line_options(self): self.write_config_file() self.config.load(["--log-level", "warning"]) self.config.write() data = open(self.config_filename).read() self.assertConfigEqual(data, "[client]\nlog_level = warning\n") def test_write_command_line_precedence(self): """Command line options take precedence over config file when writing. """ self.write_config_file(log_level="debug") self.config.load(["--log-level", "warning"]) self.config.write() data = open(self.config_filename).read() self.assertConfigEqual(data, "[client]\nlog_level = warning\n") def test_write_manually_set_precedence(self): """Manually set options take precedence over command line when writing. """ self.write_config_file(log_level="debug") self.config.load(["--log-level", "warning"]) self.config.log_level = "error" self.config.write() data = open(self.config_filename).read() self.assertConfigEqual(data, "[client]\nlog_level = error\n") def test_write_to_given_config_file(self): filename = self.makeFile() self.config.load( ["--log-level", "warning", "--config", filename], accept_nonexistent_config=True) self.config.log_level = "error" self.config.write() data = open(filename).read() self.assertConfigEqual(data, "[client]\nlog_level = error\n") def test_comments_are_maintained(self): """ When we write an updated config file, comments that existed previously are maintained. 
""" config = "[client]\n# Comment 1\nlog_level = file\n#Comment 2\n" filename = self.makeFile(config) self.config.load_configuration_file(filename) self.config.log_level = "error" self.config.write() new_config = open(filename).read() self.assertConfigEqual( new_config, "[client]\n# Comment 1\nlog_level = error\n#Comment 2\n") def test_config_option(self): options = self.parser.parse_args(["--config", "hello.cfg"])[0] self.assertEqual(options.config, "hello.cfg") def test_load_config_from_option(self): """ Ensure config option of type string shows up in self.config when config.load is called. """ filename = self.makeFile("[client]\nhello = world\n") self.config.load(["--config", filename]) self.assertEqual(self.config.hello, "world") def test_load_typed_option_from_file(self): """ Ensure config option of type int shows up in self.config when config.load is called. """ class MyConfiguration(self.config_class): def make_parser(self): parser = super(MyConfiguration, self).make_parser() parser.add_option("--year", default=1, type="int") return parser filename = self.makeFile("[client]\nyear = 2008\n") config = MyConfiguration() config.load(["--config", filename]) self.assertEqual(config.year, 2008) def test_load_typed_option_from_command_line(self): """ Ensure command line config option of type int shows up in self.config when config.load is called. """ class MyConfiguration(self.config_class): def make_parser(self): parser = super(MyConfiguration, self).make_parser() parser.add_option("--year", default=1, type="int") return parser self.write_config_file() config = MyConfiguration() config.load(["--year", "2008"]) self.assertEqual(config.year, 2008) def test_reload(self): """ Ensure updated options written to config file are surfaced on config.reload() """ filename = self.makeFile("[client]\nhello = world1\n") self.config.load(["--config", filename]) open(filename, "w").write("[client]\nhello = world2\n") self.config.reload() self.assertEqual(self.config.hello, "world2") def test_load_cannot_read(self): """ C{config.load} exits the process if the specific config file can't be read because of permission reasons. """ filename = self.makeFile("[client]\nhello = world1\n") os.chmod(filename, 0) error = self.assertRaises( SystemExit, self.config.load, ["--config", filename]) self.assertEqual( "error: config file %s can't be read" % filename, str(error)) def test_load_not_found(self): """ C{config.load} exits the process if the specified config file is not found. """ filename = "/not/here" error = self.assertRaises( SystemExit, self.config.load, ["--config", filename]) self.assertEqual( "error: config file %s can't be read" % filename, str(error)) def test_load_cannot_read_default(self): """ C{config.load} exits the process if the default config file can't be read because of permission reasons. """ self.write_config_file() [default] = self.config.default_config_filenames os.chmod(default, 0) error = self.assertRaises(SystemExit, self.config.load, []) self.assertEqual( "error: config file %s can't be read" % default, str(error)) def test_load_not_found_default(self): """ C{config.load} exits the process if the default config file is not found. """ [default] = self.config.default_config_filenames[:] = ["/not/here"] error = self.assertRaises(SystemExit, self.config.load, []) self.assertEqual( "error: config file %s can't be read" % default, str(error)) def test_load_cannot_read_many_defaults(self): """ C{config.load} exits the process if none of the default config files exists and can be read. 
""" default1 = self.makeFile("") default2 = self.makeFile("") os.chmod(default1, 0) os.unlink(default2) self.config.default_config_filenames[:] = [default1, default2] error = self.assertRaises(SystemExit, self.config.load, []) self.assertEqual("error: no config file could be read", str(error)) def test_data_directory_option(self): """Ensure options.data_path option can be read by parse_args.""" options = self.parser.parse_args(["--data-path", "/opt/hoojy/var/run"])[0] self.assertEqual(options.data_path, "/opt/hoojy/var/run") def test_data_directory_default(self): """Ensure parse_args sets appropriate data_path default.""" options = self.parser.parse_args([])[0] self.assertEqual(options.data_path, "/var/lib/landscape/client/") def test_url_option(self): """Ensure options.url option can be read by parse_args.""" options = self.parser.parse_args(["--url", "http://mylandscape/message-system"])[0] self.assertEqual(options.url, "http://mylandscape/message-system") def test_url_default(self): """Ensure parse_args sets appropriate url default.""" options = self.parser.parse_args([])[0] self.assertEqual(options.url, self.config.DEFAULT_URL) def test_ping_url_option(self): """Ensure options.ping_url option can be read by parse_args.""" options = self.parser.parse_args(["--ping-url", "http://mylandscape/ping"])[0] self.assertEqual(options.ping_url, "http://mylandscape/ping") def test_ping_url_default(self): """Ensure parse_args sets appropriate ping_url default.""" options = self.parser.parse_args([])[0] self.assertEqual( options.ping_url, "http://landscape.canonical.com/ping") def test_ssl_public_key_option(self): """Ensure options.ssl_public_key option can be read by parse_args.""" options = self.parser.parse_args(["--ssl-public-key", "/tmp/somekeyfile.ssl"])[0] self.assertEqual(options.ssl_public_key, "/tmp/somekeyfile.ssl") def test_ssl_public_key_default(self): """Ensure parse_args sets appropriate ssl_public_key default.""" options = self.parser.parse_args([])[0] self.assertEqual(options.ssl_public_key, None) def test_server_autodiscover_option(self): """ Ensure options.server_autodiscover option can be read by parse_args. """ options = self.parser.parse_args(["--server-autodiscover=true"])[0] self.assertEqual(options.server_autodiscover, "true") def test_server_autodiscover_default(self): """Ensure parse_args sets appropriate server_autodiscover default.""" options = self.parser.parse_args([])[0] self.assertEqual(options.server_autodiscover, False) def test_autodiscover_srv_query_string_option(self): """ Ensure options.autodiscover_srv_query_string option can be read by parse_args. """ options = self.parser.parse_args(["--autodiscover-srv-query-string", "_tcp._landscape.someotherdomain"])[0] self.assertEqual(options.autodiscover_srv_query_string, "_tcp._landscape.someotherdomain") def test_autodiscover_srv_query_string_default(self): """ Ensure parse_args sets appropriate autodiscover_srv_query_string default. """ options = self.parser.parse_args([])[0] self.assertEqual(options.autodiscover_srv_query_string, "_landscape._tcp.localdomain") def test_autodiscover_a_query_string_option(self): """ Ensure options.autodiscover_a_query_string option can be read by parse_args. """ options = self.parser.parse_args(["--autodiscover-a-query-string", "customname.mydomain"])[0] self.assertEqual(options.autodiscover_a_query_string, "customname.mydomain") def test_autodiscover_a_query_string_default(self): """ Ensure parse_args sets appropriate autodiscover_a_query_string default. 
""" options = self.parser.parse_args([])[0] self.assertEqual(options.autodiscover_a_query_string, "landscape.localdomain") def test_log_file_option(self): """Ensure options.log_dir option can be read by parse_args.""" options = self.parser.parse_args(["--log-dir", "/var/log/my-awesome-log"])[0] self.assertEqual(options.log_dir, "/var/log/my-awesome-log") def test_log_level_default(self): """Ensure options.log_level default is set within parse_args.""" options = self.parser.parse_args([])[0] self.assertEqual(options.log_level, "info") def test_log_level_option(self): """Ensure options.log_level option can be read by parse_args.""" options = self.parser.parse_args(["--log-level", "debug"])[0] self.assertEqual(options.log_level, "debug") def test_quiet_option(self): """Ensure options.quiet option can be read by parse_args.""" options = self.parser.parse_args(["--quiet"])[0] self.assertEqual(options.quiet, True) def test_quiet_default(self): """Ensure options.quiet default is set within parse_args.""" options = self.parser.parse_args([])[0] self.assertEqual(options.quiet, False) def test_clones_default(self): """By default, no clones are started.""" self.write_config_file() options = self.parser.parse_args([])[0] self.assertEqual(0, options.clones) def test_clones_option(self): """It's possible to specify additional clones to be started.""" options = self.parser.parse_args(["--clones", "3"])[0] self.assertEqual(3, options.clones) def test_ignore_sigint_option(self): """Ensure options.ignore_sigint option can be read by parse_args.""" options = self.parser.parse_args(["--ignore-sigint"])[0] self.assertEqual(options.ignore_sigint, True) def test_ignore_sigint_default(self): """Ensure options.ignore_sigint default is set within parse_args.""" options = self.parser.parse_args([])[0] self.assertEqual(options.ignore_sigint, False) def test_get_config_filename_precedence(self): """ Validate landscape-client configuration file load precedence. The client config should return the first readable configuration files in the default_config_filenames list if no config option was requested. If a specific config file is requested, use this instead of defaults. If a cmdline --config option is specified this should take precedence over either of the former options. """ default_filename1 = self.makeFile("") default_filename2 = self.makeFile("") explicit_filename = self.makeFile("") loaded_filename = self.makeFile("") self.config.default_config_filenames[:] = [default_filename1, default_filename2] # If nothing else is set, and the first configuration file # isn't readable, return the second default file. os.chmod(default_filename1, 0) self.assertEqual(self.config.get_config_filename(), default_filename2) # If it is readable, than return the first default configuration file. os.chmod(default_filename1, 0644) self.assertEqual(self.config.get_config_filename(), default_filename1) # Unless another file was explicitly loaded before, in which # case return the loaded filename. self.config.load_configuration_file(loaded_filename) self.assertEqual(self.config.get_config_filename(), loaded_filename) # Except in the case where a configuration file was explicitly # requested through the command line or something. In this case, # this is the highest precedence. self.config.config = explicit_filename self.assertEqual(self.config.get_config_filename(), explicit_filename) def test_sockets_path(self): """ The L{Configuration.socket_path} property returns the path to the socket directory. 
""" self.assertEqual( "/var/lib/landscape/client/sockets", self.config.sockets_path) def test_annotations_path(self): """ The L{Configuration.annotations_path} property returns the path to the annotations directory. """ self.assertEqual( "/var/lib/landscape/client/annotations.d", self.config.annotations_path) def test_juju_filename(self): """ The L{Configuration.juju_filename} property returns the path to the juju info file. """ self.assertEqual( "/var/lib/landscape/client/juju-info.json", self.config.juju_filename) def test_clone(self): """The L{Configuration.clone} method clones a configuration.""" self.write_config_file() self.config.load(["--data-path", "/some/path"]) self.config.foo = "bar" config2 = self.config.clone() self.assertEqual(self.config.data_path, config2.data_path) self.assertEqual("bar", config2.foo) def test_duplicate_key(self): """ Duplicate keys in the config file shouldn't result in a fatal error, but the first defined value should be used. """ config = dedent(""" [client] computer_title = frog computer_title = flag """) filename = self.makeFile(config) self.config.load_configuration_file(filename) self.assertEqual("frog", self.config.computer_title) self.assertIn("WARNING: Duplicate keyword name at line 4.", self.logfile.getvalue()) def test_triplicate_key(self): """ Triplicate keys in the config file shouldn't result in a fatal error, but the first defined value should be used. """ config = dedent(""" [client] computer_title = frog computer_title = flag computer_title = flop """) filename = self.makeFile(config) self.config.load_configuration_file(filename) self.assertEqual("frog", self.config.computer_title) logged = self.logfile.getvalue() self.assertIn("WARNING: Parsing failed with several errors.", logged) self.assertIn("First error at line 4.", logged) def test_config_values_after_fault_are_still_read(self): """ Values that appear after the point in a configuration file where a parsing error occurs are correctly parsed. """ config = dedent(""" [client] computer_title = frog computer_title = flag log_level = debug """) filename = self.makeFile(config) self.config.load_configuration_file(filename) self.assertEqual("debug", self.config.log_level) self.assertIn("WARNING: Duplicate keyword name at line 4.", self.logfile.getvalue()) class GetVersionedPersistTest(LandscapeTest): def test_upgrade_service(self): class FakeService(object): persist_filename = self.makePersistFile(content="") service_name = "monitor" upgrade_managers = self.mocker.replace( "landscape.upgraders.UPGRADE_MANAGERS", passthrough=False) upgrade_manager = upgrade_managers["monitor"] upgrade_manager.apply(ANY) stash = [] self.mocker.call(stash.append) self.mocker.replay() persist = get_versioned_persist(FakeService()) self.assertEqual(stash[0], persist) landscape-client-14.01/landscape/tests/test_textmessage.py0000644000175000017500000000763112301414317023602 0ustar andreasandreasimport sys from landscape.textmessage import ( AcceptedTypeError, EmptyMessageError, got_accepted_types, get_message, send_message) from landscape.tests.helpers import ( LandscapeTest, FakeBrokerServiceHelper, StandardIOHelper) class SendMessageTest(LandscapeTest): helpers = [StandardIOHelper, FakeBrokerServiceHelper] def test_send_message(self): """ L{send_message} should send a message of type C{text-message} to the landscape dbus messaging service. 
""" service = self.broker_service service.message_store.set_accepted_types(["text-message"]) result = send_message(u"Hi there!", self.remote) def got_result(result): messages = service.message_store.get_pending_messages() self.assertEqual(len(messages), 1) self.assertMessage(messages[0], {"type": "text-message", "message": u"Hi there!"}) self.assertTrue(service.exchanger.is_urgent()) return result.addCallback(got_result) def test_got_accepted_types_without_text_message_type(self): """ If 'text-message' isn't in the list of accepted types an L{AcceptedTypeError} is raised. """ self.assertRaises(AcceptedTypeError, got_accepted_types, (), self.remote, ()) def test_got_accepted_types(self): """ If 'text-message' is an accepted type a message should be retrieved from the user and sent to the broker. """ service = self.broker_service service.message_store.set_accepted_types(["text-message"]) input = u"Foobl\N{HIRAGANA LETTER A}" self.stdin.write(input.encode("UTF-8")) self.stdin.seek(0, 0) def got_result(result): messages = service.message_store.get_pending_messages() self.assertEqual(len(messages), 1) self.assertMessage(messages[0], {"type": "text-message", "message": u"Foobl\N{HIRAGANA LETTER A}"}) d = got_accepted_types(["text-message"], self.remote, ()) d.addCallback(got_result) return d class ScriptTest(LandscapeTest): helpers = [StandardIOHelper] def test_get_message(self): """ A message should be properly decoded from the command line arguments. """ message = get_message( ["landscape-message", u"\N{HIRAGANA LETTER A}".encode(sys.stdin.encoding), "a!"]) self.assertEqual(message, u"\N{HIRAGANA LETTER A} a!") def test_get_message_stdin(self): """ If no arguments are specified then the message should be read from stdin. """ input = u"Foobl\N{HIRAGANA LETTER A}" self.stdin.write(input.encode("UTF-8")) self.stdin.seek(0, 0) message = get_message(["landscape-message"]) self.assertEqual(self.stdout.getvalue(), "Please enter your message, and send EOF " "(Control + D after newline) when done.\n") self.assertEqual(message, input) def test_get_empty_message_stdin(self): """ If no arguments are specified then the message should be read from stdin. """ self.assertRaises(EmptyMessageError, get_message, ["landscape-message"]) def test_get_message_without_encoding(self): """ If sys.stdin.encoding is None, it's likely a pipe, so try to decode it as UTF-8 by default. 
""" encoding = sys.stdin.encoding sys.stdin.encoding = None try: message = get_message( ["landscape-message", u"\N{HIRAGANA LETTER A}".encode("UTF-8"), "a!"]) finally: sys.stdin.encoding = encoding self.assertEqual(message, u"\N{HIRAGANA LETTER A} a!") landscape-client-14.01/landscape/tests/test_amp.py0000644000175000017500000001265612301414317022031 0ustar andreasandreasfrom twisted.internet.error import ConnectError from twisted.internet.task import Clock from landscape.tests.helpers import LandscapeTest from landscape.reactor import FakeReactor from landscape.deployment import Configuration from landscape.amp import ComponentPublisher, ComponentConnector, remote from landscape.lib.amp import MethodCallError class TestComponent(object): name = "test" @remote def ping(self): return True def non_remote(self): return False class TestComponentConnector(ComponentConnector): component = TestComponent class FakeAMP(object): def __init__(self, locator): self._locator = locator class ComponentPublisherTest(LandscapeTest): def setUp(self): super(ComponentPublisherTest, self).setUp() reactor = FakeReactor() config = Configuration() config.data_path = self.makeDir() self.makeDir(path=config.sockets_path) self.component = TestComponent() self.publisher = ComponentPublisher(self.component, reactor, config) self.publisher.start() self.connector = TestComponentConnector(reactor, config) connected = self.connector.connect() connected.addCallback(lambda remote: setattr(self, "remote", remote)) return connected def tearDown(self): self.connector.disconnect() self.publisher.stop() super(ComponentPublisherTest, self).tearDown() def test_remote_methods(self): """Methods decorated with @remote are accessible remotely.""" result = self.remote.ping() return self.assertSuccess(result, True) def test_protect_non_remote(self): """Methods not decorated with @remote are not accessible remotely.""" result = self.remote.non_remote() failure = self.failureResultOf(result) self.assertTrue(failure.check(MethodCallError)) class ComponentConnectorTest(LandscapeTest): def setUp(self): super(ComponentConnectorTest, self).setUp() self.reactor = FakeReactor() # XXX this should be dropped once the FakeReactor doesn't use the # real reactor anymore under the hood. self.reactor._reactor = Clock() self.config = Configuration() self.config.data_path = self.makeDir() self.makeDir(path=self.config.sockets_path) self.connector = TestComponentConnector(self.reactor, self.config) def test_connect_with_max_retries(self): """ If C{max_retries} is passed to L{RemoteObjectConnector.connect}, then it will give up trying to connect after that amount of times. """ self.log_helper.ignore_errors("Error while connecting to test") deferred = self.connector.connect(max_retries=2) self.assertNoResult(deferred) return self.failureResultOf(deferred).trap(ConnectError) def test_connect_logs_errors(self): """ Connection errors are logged. """ self.log_helper.ignore_errors("Error while connecting to test") def assert_log(ignored): self.assertIn("Error while connecting to test", self.logfile.getvalue()) result = self.connector.connect(max_retries=0) self.assertFailure(result, ConnectError) return result.addCallback(assert_log) def test_connect_with_quiet(self): """ If the C{quiet} option is passed, no errors will be logged. 
""" result = self.connector.connect(max_retries=0, quiet=True) return self.assertFailure(result, ConnectError) def test_reconnect_fires_event(self): """ An event is fired whenever the connection is established again after it has been lost. """ reconnects = [] self.reactor.call_on("test-reconnect", lambda: reconnects.append(True)) component = TestComponent() publisher = ComponentPublisher(component, self.reactor, self.config) publisher.start() deferred = self.connector.connect() self.successResultOf(deferred) self.connector._connector.disconnect() # Simulate a disconnection self.assertEqual([], reconnects) self.reactor._reactor.advance(10) self.assertEqual([True], reconnects) def test_connect_with_factor(self): """ If C{factor} is passed to the L{ComponentConnector.connect} method, then the associated protocol factory will be set to that value. """ component = TestComponent() publisher = ComponentPublisher(component, self.reactor, self.config) publisher.start() deferred = self.connector.connect(factor=1.0) remote = self.successResultOf(deferred) self.assertEqual(1.0, remote._factory.factor) def test_disconnect(self): """ It is possible to call L{ComponentConnector.disconnect} multiple times, even if the connection has been already closed. """ component = TestComponent() publisher = ComponentPublisher(component, self.reactor, self.config) publisher.start() self.connector.connect() self.connector.disconnect() self.connector.disconnect() def test_disconnect_without_connect(self): """ It is possible to call L{ComponentConnector.disconnect} even if the connection was never established. In that case the method is effectively a no-op. """ self.connector.disconnect() landscape-client-14.01/landscape/tests/test_diff.py0000644000175000017500000000212412301414317022151 0ustar andreasandreasfrom landscape.diff import diff from landscape.tests.helpers import LandscapeTest class DiffTest(LandscapeTest): def test_empty(self): self.assertEqual(diff({}, {}), ({}, {}, {})) def test_identical(self): data = {"str": "wubble", "strlist": ["foo", "bar"]} self.assertEqual(diff(data, data), ({}, {}, {})) def test_create(self): old = {} new = {"str": "wubble"} self.assertEqual(diff(old, new), ({"str": "wubble"}, {}, {})) def test_update(self): old = {"str": "wubble"} new = {"str": "ooga"} self.assertEqual(diff(old, new), ({}, {"str": "ooga"}, {})) def test_delete(self): old = {"str": "wubble"} new = {} self.assertEqual(diff(old, new), ({}, {}, {"str": "wubble"})) def test_complex(self): old = {"str": "wubble", "int": 10} new = {"strlist": ["foo", "bar"], "int": 25} self.assertEqual(diff(old, new), ({"strlist": ["foo", "bar"]}, {"int": 25}, {"str": "wubble"})) landscape-client-14.01/landscape/tests/test_service.py0000644000175000017500000000647312301414317022714 0ustar andreasandreasimport logging import signal from twisted.internet import reactor from twisted.internet.task import deferLater from landscape.reactor import FakeReactor from landscape.deployment import Configuration from landscape.service import LandscapeService from landscape.tests.helpers import LandscapeTest class TestComponent(object): name = "monitor" class TestService(LandscapeService): service_name = TestComponent.name class LandscapeServiceTest(LandscapeTest): def setUp(self): super(LandscapeServiceTest, self).setUp() self.config = Configuration() self.config.data_path = self.makeDir() self.makeDir(path=self.config.sockets_path) self.reactor = FakeReactor() signal.signal(signal.SIGUSR1, signal.SIG_DFL) def tearDown(self): 
super(LandscapeServiceTest, self).tearDown() signal.signal(signal.SIGUSR1, signal.SIG_DFL) def test_create_persist(self): """ If a {persist_filename} attribute is defined, a L{Persist} with that filename will be created. """ class PersistService(TestService): persist_filename = self.makeFile(content="") service = PersistService(self.config) self.assertEqual(service.persist.filename, service.persist_filename) def test_no_persist_without_filename(self): """ If no {persist_filename} attribute is defined, no C{persist} attribute will be available. """ service = TestService(self.config) self.assertFalse(hasattr(service, "persist")) def test_install_bpickle_dbus(self): """ A L{LandscapeService} installs the DBus extensions of bpickle. """ dbus_mock = self.mocker.replace("landscape.lib.bpickle_dbus.install") dbus_mock() self.mocker.replay() TestService(self.config) def test_usr1_rotates_logs(self): """ SIGUSR1 should cause logs to be reopened. """ logging.getLogger().addHandler(logging.FileHandler(self.makeFile())) # Store the initial set of handlers original_streams = [handler.stream for handler in logging.getLogger().handlers if isinstance(handler, logging.FileHandler)] # Instantiating LandscapeService should register the handler TestService(self.config) # We'll call it directly handler = signal.getsignal(signal.SIGUSR1) self.assertTrue(handler) handler(None, None) def check(ign): new_streams = [handler.stream for handler in logging.getLogger().handlers if isinstance(handler, logging.FileHandler)] for stream in new_streams: self.assertTrue(stream not in original_streams) # We need to give some room for the callFromThread to run d = deferLater(reactor, 0, lambda: None) return d.addCallback(check) def test_ignore_sigusr1(self): """ SIGUSR1 is ignored if we so request. """ # Instantiating LandscapeService should not register the # handler if we request to ignore it. self.config.ignore_sigusr1 = True TestService(self.config) handler = signal.getsignal(signal.SIGUSR1) self.assertFalse(handler) landscape-client-14.01/landscape/tests/mocker.py0000644000175000017500000023164312301414317021474 0ustar andreasandreas""" Mocker Graceful platform for test doubles in Python: mocks, stubs, fakes, and dummies. Copyright (c) 2007-2010, Gustavo Niemeyer All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import __builtin__
import tempfile
import unittest
import inspect
import shutil
import types
import sys
import os
import gc

if sys.version_info < (2, 4):
    from sets import Set as set # pragma: nocover


__all__ = ["Mocker", "Expect", "expect", "IS", "CONTAINS", "IN", "MATCH",
           "ANY", "ARGS", "KWARGS", "MockerTestCase"]


__author__ = "Gustavo Niemeyer "
__license__ = "BSD"
__version__ = "1.0"


ERROR_PREFIX = "[Mocker] "


# --------------------------------------------------------------------
# Exceptions

class MatchError(AssertionError):
    """Raised when an unknown expression is seen in playback mode."""


# --------------------------------------------------------------------
# Helper for chained-style calling.

class expect(object):
    """This is a simple helper that allows a different call-style.

    With this class one can comfortably do chaining of calls to the mocker
    object responsible for the object being handled. For instance::

        expect(obj.attr).result(3).count(1, 2)

    Is the same as::

        obj.attr
        mocker.result(3)
        mocker.count(1, 2)

    """

    __mocker__ = None

    def __init__(self, mock, attr=None):
        self._mock = mock
        self._attr = attr

    def __getattr__(self, attr):
        return self.__class__(self._mock, attr)

    def __call__(self, *args, **kwargs):
        mocker = self.__mocker__
        if not mocker:
            mocker = self._mock.__mocker__
        getattr(mocker, self._attr)(*args, **kwargs)
        return self


def Expect(mocker):
    """Create an expect() "function" using the given Mocker instance.

    This helper allows defining an expect() "function" which works even
    in trickier cases such as:

        expect = Expect(mymocker)
        expect(iter(mock)).generate([1, 2, 3])

    """
    return type("Expect", (expect,), {"__mocker__": mocker})


# --------------------------------------------------------------------
# Extensions to Python's unittest.

class MockerTestCase(unittest.TestCase):
    """unittest.TestCase subclass with Mocker support.

    @ivar mocker: The mocker instance.

    This is a convenience only. Mocker may easily be used with the
    standard C{unittest.TestCase} class if wanted.

    Test methods have a Mocker instance available on C{self.mocker}.
    At the end of each test method, expectations of the mocker will
    be verified, and any requested changes made to the environment
    will be restored.

    In addition to the integration with Mocker, this class provides
    a few additional helper methods.
    """

    def __init__(self, methodName="runTest"):
        # So here is the trick: we take the real test method, wrap it in
        # a function that does the job we have to do, and insert it in the
        # *instance* dictionary, so that getattr() will return our
        # replacement rather than the class method.
        test_method = getattr(self, methodName, None)
        if test_method is not None:
            def test_method_wrapper():
                try:
                    result = test_method()
                except:
                    raise
                else:
                    if (self.mocker.is_recording() and
                        self.mocker.get_events()):
                        raise RuntimeError("Mocker must be put in replay "
                                           "mode with self.mocker.replay()")
                    if (hasattr(result, "addCallback") and
                        hasattr(result, "addErrback")):
                        def verify(result):
                            self.mocker.verify()
                            return result
                        result.addCallback(verify)
                    else:
                        self.mocker.verify()
                        self.mocker.restore()
                    return result
            # Copy all attributes from the original method..
            for attr in dir(test_method):
                # .. unless they're present in our wrapper already.
                if not hasattr(test_method_wrapper, attr) or attr == "__doc__":
                    setattr(test_method_wrapper, attr,
                            getattr(test_method, attr))
            setattr(self, methodName, test_method_wrapper)

        # We could overload run() normally, but other well-known testing
        # frameworks do it as well, and some of them won't call the super,
        # which might mean that cleanup wouldn't happen. With that in mind,
        # we make integration easier by using the following trick.
        run_method = self.run
        def run_wrapper(*args, **kwargs):
            try:
                return run_method(*args, **kwargs)
            finally:
                self.__cleanup()
        self.run = run_wrapper

        self.mocker = Mocker()
        self.expect = Expect(self.mocker)

        self.__cleanup_funcs = []
        self.__cleanup_paths = []

        super(MockerTestCase, self).__init__(methodName)

    def __call__(self, *args, **kwargs):
        # This is necessary for Python 2.3 only, because it didn't use run(),
        # which is supported above.
        try:
            super(MockerTestCase, self).__call__(*args, **kwargs)
        finally:
            if sys.version_info < (2, 4):
                self.__cleanup()

    def __cleanup(self):
        for path in self.__cleanup_paths:
            if os.path.isfile(path):
                os.unlink(path)
            elif os.path.isdir(path):
                shutil.rmtree(path)
        self.mocker.reset()
        for func, args, kwargs in self.__cleanup_funcs:
            func(*args, **kwargs)

    def addCleanup(self, func, *args, **kwargs):
        self.__cleanup_funcs.append((func, args, kwargs))

    def makeFile(self, content=None, suffix="", prefix="tmp", basename=None,
                 dirname=None, path=None):
        """Create a temporary file and return the path to it.

        @param content: Initial content for the file.
        @param suffix: Suffix to be given to the file's basename.
        @param prefix: Prefix to be given to the file's basename.
        @param basename: Full basename for the file.
        @param dirname: Put file inside this directory.

        The file is removed after the test runs.
        """
        if path is not None:
            self.__cleanup_paths.append(path)
        elif basename is not None:
            if dirname is None:
                dirname = tempfile.mkdtemp()
                self.__cleanup_paths.append(dirname)
            path = os.path.join(dirname, basename)
        else:
            fd, path = tempfile.mkstemp(suffix, prefix, dirname)
            self.__cleanup_paths.append(path)
            os.close(fd)
            if content is None:
                os.unlink(path)
        if content is not None:
            file = open(path, "w")
            file.write(content)
            file.close()
        return path

    def makeDir(self, suffix="", prefix="tmp", dirname=None, path=None):
        """Create a temporary directory and return the path to it.

        @param suffix: Suffix to be given to the file's basename.
        @param prefix: Prefix to be given to the file's basename.
        @param dirname: Put directory inside this parent directory.

        The directory is removed after the test runs.
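
        For instance (a minimal usage sketch; the test name is made up)::

            def test_saves_data(self):
                path = self.makeDir(prefix="data-")
                # 'path' exists now and is removed once the test finishes.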
""" if path is not None: os.makedirs(path) else: path = tempfile.mkdtemp(suffix, prefix, dirname) self.__cleanup_paths.append(path) return path def failUnlessIs(self, first, second, msg=None): """Assert that C{first} is the same object as C{second}.""" if first is not second: raise self.failureException(msg or "%r is not %r" % (first, second)) def failIfIs(self, first, second, msg=None): """Assert that C{first} is not the same object as C{second}.""" if first is second: raise self.failureException(msg or "%r is %r" % (first, second)) def failUnlessIn(self, first, second, msg=None): """Assert that C{first} is contained in C{second}.""" if first not in second: raise self.failureException(msg or "%r not in %r" % (first, second)) def failUnlessStartsWith(self, first, second, msg=None): """Assert that C{first} starts with C{second}.""" if first[:len(second)] != second: raise self.failureException(msg or "%r doesn't start with %r" % (first, second)) def failIfStartsWith(self, first, second, msg=None): """Assert that C{first} doesn't start with C{second}.""" if first[:len(second)] == second: raise self.failureException(msg or "%r starts with %r" % (first, second)) def failUnlessEndsWith(self, first, second, msg=None): """Assert that C{first} starts with C{second}.""" if first[len(first)-len(second):] != second: raise self.failureException(msg or "%r doesn't end with %r" % (first, second)) def failIfEndsWith(self, first, second, msg=None): """Assert that C{first} doesn't start with C{second}.""" if first[len(first)-len(second):] == second: raise self.failureException(msg or "%r ends with %r" % (first, second)) def failIfIn(self, first, second, msg=None): """Assert that C{first} is not contained in C{second}.""" if first in second: raise self.failureException(msg or "%r in %r" % (first, second)) def failUnlessApproximates(self, first, second, tolerance, msg=None): """Assert that C{first} is near C{second} by at most C{tolerance}.""" if abs(first - second) > tolerance: raise self.failureException(msg or "abs(%r - %r) > %r" % (first, second, tolerance)) def failIfApproximates(self, first, second, tolerance, msg=None): """Assert that C{first} is far from C{second} by at least C{tolerance}. """ if abs(first - second) <= tolerance: raise self.failureException(msg or "abs(%r - %r) <= %r" % (first, second, tolerance)) def failUnlessMethodsMatch(self, first, second): """Assert that public methods in C{first} are present in C{second}. This method asserts that all public methods found in C{first} are also present in C{second} and accept the same arguments. C{first} may have its own private methods, though, and may not have all methods found in C{second}. Note that if a private method in C{first} matches the name of one in C{second}, their specification is still compared. This is useful to verify if a fake or stub class have the same API as the real class being simulated. """ first_methods = dict(inspect.getmembers(first, inspect.ismethod)) second_methods = dict(inspect.getmembers(second, inspect.ismethod)) for name, first_method in first_methods.iteritems(): first_argspec = inspect.getargspec(first_method) first_formatted = inspect.formatargspec(*first_argspec) second_method = second_methods.get(name) if second_method is None: if name[:1] == "_": continue # First may have its own private methods. 
raise self.failureException("%s.%s%s not present in %s" % (first.__name__, name, first_formatted, second.__name__)) second_argspec = inspect.getargspec(second_method) if first_argspec != second_argspec: second_formatted = inspect.formatargspec(*second_argspec) raise self.failureException("%s.%s%s != %s.%s%s" % (first.__name__, name, first_formatted, second.__name__, name, second_formatted)) def failUnlessRaises(self, excClass, callableObj, *args, **kwargs): """ Fail unless an exception of class excClass is thrown by callableObj when invoked with arguments args and keyword arguments kwargs. If a different type of exception is thrown, it will not be caught, and the test case will be deemed to have suffered an error, exactly as for an unexpected exception. It returns the exception instance if it matches the given exception class. """ try: result = callableObj(*args, **kwargs) except excClass, e: return e else: excName = excClass if hasattr(excClass, "__name__"): excName = excClass.__name__ raise self.failureException( "%s not raised (%r returned)" % (excName, result)) assertIs = failUnlessIs assertIsNot = failIfIs assertIn = failUnlessIn assertNotIn = failIfIn assertStartsWith = failUnlessStartsWith assertNotStartsWith = failIfStartsWith assertEndsWith = failUnlessEndsWith assertNotEndsWith = failIfEndsWith assertApproximates = failUnlessApproximates assertNotApproximates = failIfApproximates assertMethodsMatch = failUnlessMethodsMatch assertRaises = failUnlessRaises # The following are missing in Python < 2.4. assertTrue = unittest.TestCase.failUnless assertFalse = unittest.TestCase.failIf # The following is provided for compatibility with Twisted's trial. assertIdentical = assertIs assertNotIdentical = assertIsNot failUnlessIdentical = failUnlessIs failIfIdentical = failIfIs # -------------------------------------------------------------------- # Mocker. class classinstancemethod(object): def __init__(self, method): self.method = method def __get__(self, obj, cls=None): def bound_method(*args, **kwargs): return self.method(cls, obj, *args, **kwargs) return bound_method class MockerBase(object): """Controller of mock objects. A mocker instance is used to command recording and replay of expectations on any number of mock objects. Expectations should be expressed for the mock object while in record mode (the initial one) by using the mock object itself, and using the mocker (and/or C{expect()} as a helper) to define additional behavior for each event. For instance:: mock = mocker.mock() mock.hello() mocker.result("Hi!") mocker.replay() assert mock.hello() == "Hi!" mock.restore() mock.verify() In this short excerpt a mock object is being created, then an expectation of a call to the C{hello()} method was recorded, and when called the method should return the value C{10}. Then, the mocker is put in replay mode, and the expectation is satisfied by calling the C{hello()} method, which indeed returns 10. Finally, a call to the L{restore()} method is performed to undo any needed changes made in the environment, and the L{verify()} method is called to ensure that all defined expectations were met. The same logic can be expressed more elegantly using the C{with mocker:} statement, as follows:: mock = mocker.mock() mock.hello() mocker.result("Hi!") with mocker: assert mock.hello() == "Hi!" Also, the MockerTestCase class, which integrates the mocker on a unittest.TestCase subclass, may be used to reduce the overhead of controlling the mocker. 
A test could be written as follows:: class SampleTest(MockerTestCase): def test_hello(self): mock = self.mocker.mock() mock.hello() self.mocker.result("Hi!") self.mocker.replay() self.assertEquals(mock.hello(), "Hi!") """ _recorders = [] # For convenience only. on = expect class __metaclass__(type): def __init__(self, name, bases, dict): # Make independent lists on each subclass, inheriting from parent. self._recorders = list(getattr(self, "_recorders", ())) def __init__(self): self._recorders = self._recorders[:] self._events = [] self._recording = True self._ordering = False self._last_orderer = None def is_recording(self): """Return True if in recording mode, False if in replay mode. Recording is the initial state. """ return self._recording def replay(self): """Change to replay mode, where recorded events are reproduced. If already in replay mode, the mocker will be restored, with all expectations reset, and then put again in replay mode. An alternative and more comfortable way to replay changes is using the 'with' statement, as follows:: mocker = Mocker() with mocker: The 'with' statement will automatically put mocker in replay mode, and will also verify if all events were correctly reproduced at the end (using L{verify()}), and also restore any changes done in the environment (with L{restore()}). Also check the MockerTestCase class, which integrates the unittest.TestCase class with mocker. """ if not self._recording: for event in self._events: event.restore() else: self._recording = False for event in self._events: event.replay() def restore(self): """Restore changes in the environment, and return to recording mode. This should always be called after the test is complete (succeeding or not). There are ways to call this method automatically on completion (e.g. using a C{with mocker:} statement, or using the L{MockerTestCase} class. """ if not self._recording: self._recording = True for event in self._events: event.restore() def reset(self): """Reset the mocker state. This will restore environment changes, if currently in replay mode, and then remove all events previously recorded. """ if not self._recording: self.restore() self.unorder() del self._events[:] def get_events(self): """Return all recorded events.""" return self._events[:] def add_event(self, event): """Add an event. This method is used internally by the implementation, and shouldn't be needed on normal mocker usage. """ self._events.append(event) if self._ordering: orderer = event.add_task(Orderer(event.path)) if self._last_orderer: orderer.add_dependency(self._last_orderer) self._last_orderer = orderer return event def verify(self): """Check if all expectations were met, and raise AssertionError if not. The exception message will include a nice description of which expectations were not met, and why. """ errors = [] for event in self._events: try: event.verify() except AssertionError, e: error = str(e) if not error: raise RuntimeError("Empty error message from %r" % event) errors.append(error) if errors: message = [ERROR_PREFIX + "Unmet expectations:", ""] for error in errors: lines = error.splitlines() message.append("=> " + lines.pop(0)) message.extend([" " + line for line in lines]) message.append("") raise AssertionError(os.linesep.join(message)) def mock(self, spec_and_type=None, spec=None, type=None, name=None, count=True): """Return a new mock object. @param spec_and_type: Handy positional argument which sets both spec and type. @param spec: Method calls will be checked for correctness against the given class. 
@param type: If set, the Mock's __class__ attribute will return the given type. This will make C{isinstance()} calls on the object work. @param name: Name for the mock object, used in the representation of expressions. The name is rarely needed, as it's usually guessed correctly from the variable name used. @param count: If set to false, expressions may be executed any number of times, unless an expectation is explicitly set using the L{count()} method. By default, expressions are expected once. """ if spec_and_type is not None: spec = type = spec_and_type return Mock(self, spec=spec, type=type, name=name, count=count) def proxy(self, object, spec=True, type=True, name=None, count=True, passthrough=True): """Return a new mock object which proxies to the given object. Proxies are useful when only part of the behavior of an object is to be mocked. Unknown expressions may be passed through to the real implementation implicitly (if the C{passthrough} argument is True), or explicitly (using the L{passthrough()} method on the event). @param object: Real object to be proxied, and replaced by the mock on replay mode. It may also be an "import path", such as C{"time.time"}, in which case the object will be the C{time} function from the C{time} module. @param spec: Method calls will be checked for correctness against the given object, which may be a class or an instance where attributes will be looked up. Defaults to the the C{object} parameter. May be set to None explicitly, in which case spec checking is disabled. Checks may also be disabled explicitly on a per-event basis with the L{nospec()} method. @param type: If set, the Mock's __class__ attribute will return the given type. This will make C{isinstance()} calls on the object work. Defaults to the type of the C{object} parameter. May be set to None explicitly. @param name: Name for the mock object, used in the representation of expressions. The name is rarely needed, as it's usually guessed correctly from the variable name used. @param count: If set to false, expressions may be executed any number of times, unless an expectation is explicitly set using the L{count()} method. By default, expressions are expected once. @param passthrough: If set to False, passthrough of actions on the proxy to the real object will only happen when explicitly requested via the L{passthrough()} method. """ if isinstance(object, basestring): if name is None: name = object import_stack = object.split(".") attr_stack = [] while import_stack: module_path = ".".join(import_stack) try: __import__(module_path) except ImportError: attr_stack.insert(0, import_stack.pop()) if not import_stack: raise continue else: object = sys.modules[module_path] for attr in attr_stack: object = getattr(object, attr) break if isinstance(object, types.UnboundMethodType): object = object.im_func if spec is True: spec = object if type is True: type = __builtin__.type(object) return Mock(self, spec=spec, type=type, object=object, name=name, count=count, passthrough=passthrough) def replace(self, object, spec=True, type=True, name=None, count=True, passthrough=True): """Create a proxy, and replace the original object with the mock. On replay, the original object will be replaced by the returned proxy in all dictionaries found in the running interpreter via the garbage collecting system. This should cover module namespaces, class namespaces, instance namespaces, and so on. @param object: Real object to be proxied, and replaced by the mock on replay mode. 
It may also be an "import path", such as C{"time.time"}, in which case the object will be the C{time} function from the C{time} module. @param spec: Method calls will be checked for correctness against the given object, which may be a class or an instance where attributes will be looked up. Defaults to the the C{object} parameter. May be set to None explicitly, in which case spec checking is disabled. Checks may also be disabled explicitly on a per-event basis with the L{nospec()} method. @param type: If set, the Mock's __class__ attribute will return the given type. This will make C{isinstance()} calls on the object work. Defaults to the type of the C{object} parameter. May be set to None explicitly. @param name: Name for the mock object, used in the representation of expressions. The name is rarely needed, as it's usually guessed correctly from the variable name used. @param passthrough: If set to False, passthrough of actions on the proxy to the real object will only happen when explicitly requested via the L{passthrough()} method. """ mock = self.proxy(object, spec, type, name, count, passthrough) event = self._get_replay_restore_event() event.add_task(ProxyReplacer(mock)) return mock def patch(self, object, spec=True): """Patch an existing object to reproduce recorded events. @param object: Class or instance to be patched. @param spec: Method calls will be checked for correctness against the given object, which may be a class or an instance where attributes will be looked up. Defaults to the the C{object} parameter. May be set to None explicitly, in which case spec checking is disabled. Checks may also be disabled explicitly on a per-event basis with the L{nospec()} method. The result of this method is still a mock object, which can be used like any other mock object to record events. The difference is that when the mocker is put on replay mode, the *real* object will be modified to behave according to recorded expectations. Patching works in individual instances, and also in classes. When an instance is patched, recorded events will only be considered on this specific instance, and other instances should behave normally. When a class is patched, the reproduction of events will be considered on any instance of this class once created (collectively). Observe that, unlike with proxies which catch only events done through the mock object, *all* accesses to recorded expectations will be considered; even these coming from the object itself (e.g. C{self.hello()} is considered if this method was patched). While this is a very powerful feature, and many times the reason to use patches in the first place, it's important to keep this behavior in mind. Patching of the original object only takes place when the mocker is put on replay mode, and the patched object will be restored to its original state once the L{restore()} method is called (explicitly, or implicitly with alternative conventions, such as a C{with mocker:} block, or a MockerTestCase class). """ if spec is True: spec = object patcher = Patcher() event = self._get_replay_restore_event() event.add_task(patcher) mock = Mock(self, object=object, patcher=patcher, passthrough=True, spec=spec) patcher.patch_attr(object, '__mocker_mock__', mock) return mock def act(self, path): """This is called by mock objects whenever something happens to them. This method is part of the implementation between the mocker and mock objects. 
""" if self._recording: event = self.add_event(Event(path)) for recorder in self._recorders: recorder(self, event) return Mock(self, path) else: # First run events that may run, then run unsatisfied events, then # ones not previously run. We put the index in the ordering tuple # instead of the actual event because we want a stable sort # (ordering between 2 events is undefined). events = self._events order = [(events[i].satisfied()*2 + events[i].has_run(), i) for i in range(len(events))] order.sort() postponed = None for weight, i in order: event = events[i] if event.matches(path): if event.may_run(path): return event.run(path) elif postponed is None: postponed = event if postponed is not None: return postponed.run(path) raise MatchError(ERROR_PREFIX + "Unexpected expression: %s" % path) def get_recorders(cls, self): """Return recorders associated with this mocker class or instance. This method may be called on mocker instances and also on mocker classes. See the L{add_recorder()} method for more information. """ return (self or cls)._recorders[:] get_recorders = classinstancemethod(get_recorders) def add_recorder(cls, self, recorder): """Add a recorder to this mocker class or instance. @param recorder: Callable accepting C{(mocker, event)} as parameters. This is part of the implementation of mocker. All registered recorders are called for translating events that happen during recording into expectations to be met once the state is switched to replay mode. This method may be called on mocker instances and also on mocker classes. When called on a class, the recorder will be used by all instances, and also inherited on subclassing. When called on instances, the recorder is added only to the given instance. """ (self or cls)._recorders.append(recorder) return recorder add_recorder = classinstancemethod(add_recorder) def remove_recorder(cls, self, recorder): """Remove the given recorder from this mocker class or instance. This method may be called on mocker classes and also on mocker instances. See the L{add_recorder()} method for more information. """ (self or cls)._recorders.remove(recorder) remove_recorder = classinstancemethod(remove_recorder) def result(self, value): """Make the last recorded event return the given value on replay. @param value: Object to be returned when the event is replayed. """ self.call(lambda *args, **kwargs: value) def generate(self, sequence): """Last recorded event will return a generator with the given sequence. @param sequence: Sequence of values to be generated. """ def generate(*args, **kwargs): for value in sequence: yield value self.call(generate) def throw(self, exception): """Make the last recorded event raise the given exception on replay. @param exception: Class or instance of exception to be raised. """ def raise_exception(*args, **kwargs): raise exception self.call(raise_exception) def call(self, func): """Make the last recorded event cause the given function to be called. @param func: Function to be called. The result of the function will be used as the event result. """ self._events[-1].add_task(FunctionRunner(func)) def count(self, min, max=False): """Last recorded event must be replayed between min and max times. @param min: Minimum number of times that the event must happen. @param max: Maximum number of times that the event must happen. If not given, it defaults to the same value of the C{min} parameter. If set to None, there is no upper limit, and the expectation is met as long as it happens at least C{min} times. 
""" event = self._events[-1] for task in event.get_tasks(): if isinstance(task, RunCounter): event.remove_task(task) event.add_task(RunCounter(min, max)) def is_ordering(self): """Return true if all events are being ordered. See the L{order()} method. """ return self._ordering def unorder(self): """Disable the ordered mode. See the L{order()} method for more information. """ self._ordering = False self._last_orderer = None def order(self, *path_holders): """Create an expectation of order between two or more events. @param path_holders: Objects returned as the result of recorded events. By default, mocker won't force events to happen precisely in the order they were recorded. Calling this method will change this behavior so that events will only match if reproduced in the correct order. There are two ways in which this method may be used. Which one is used in a given occasion depends only on convenience. If no arguments are passed, the mocker will be put in a mode where all the recorded events following the method call will only be met if they happen in order. When that's used, the mocker may be put back in unordered mode by calling the L{unorder()} method, or by using a 'with' block, like so:: with mocker.ordered(): In this case, only expressions in will be ordered, and the mocker will be back in unordered mode after the 'with' block. The second way to use it is by specifying precisely which events should be ordered. As an example:: mock = mocker.mock() expr1 = mock.hello() expr2 = mock.world expr3 = mock.x.y.z mocker.order(expr1, expr2, expr3) This method of ordering only works when the expression returns another object. Also check the L{after()} and L{before()} methods, which are alternative ways to perform this. """ if not path_holders: self._ordering = True return OrderedContext(self) last_orderer = None for path_holder in path_holders: if type(path_holder) is Path: path = path_holder else: path = path_holder.__mocker_path__ for event in self._events: if event.path is path: for task in event.get_tasks(): if isinstance(task, Orderer): orderer = task break else: orderer = Orderer(path) event.add_task(orderer) if last_orderer: orderer.add_dependency(last_orderer) last_orderer = orderer break def after(self, *path_holders): """Last recorded event must happen after events referred to. @param path_holders: Objects returned as the result of recorded events which should happen before the last recorded event As an example, the idiom:: expect(mock.x).after(mock.y, mock.z) is an alternative way to say:: expr_x = mock.x expr_y = mock.y expr_z = mock.z mocker.order(expr_y, expr_x) mocker.order(expr_z, expr_x) See L{order()} for more information. """ last_path = self._events[-1].path for path_holder in path_holders: self.order(path_holder, last_path) def before(self, *path_holders): """Last recorded event must happen before events referred to. @param path_holders: Objects returned as the result of recorded events which should happen after the last recorded event As an example, the idiom:: expect(mock.x).before(mock.y, mock.z) is an alternative way to say:: expr_x = mock.x expr_y = mock.y expr_z = mock.z mocker.order(expr_x, expr_y) mocker.order(expr_x, expr_z) See L{order()} for more information. """ last_path = self._events[-1].path for path_holder in path_holders: self.order(last_path, path_holder) def nospec(self): """Don't check method specification of real object on last event. 
By default, when using a mock created as the result of a call to L{proxy()}, L{replace()}, and C{patch()}, or when passing the spec attribute to the L{mock()} method, method calls on the given object are checked for correctness against the specification of the real object (or the explicitly provided spec). This method will disable that check specifically for the last recorded event. """ event = self._events[-1] for task in event.get_tasks(): if isinstance(task, SpecChecker): event.remove_task(task) def passthrough(self, result_callback=None): """Make the last recorded event run on the real object once seen. @param result_callback: If given, this function will be called with the result of the *real* method call as the only argument. This can only be used on proxies, as returned by the L{proxy()} and L{replace()} methods, or on mocks representing patched objects, as returned by the L{patch()} method. """ event = self._events[-1] if event.path.root_object is None: raise TypeError("Mock object isn't a proxy") event.add_task(PathExecuter(result_callback)) def __enter__(self): """Enter in a 'with' context. This will run replay().""" self.replay() return self def __exit__(self, type, value, traceback): """Exit from a 'with' context. This will run restore() at all times, but will only run verify() if the 'with' block itself hasn't raised an exception. Exceptions in that block are never swallowed. """ self.restore() if type is None: self.verify() return False def _get_replay_restore_event(self): """Return unique L{ReplayRestoreEvent}, creating if needed. Some tasks only want to replay/restore. When that's the case, they shouldn't act on other events during replay. Also, they can all be put in a single event when that's the case. Thus, we add a single L{ReplayRestoreEvent} as the first element of the list. """ if not self._events or type(self._events[0]) != ReplayRestoreEvent: self._events.insert(0, ReplayRestoreEvent()) return self._events[0] class OrderedContext(object): def __init__(self, mocker): self._mocker = mocker def __enter__(self): return None def __exit__(self, type, value, traceback): self._mocker.unorder() class Mocker(MockerBase): __doc__ = MockerBase.__doc__ # Decorator to add recorders on the standard Mocker class. recorder = Mocker.add_recorder # -------------------------------------------------------------------- # Mock object. class Mock(object): def __init__(self, mocker, path=None, name=None, spec=None, type=None, object=None, passthrough=False, patcher=None, count=True): self.__mocker__ = mocker self.__mocker_path__ = path or Path(self, object) self.__mocker_name__ = name self.__mocker_spec__ = spec self.__mocker_object__ = object self.__mocker_passthrough__ = passthrough self.__mocker_patcher__ = patcher self.__mocker_replace__ = False self.__mocker_type__ = type self.__mocker_count__ = count def __mocker_act__(self, kind, args=(), kwargs={}, object=None): if self.__mocker_name__ is None: self.__mocker_name__ = find_object_name(self, 2) action = Action(kind, args, kwargs, self.__mocker_path__) path = self.__mocker_path__ + action if object is not None: path.root_object = object try: return self.__mocker__.act(path) except MatchError, exception: root_mock = path.root_mock if (path.root_object is not None and root_mock.__mocker_passthrough__): return path.execute(path.root_object) # Reinstantiate to show raise statement on traceback, and # also to make the traceback shown shorter. 
raise MatchError(str(exception)) except AssertionError, e: lines = str(e).splitlines() message = [ERROR_PREFIX + "Unmet expectation:", ""] message.append("=> " + lines.pop(0)) message.extend([" " + line for line in lines]) message.append("") raise AssertionError(os.linesep.join(message)) def __getattribute__(self, name): if name.startswith("__mocker_"): return super(Mock, self).__getattribute__(name) if name == "__class__": if self.__mocker__.is_recording() or self.__mocker_type__ is None: return type(self) return self.__mocker_type__ if name == "__length_hint__": # This is used by Python 2.6+ to optimize the allocation # of arrays in certain cases. Pretend it doesn't exist. raise AttributeError("No __length_hint__ here!") return self.__mocker_act__("getattr", (name,)) def __setattr__(self, name, value): if name.startswith("__mocker_"): return super(Mock, self).__setattr__(name, value) return self.__mocker_act__("setattr", (name, value)) def __delattr__(self, name): return self.__mocker_act__("delattr", (name,)) def __call__(self, *args, **kwargs): return self.__mocker_act__("call", args, kwargs) def __contains__(self, value): return self.__mocker_act__("contains", (value,)) def __getitem__(self, key): return self.__mocker_act__("getitem", (key,)) def __setitem__(self, key, value): return self.__mocker_act__("setitem", (key, value)) def __delitem__(self, key): return self.__mocker_act__("delitem", (key,)) def __len__(self): # MatchError is turned into an AttributeError so that list() and # friends act properly when trying to get length hints on # something that doesn't offer them. try: result = self.__mocker_act__("len") except MatchError, e: raise AttributeError(str(e)) if type(result) is Mock: return 0 return result def __nonzero__(self): try: result = self.__mocker_act__("nonzero") except MatchError, e: return True if type(result) is Mock: return True return result def __iter__(self): # XXX On py3k, when next() becomes __next__(), we'll be able # to return the mock itself because it will be considered # an iterator (we'll be mocking __next__ as well, which we # can't now). result = self.__mocker_act__("iter") if type(result) is Mock: return iter([]) return result # When adding a new action kind here, also add support for it on # Action.execute() and Path.__str__(). def find_object_name(obj, depth=0): """Try to detect how the object is named on a previous scope.""" try: frame = sys._getframe(depth+1) except: return None for name, frame_obj in frame.f_locals.iteritems(): if frame_obj is obj: return name self = frame.f_locals.get("self") if self is not None: try: items = list(self.__dict__.iteritems()) except: pass else: for name, self_obj in items: if self_obj is obj: return name return None # -------------------------------------------------------------------- # Action and path.
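# A rough sketch of the model implemented below (the names "obj_mock"
# and "path" are hypothetical, added for illustration; this is not part
# of the mocker API): every expression recorded on a Mock becomes a
# Path, a chain of Action objects rooted at the mock.  Recording
# obj_mock.hello("world") is roughly equivalent to:
#
#     path = Path(obj_mock)
#     path += Action("getattr", ("hello",), {})
#     path += Action("call", ("world",), {})
#
# Action.execute() replays one such step against a real object, and
# Path.execute() replays the whole chain.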
class Action(object): def __init__(self, kind, args, kwargs, path=None): self.kind = kind self.args = args self.kwargs = kwargs self.path = path self._execute_cache = {} def __repr__(self): if self.path is None: return "Action(%r, %r, %r)" % (self.kind, self.args, self.kwargs) return "Action(%r, %r, %r, %r)" % \ (self.kind, self.args, self.kwargs, self.path) def __eq__(self, other): return (self.kind == other.kind and self.args == other.args and self.kwargs == other.kwargs) def __ne__(self, other): return not self.__eq__(other) def matches(self, other): return (self.kind == other.kind and match_params(self.args, self.kwargs, other.args, other.kwargs)) def execute(self, object): # This caching scheme may fail if the object gets deallocated before # the action, as the id might get reused. It's somewhat easy to fix # that with a weakref callback. For our uses, though, the object # should never get deallocated before the action itself, so we'll # just keep it simple. if id(object) in self._execute_cache: return self._execute_cache[id(object)] execute = getattr(object, "__mocker_execute__", None) if execute is not None: result = execute(self, object) else: kind = self.kind if kind == "getattr": result = getattr(object, self.args[0]) elif kind == "setattr": result = setattr(object, self.args[0], self.args[1]) elif kind == "delattr": result = delattr(object, self.args[0]) elif kind == "call": result = object(*self.args, **self.kwargs) elif kind == "contains": result = self.args[0] in object elif kind == "getitem": result = object[self.args[0]] elif kind == "setitem": result = object[self.args[0]] = self.args[1] elif kind == "delitem": del object[self.args[0]] result = None elif kind == "len": result = len(object) elif kind == "nonzero": result = bool(object) elif kind == "iter": result = iter(object) else: raise RuntimeError("Don't know how to execute %r kind." % kind) self._execute_cache[id(object)] = result return result class Path(object): def __init__(self, root_mock, root_object=None, actions=()): self.root_mock = root_mock self.root_object = root_object self.actions = tuple(actions) self.__mocker_replace__ = False def parent_path(self): if not self.actions: return None return self.actions[-1].path parent_path = property(parent_path) def __add__(self, action): """Return a new path which includes the given action at the end.""" return self.__class__(self.root_mock, self.root_object, self.actions + (action,)) def __eq__(self, other): """Verify if the two paths are equal. Two paths are equal if they refer to the same mock object, and have the actions with equal kind, args and kwargs. """ if (self.root_mock is not other.root_mock or self.root_object is not other.root_object or len(self.actions) != len(other.actions)): return False for action, other_action in zip(self.actions, other.actions): if action != other_action: return False return True def matches(self, other): """Verify if the two paths are equivalent. Two paths are equal if they refer to the same mock object, and have the same actions performed on them. """ if (self.root_mock is not other.root_mock or len(self.actions) != len(other.actions)): return False for action, other_action in zip(self.actions, other.actions): if not action.matches(other_action): return False return True def execute(self, object): """Execute all actions sequentially on object, and return result. 
""" for action in self.actions: object = action.execute(object) return object def __str__(self): """Transform the path into a nice string such as obj.x.y('z').""" result = self.root_mock.__mocker_name__ or "" for action in self.actions: if action.kind == "getattr": result = "%s.%s" % (result, action.args[0]) elif action.kind == "setattr": result = "%s.%s = %r" % (result, action.args[0], action.args[1]) elif action.kind == "delattr": result = "del %s.%s" % (result, action.args[0]) elif action.kind == "call": args = [repr(x) for x in action.args] items = list(action.kwargs.iteritems()) items.sort() for pair in items: args.append("%s=%r" % pair) result = "%s(%s)" % (result, ", ".join(args)) elif action.kind == "contains": result = "%r in %s" % (action.args[0], result) elif action.kind == "getitem": result = "%s[%r]" % (result, action.args[0]) elif action.kind == "setitem": result = "%s[%r] = %r" % (result, action.args[0], action.args[1]) elif action.kind == "delitem": result = "del %s[%r]" % (result, action.args[0]) elif action.kind == "len": result = "len(%s)" % result elif action.kind == "nonzero": result = "bool(%s)" % result elif action.kind == "iter": result = "iter(%s)" % result else: raise RuntimeError("Don't know how to format kind %r" % action.kind) return result class SpecialArgument(object): """Base for special arguments for matching parameters.""" def __init__(self, object=None): self.object = object def __repr__(self): if self.object is None: return self.__class__.__name__ else: return "%s(%r)" % (self.__class__.__name__, self.object) def matches(self, other): return True def __eq__(self, other): return type(other) == type(self) and self.object == other.object class ANY(SpecialArgument): """Matches any single argument.""" ANY = ANY() class ARGS(SpecialArgument): """Matches zero or more positional arguments.""" ARGS = ARGS() class KWARGS(SpecialArgument): """Matches zero or more keyword arguments.""" KWARGS = KWARGS() class IS(SpecialArgument): def matches(self, other): return self.object is other def __eq__(self, other): return type(other) == type(self) and self.object is other.object class CONTAINS(SpecialArgument): def matches(self, other): try: other.__contains__ except AttributeError: try: iter(other) except TypeError: # If an object can't be iterated, and has no __contains__ # hook, it'd blow up on the test below. We test this in # advance to prevent catching more errors than we really # want. return False return self.object in other class IN(SpecialArgument): def matches(self, other): return other in self.object class MATCH(SpecialArgument): def matches(self, other): return bool(self.object(other)) def __eq__(self, other): return type(other) == type(self) and self.object is other.object def match_params(args1, kwargs1, args2, kwargs2): """Match the two sets of parameters, considering special parameters.""" has_args = ARGS in args1 has_kwargs = KWARGS in args1 if has_kwargs: args1 = [arg1 for arg1 in args1 if arg1 is not KWARGS] elif len(kwargs1) != len(kwargs2): return False if not has_args and len(args1) != len(args2): return False # Either we have the same number of kwargs, or unknown keywords are # accepted (KWARGS was used), so check just the ones in kwargs1. for key, arg1 in kwargs1.iteritems(): if key not in kwargs2: return False arg2 = kwargs2[key] if isinstance(arg1, SpecialArgument): if not arg1.matches(arg2): return False elif arg1 != arg2: return False # Keywords match. Now either we have the same number of # arguments, or ARGS was used. 
If ARGS wasn't used, arguments # must match one-on-one necessarily. if not has_args: for arg1, arg2 in zip(args1, args2): if isinstance(arg1, SpecialArgument): if not arg1.matches(arg2): return False elif arg1 != arg2: return False return True # Easy choice. Keywords are matching, and anything on args is accepted. if (ARGS,) == args1: return True # We have something different there. If we don't have positional # arguments on the original call, it can't match. if not args2: # Unless we have just several ARGS (which is bizarre, but..). for arg1 in args1: if arg1 is not ARGS: return False return True # Ok, all bets are lost. We have to actually do the more expensive # matching. This is an algorithm based on the idea of the Levenshtein # Distance between two strings, but heavily hacked for this purpose. args2l = len(args2) if args1[0] is ARGS: args1 = args1[1:] array = [0]*args2l else: array = [1]*args2l for i in range(len(args1)): last = array[0] if args1[i] is ARGS: for j in range(1, args2l): last, array[j] = array[j], min(array[j-1], array[j], last) else: array[0] = i or int(args1[i] != args2[0]) for j in range(1, args2l): last, array[j] = array[j], last or int(args1[i] != args2[j]) if 0 not in array: return False if array[-1] != 0: return False return True # -------------------------------------------------------------------- # Event and task base. class Event(object): """Aggregation of tasks that keep track of a recorded action. An event represents something that may or may not happen while the mocked environment is running, such as an attribute access, or a method call. The event is composed of several tasks that are orchestrated together to create a composed meaning for the event, including for which actions it should be run, what happens when it runs, and what the expectations are about the actions run. """ def __init__(self, path=None): self.path = path self._tasks = [] self._has_run = False def add_task(self, task): """Add a new task to this event.""" self._tasks.append(task) return task def remove_task(self, task): self._tasks.remove(task) def get_tasks(self): return self._tasks[:] def matches(self, path): """Return true if *all* tasks match the given path.""" for task in self._tasks: if not task.matches(path): return False return bool(self._tasks) def has_run(self): return self._has_run def may_run(self, path): """Verify if any task would certainly raise an error if run. This will call the C{may_run()} method on each task and return false if any of them returns false. """ for task in self._tasks: if not task.may_run(path): return False return True def run(self, path): """Run all tasks with the given action. @param path: The path of the expression run. Running an event means running all of its tasks individually and in order. An event should only ever be run if all of its tasks claim to match the given action. The result of this method will be the last result of a task which isn't None, or None if they're all None.
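For instance (hypothetical values): if the tasks return C{None}, C{6} and C{None}, in that order, the result of this method is C{6}.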
""" self._has_run = True result = None errors = [] for task in self._tasks: try: task_result = task.run(path) except AssertionError, e: error = str(e) if not error: raise RuntimeError("Empty error message from %r" % task) errors.append(error) else: if task_result is not None: result = task_result if errors: message = [str(self.path)] if str(path) != message[0]: message.append("- Run: %s" % path) for error in errors: lines = error.splitlines() message.append("- " + lines.pop(0)) message.extend([" " + line for line in lines]) raise AssertionError(os.linesep.join(message)) return result def satisfied(self): """Return true if all tasks are satisfied. Being satisfied means that there are no unmet expectations. """ for task in self._tasks: try: task.verify() except AssertionError: return False return True def verify(self): """Run verify on all tasks. The verify method is supposed to raise an AssertionError if the task has unmet expectations, with a one-line explanation about why this item is unmet. This method should be safe to be called multiple times without side effects. """ errors = [] for task in self._tasks: try: task.verify() except AssertionError, e: error = str(e) if not error: raise RuntimeError("Empty error message from %r" % task) errors.append(error) if errors: message = [str(self.path)] for error in errors: lines = error.splitlines() message.append("- " + lines.pop(0)) message.extend([" " + line for line in lines]) raise AssertionError(os.linesep.join(message)) def replay(self): """Put all tasks in replay mode.""" self._has_run = False for task in self._tasks: task.replay() def restore(self): """Restore the state of all tasks.""" for task in self._tasks: task.restore() class ReplayRestoreEvent(Event): """Helper event for tasks which need replay/restore but shouldn't match.""" def matches(self, path): return False class Task(object): """Element used to track one specific aspect on an event. A task is responsible for adding any kind of logic to an event. Examples of that are counting the number of times the event was made, verifying parameters if any, and so on. """ def matches(self, path): """Return true if the task is supposed to be run for the given path. """ return True def may_run(self, path): """Return false if running this task would certainly raise an error.""" return True def run(self, path): """Perform the task item, considering that the given action happened. """ def verify(self): """Raise AssertionError if expectations for this item are unmet. The verify method is supposed to raise an AssertionError if the task has unmet expectations, with a one-line explanation about why this item is unmet. This method should be safe to be called multiple times without side effects. """ def replay(self): """Put the task in replay mode. Any expectations of the task should be reset. """ def restore(self): """Restore any environmental changes made by the task. Verify should continue to work after this is called. """ # -------------------------------------------------------------------- # Task implementations. 
class OnRestoreCaller(Task): """Call a given callback when restoring.""" def __init__(self, callback): self._callback = callback def restore(self): self._callback() class PathMatcher(Task): """Match the action path against a given path.""" def __init__(self, path): self.path = path def matches(self, path): return self.path.matches(path) def path_matcher_recorder(mocker, event): event.add_task(PathMatcher(event.path)) Mocker.add_recorder(path_matcher_recorder) class RunCounter(Task): """Task which verifies if the number of runs is within given boundaries. """ def __init__(self, min, max=False): self.min = min if max is None: self.max = sys.maxint elif max is False: self.max = min else: self.max = max self._runs = 0 def replay(self): self._runs = 0 def may_run(self, path): return self._runs < self.max def run(self, path): self._runs += 1 if self._runs > self.max: self.verify() def verify(self): if not self.min <= self._runs <= self.max: if self._runs < self.min: raise AssertionError("Performed fewer times than expected.") raise AssertionError("Performed more times than expected.") class ImplicitRunCounter(RunCounter): """RunCounter inserted by default on any event. This is a way to differentiate between explicitly added counters and implicit ones. """ def run_counter_recorder(mocker, event): """By default an event is expected to run exactly once, unless counting is disabled for the mock.""" if event.path.root_mock.__mocker_count__: event.add_task(ImplicitRunCounter(1)) Mocker.add_recorder(run_counter_recorder) def run_counter_removal_recorder(mocker, event): """ Events created by getattr actions which lead to other events may be repeated any number of times. For that, we remove implicit run counters of any getattr actions leading to the current one. """ parent_path = event.path.parent_path for event in mocker.get_events()[::-1]: if (event.path is parent_path and event.path.actions[-1].kind == "getattr"): for task in event.get_tasks(): if type(task) is ImplicitRunCounter: event.remove_task(task) Mocker.add_recorder(run_counter_removal_recorder) class MockReturner(Task): """Return a mock based on the action path.""" def __init__(self, mocker): self.mocker = mocker def run(self, path): return Mock(self.mocker, path) def mock_returner_recorder(mocker, event): """Events that lead to other events must return mock objects.""" parent_path = event.path.parent_path for event in mocker.get_events(): if event.path is parent_path: for task in event.get_tasks(): if isinstance(task, MockReturner): break else: event.add_task(MockReturner(mocker)) break Mocker.add_recorder(mock_returner_recorder) class FunctionRunner(Task): """Task that runs a function every time it's run. Arguments of the last action in the path are passed to the function, and the function result is also returned. """ def __init__(self, func): self._func = func def run(self, path): action = path.actions[-1] return self._func(*action.args, **action.kwargs) class PathExecuter(Task): """Task that executes a path in the real object, and returns the result.""" def __init__(self, result_callback=None): self._result_callback = result_callback def get_result_callback(self): return self._result_callback def run(self, path): result = path.execute(path.root_object) if self._result_callback is not None: self._result_callback(result) return result class Orderer(Task): """Task to establish an order relation between two events. An orderer task will only match once all its dependencies have been run.
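For example (hypothetical expressions): after C{mocker.order(expr_a, expr_b)}, the orderer on C{expr_b}'s event has the orderer of C{expr_a} as a dependency, so C{expr_b} can only match once C{expr_a} has actually run.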
""" def __init__(self, path): self.path = path self._run = False self._dependencies = [] def replay(self): self._run = False def has_run(self): return self._run def may_run(self, path): for dependency in self._dependencies: if not dependency.has_run(): return False return True def run(self, path): for dependency in self._dependencies: if not dependency.has_run(): raise AssertionError("Should be after: %s" % dependency.path) self._run = True def add_dependency(self, orderer): self._dependencies.append(orderer) def get_dependencies(self): return self._dependencies class SpecChecker(Task): """Task to check if arguments of the last action conform to a real method. """ def __init__(self, method): self._method = method self._unsupported = False if method: try: self._args, self._varargs, self._varkwargs, self._defaults = \ inspect.getargspec(method) except TypeError: self._unsupported = True else: if self._defaults is None: self._defaults = () if type(method) is type(self.run): self._args = self._args[1:] def get_method(self): return self._method def _raise(self, message): spec = inspect.formatargspec(self._args, self._varargs, self._varkwargs, self._defaults) raise AssertionError("Specification is %s%s: %s" % (self._method.__name__, spec, message)) def verify(self): if not self._method: raise AssertionError("Method not found in real specification") def may_run(self, path): try: self.run(path) except AssertionError: return False return True def run(self, path): if not self._method: raise AssertionError("Method not found in real specification") if self._unsupported: return # Can't check it. Happens with builtin functions. :-( action = path.actions[-1] obtained_len = len(action.args) obtained_kwargs = action.kwargs.copy() nodefaults_len = len(self._args) - len(self._defaults) for i, name in enumerate(self._args): if i < obtained_len and name in action.kwargs: self._raise("%r provided twice" % name) if (i >= obtained_len and i < nodefaults_len and name not in action.kwargs): self._raise("%r not provided" % name) obtained_kwargs.pop(name, None) if obtained_len > len(self._args) and not self._varargs: self._raise("too many args provided") if obtained_kwargs and not self._varkwargs: self._raise("unknown kwargs: %s" % ", ".join(obtained_kwargs)) def spec_checker_recorder(mocker, event): spec = event.path.root_mock.__mocker_spec__ if spec: actions = event.path.actions if len(actions) == 1: if actions[0].kind == "call": method = getattr(spec, "__call__", None) event.add_task(SpecChecker(method)) elif len(actions) == 2: if actions[0].kind == "getattr" and actions[1].kind == "call": method = getattr(spec, actions[0].args[0], None) event.add_task(SpecChecker(method)) Mocker.add_recorder(spec_checker_recorder) class ProxyReplacer(Task): """Task which installs and deinstalls proxy mocks. This task will replace a real object by a mock in all dictionaries found in the running interpreter via the garbage collecting system. 
""" def __init__(self, mock): self.mock = mock self.__mocker_replace__ = False def replay(self): global_replace(self.mock.__mocker_object__, self.mock) def restore(self): global_replace(self.mock, self.mock.__mocker_object__) def global_replace(remove, install): """Replace object 'remove' with object 'install' on all dictionaries.""" for referrer in gc.get_referrers(remove): if (type(referrer) is dict and referrer.get("__mocker_replace__", True)): for key, value in list(referrer.iteritems()): if value is remove: referrer[key] = install class Undefined(object): def __repr__(self): return "Undefined" Undefined = Undefined() class Patcher(Task): def __init__(self): super(Patcher, self).__init__() self._monitored = {} # {kind: {id(object): object}} self._patched = {} def is_monitoring(self, obj, kind): monitored = self._monitored.get(kind) if monitored: if id(obj) in monitored: return True cls = type(obj) if issubclass(cls, type): cls = obj bases = set([id(base) for base in cls.__mro__]) bases.intersection_update(monitored) return bool(bases) return False def monitor(self, obj, kind): if kind not in self._monitored: self._monitored[kind] = {} self._monitored[kind][id(obj)] = obj def patch_attr(self, obj, attr, value): original = obj.__dict__.get(attr, Undefined) self._patched[id(obj), attr] = obj, attr, original setattr(obj, attr, value) def get_unpatched_attr(self, obj, attr): cls = type(obj) if issubclass(cls, type): cls = obj result = Undefined for mro_cls in cls.__mro__: key = (id(mro_cls), attr) if key in self._patched: result = self._patched[key][2] if result is not Undefined: break elif attr in mro_cls.__dict__: result = mro_cls.__dict__.get(attr, Undefined) break if isinstance(result, object) and hasattr(type(result), "__get__"): if cls is obj: obj = None return result.__get__(obj, cls) return result def _get_kind_attr(self, kind): if kind == "getattr": return "__getattribute__" return "__%s__" % kind def replay(self): for kind in self._monitored: attr = self._get_kind_attr(kind) seen = set() for obj in self._monitored[kind].itervalues(): cls = type(obj) if issubclass(cls, type): cls = obj if cls not in seen: seen.add(cls) unpatched = getattr(cls, attr, Undefined) self.patch_attr(cls, attr, PatchedMethod(kind, unpatched, self.is_monitoring)) self.patch_attr(cls, "__mocker_execute__", self.execute) def restore(self): for obj, attr, original in self._patched.itervalues(): if original is Undefined: delattr(obj, attr) else: setattr(obj, attr, original) self._patched.clear() def execute(self, action, object): attr = self._get_kind_attr(action.kind) unpatched = self.get_unpatched_attr(object, attr) try: return unpatched(*action.args, **action.kwargs) except AttributeError: type, value, traceback = sys.exc_info() if action.kind == "getattr": # The normal behavior of Python is to try __getattribute__, # and if it raises AttributeError, try __getattr__. We've # tried the unpatched __getattribute__ above, and we'll now # try __getattr__. 
try: __getattr__ = unpatched("__getattr__") except AttributeError: pass else: return __getattr__(*action.args, **action.kwargs) raise type, value, traceback class PatchedMethod(object): def __init__(self, kind, unpatched, is_monitoring): self._kind = kind self._unpatched = unpatched self._is_monitoring = is_monitoring def __get__(self, obj, cls=None): object = obj or cls if not self._is_monitoring(object, self._kind): return self._unpatched.__get__(obj, cls) def method(*args, **kwargs): if self._kind == "getattr" and args[0].startswith("__mocker_"): return self._unpatched.__get__(obj, cls)(args[0]) mock = object.__mocker_mock__ return mock.__mocker_act__(self._kind, args, kwargs, object) return method def __call__(self, obj, *args, **kwargs): # At least with __getattribute__, Python seems to use *both* the # descriptor API and also call the class attribute directly. It # looks like an interpreter bug, or at least an undocumented # inconsistency. return self.__get__(obj)(*args, **kwargs) def patcher_recorder(mocker, event): mock = event.path.root_mock if mock.__mocker_patcher__ and len(event.path.actions) == 1: patcher = mock.__mocker_patcher__ patcher.monitor(mock.__mocker_object__, event.path.actions[0].kind) Mocker.add_recorder(patcher_recorder) landscape-client-14.01/landscape/tests/test_patch.py0000644000175000017500000001463112301414317022346 0ustar andreasandreasimport sqlite3 from landscape.lib.persist import Persist from landscape.patch import UpgradeManager, SQLiteUpgradeManager from landscape.tests.helpers import LandscapeTest class PatchTest(LandscapeTest): def setUp(self): LandscapeTest.setUp(self) self.persist = Persist() self.manager = UpgradeManager() def test_wb_nopatches(self): """ Applying no patches should make no change to the database, apart from maybe specifying a default version. """ self.assertEqual(self.persist._hardmap, {}) self.manager.apply(self.persist) self.assertEqual(self.persist._hardmap, {"system-version": 0}) def test_one_patch(self): """Test that patches are called and passed a L{Persist} object.""" l = [] self.manager.register_upgrader(1, l.append) self.manager.apply(self.persist) self.assertEqual(l, [self.persist]) def test_two_patches(self): """Test that patches are run in order.""" l = [] self.manager.register_upgrader(2, lambda x: l.append(2)) self.manager.register_upgrader(1, lambda x: l.append(1)) self.manager.apply(self.persist) self.assertEqual(l, [1, 2]) def test_record_version(self): """When a patch is run it should update the C{system-version}.""" self.assertEqual(self.persist.get("system-version"), None) self.manager.register_upgrader(1, lambda x: None) self.manager.apply(self.persist) self.assertEqual(self.persist.get("system-version"), 1) def test_only_apply_unapplied_versions(self): """Upgraders should only be run if they haven't been run before.""" l = [] self.manager.register_upgrader(1, lambda x: l.append(1)) self.manager.apply(self.persist) self.manager.apply(self.persist) self.assertEqual(l, [1]) def test_initialize(self): """Marking no upgraders as applied should leave the version at 0.""" self.manager.initialize(self.persist) self.assertEqual(self.persist.get("system-version"), 0) def test_initialize_with_upgraders(self): """ After registering some upgraders, initialize should set the version for the new persist to the highest version number available, without running any of the upgraders. 
""" self.manager.register_upgrader(1, lambda x: 1 / 0) self.manager.register_upgrader(5, lambda x: 1 / 0) self.manager.register_upgrader(3, lambda x: 1 / 0) self.manager.initialize(self.persist) self.assertEqual(self.persist.get("system-version"), 5) def test_decorated_upgraders_run(self): """ Upgraders that use the L{upgrader} decorator should automatically register themselves with a given L{UpgradeManager} and be run when the manager applies patches. """ upgrade_manager = UpgradeManager() @upgrade_manager.upgrader(1) def upgrade(persist): self.persist.set("upgrade-called", True) upgrade_manager.apply(self.persist) self.assertTrue(self.persist.get("upgrade-called")) class SQLitePatchTest(LandscapeTest): def setUp(self): LandscapeTest.setUp(self) self.db_filename = self.makeFile() self.db = sqlite3.connect(self.db_filename, isolation_level=None) self.cursor = self.db.cursor() self.manager = SQLiteUpgradeManager() self.version_query = "SELECT MAX(version) from patch" def test_no_patches(self): """ Applying no patches should make no change to the database, apart from maybe specifying a default version. """ self.manager.initialize(self.cursor) self.manager.apply(self.cursor) self.assertEqual(self.manager.get_database_versions(self.cursor), set()) def test_one_patch(self): """Test that patches are called and passed a sqlite db object.""" l = [] self.manager.initialize(self.cursor) self.manager.register_upgrader(1, l.append) self.manager.apply(self.cursor) self.assertEqual(l, [self.cursor]) self.cursor.execute(self.version_query) self.assertEqual(self.cursor.fetchone(), (1,)) def test_two_patches(self): """Test that patches are run in order.""" l = [] self.manager.initialize(self.cursor) self.manager.register_upgrader(2, lambda x: l.append(2)) self.manager.register_upgrader(1, lambda x: l.append(1)) self.manager.apply(self.cursor) self.assertEqual(l, [1, 2]) self.cursor.execute(self.version_query) self.assertEqual(self.cursor.fetchone(), (2,)) def test_only_apply_unapplied_versions(self): """Upgraders should only be run if they haven't been run before.""" patch1 = [] patch2 = [] patch3 = [] self.manager.initialize(self.cursor) self.manager.register_upgrader(1, lambda x: patch1.append(1)) self.manager.register_upgrader(2, lambda x: patch2.append(1)) self.manager.register_upgrader(3, lambda x: patch3.append(1)) self.manager.apply_one(2, self.cursor) self.assertEqual((patch1, patch2, patch3), ([], [1], [])) self.manager.apply(self.cursor) self.assertEqual((patch1, patch2, patch3), ([1], [1], [1])) def test_initialize_with_upgraders(self): """ After registering some upgraders, initialize should set the version of the newly created database to the highest version available. """ self.manager.register_upgrader(1, lambda x: 1 / 0) self.manager.register_upgrader(5, lambda x: 1 / 0) self.manager.register_upgrader(3, lambda x: 1 / 0) self.manager.initialize(self.cursor) self.assertEqual(self.manager.get_database_versions(self.cursor), set([1, 3, 5])) def test_decorated_upgraders_run(self): """ Upgraders that use the L{upgrader} decorator should automatically register themselves with a given L{UpgradeManager} and be run when the manager applies patches. 
""" upgrade_manager = SQLiteUpgradeManager() upgrade_manager.initialize(self.cursor) l = [] @upgrade_manager.upgrader(1) def upgrade(db): l.append(db) upgrade_manager.apply(self.cursor) self.assertEqual(l, [self.cursor]) landscape-client-14.01/landscape/tests/__init__.py0000644000175000017500000000000012301414317021730 0ustar andreasandreaslandscape-client-14.01/landscape/tests/subunit.py0000644000175000017500000003224612301414317021703 0ustar andreasandreas# # subunit: extensions to python unittest to get test results from subprocesses. # Copyright (C) 2005 Robert Collins # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # import os from StringIO import StringIO import subprocess import sys import unittest def test_suite(): import subunit.tests return subunit.tests.test_suite() def join_dir(base_path, path): """ Returns an absolute path to C{path}, calculated relative to the parent of C{base_path}. @param base_path: A path to a file or directory. @param path: An absolute path, or a path relative to the containing directory of C{base_path}. @return: An absolute path to C{path}. """ return os.path.join(os.path.dirname(os.path.abspath(base_path)), path) class TestProtocolServer(object): """A class for receiving results from a TestProtocol client.""" OUTSIDE_TEST = 0 TEST_STARTED = 1 READING_FAILURE = 2 READING_ERROR = 3 def __init__(self, client, stream=sys.stdout): """Create a TestProtocol server instance. client should be an object that provides - startTest - addSuccess - addFailure - addError - stopTest methods, i.e. a TestResult. 
""" self.state = TestProtocolServer.OUTSIDE_TEST self.client = client self._stream = stream def _addError(self, offset, line): if (self.state == TestProtocolServer.TEST_STARTED and self.current_test_description == line[offset:-1]): self.state = TestProtocolServer.OUTSIDE_TEST self.current_test_description = None self.client.addError(self._current_test, RemoteError("")) self.client.stopTest(self._current_test) self._current_test = None elif (self.state == TestProtocolServer.TEST_STARTED and self.current_test_description + " [" == line[offset:-1]): self.state = TestProtocolServer.READING_ERROR self._message = "" else: self.stdOutLineReceived(line) def _addFailure(self, offset, line): if (self.state == TestProtocolServer.TEST_STARTED and self.current_test_description == line[offset:-1]): self.state = TestProtocolServer.OUTSIDE_TEST self.current_test_description = None self.client.addFailure(self._current_test, RemoteError()) self.client.stopTest(self._current_test) elif (self.state == TestProtocolServer.TEST_STARTED and self.current_test_description + " [" == line[offset:-1]): self.state = TestProtocolServer.READING_FAILURE self._message = "" else: self.stdOutLineReceived(line) def _addSuccess(self, offset, line): if (self.state == TestProtocolServer.TEST_STARTED and self.current_test_description == line[offset:-1]): self.client.addSuccess(self._current_test) self.client.stopTest(self._current_test) self.current_test_description = None self._current_test = None self.state = TestProtocolServer.OUTSIDE_TEST else: self.stdOutLineReceived(line) def _appendMessage(self, line): if line[0:2] == " ]": # quoted ] start self._message += line[1:] else: self._message += line def endQuote(self, line): if self.state == TestProtocolServer.READING_FAILURE: self.state = TestProtocolServer.OUTSIDE_TEST self.current_test_description = None self.client.addFailure(self._current_test, RemoteError(self._message)) self.client.stopTest(self._current_test) elif self.state == TestProtocolServer.READING_ERROR: self.state = TestProtocolServer.OUTSIDE_TEST self.current_test_description = None self.client.addError(self._current_test, RemoteError(self._message)) self.client.stopTest(self._current_test) else: self.stdOutLineReceived(line) def lineReceived(self, line): """Call the appropriate local method for the received line.""" if line == "]\n": self.endQuote(line) elif (self.state == TestProtocolServer.READING_FAILURE or self.state == TestProtocolServer.READING_ERROR): self._appendMessage(line) else: parts = line.split(None, 1) if len(parts) == 2: cmd, rest = parts offset = len(cmd) + 1 cmd = cmd.strip(':') if cmd in ('test', 'testing'): self._startTest(offset, line) elif cmd == 'error': self._addError(offset, line) elif cmd == 'failure': self._addFailure(offset, line) elif cmd in ('success', 'successful'): self._addSuccess(offset, line) else: self.stdOutLineReceived(line) else: self.stdOutLineReceived(line) def lostConnection(self): """The input connection has finished.""" if self.state == TestProtocolServer.TEST_STARTED: self.client.addError(self._current_test, RemoteError("lost connection during test '%s'" % self.current_test_description)) self.client.stopTest(self._current_test) elif self.state == TestProtocolServer.READING_ERROR: self.client.addError(self._current_test, RemoteError("lost connection during " "error report of test " "'%s'" % self.current_test_description)) self.client.stopTest(self._current_test) elif self.state == TestProtocolServer.READING_FAILURE: self.client.addError(self._current_test, 
RemoteError("lost connection during " "failure report of test " "'%s'" % self.current_test_description)) self.client.stopTest(self._current_test) def readFrom(self, pipe): for line in pipe.readlines(): self.lineReceived(line) self.lostConnection() def _startTest(self, offset, line): """Internal call to change state machine. Override startTest().""" if self.state == TestProtocolServer.OUTSIDE_TEST: self.state = TestProtocolServer.TEST_STARTED self._current_test = RemotedTestCase(line[offset:-1]) self.current_test_description = line[offset:-1] self.client.startTest(self._current_test) else: self.stdOutLineReceived(line) def stdOutLineReceived(self, line): self._stream.write(line) class RemoteException(Exception): """An exception that occured remotely to python.""" def __eq__(self, other): try: return self.args == other.args except AttributeError: return False class TestProtocolClient(unittest.TestResult): """A class that looks like a TestResult and informs a TestProtocolServer.""" def __init__(self, stream): unittest.TestResult.__init__(self) self._stream = stream def addError(self, test, error): """Report an error in test test.""" self._stream.write("error: %s [\n" % test.shortDescription()) for line in self._exc_info_to_string(error, test).splitlines(): self._stream.write("%s\n" % line) self._stream.write("]\n") def addFailure(self, test, error): """Report a failure in test test.""" self._stream.write("failure: %s [\n" % test.shortDescription()) for line in self._exc_info_to_string(error, test).splitlines(): self._stream.write("%s\n" % line) self._stream.write("]\n") def addSuccess(self, test): """Report a success in a test.""" self._stream.write("successful: %s\n" % test.shortDescription()) def startTest(self, test): """Mark a test as starting its test run.""" self._stream.write("test: %s\n" % test.shortDescription()) def RemoteError(description=""): if description == "": description = "\n" return (RemoteException, RemoteException(description), None) class RemotedTestCase(unittest.TestCase): """A class to represent test cases run in child processes.""" def __eq__ (self, other): try: return self.__description == other.__description except AttributeError: return False def __init__(self, description): """Create a psuedo test case with description description.""" self.__description = description def error(self, label): raise NotImplementedError("%s on RemotedTestCases is not permitted." % label) def setUp(self): self.error("setUp") def tearDown(self): self.error("tearDown") def shortDescription(self): return self.__description def id(self): return "%s.%s" % (self._strclass(), self.__description) def __str__(self): return "%s (%s)" % (self.__description, self._strclass()) def __repr__(self): return "<%s description='%s'>" % \ (self._strclass(), self.__description) def run(self, result=None): if result is None: result = self.defaultTestResult() result.startTest(self) result.addError(self, RemoteError("Cannot run RemotedTestCases.\n")) result.stopTest(self) def _strclass(self): cls = self.__class__ return "%s.%s" % (cls.__module__, cls.__name__) class ExecTestCase(unittest.TestCase): """A test case which runs external scripts for test fixtures.""" def __init__(self, methodName='runTest'): """Create an instance of the class that will use the named test method when executed. Raises a ValueError if the instance does not have a method with the specified name. 
""" unittest.TestCase.__init__(self, methodName) testMethod = getattr(self, methodName) self.script = join_dir(sys.modules[self.__class__.__module__].__file__, testMethod.__doc__) def countTestCases(self): return 1 def run(self, result=None): if result is None: result = self.defaultTestResult() self._run(result) def debug(self): """Run the test without collecting errors in a TestResult""" self._run(unittest.TestResult()) def _run(self, result): protocol = TestProtocolServer(result) output = subprocess.Popen([self.script], stdout=subprocess.PIPE).communicate()[0] protocol.readFrom(StringIO(output)) class IsolatedTestCase(unittest.TestCase): """A TestCase which runs its tests in a forked process.""" def run(self, result=None): if result is None: result = self.defaultTestResult() run_isolated(unittest.TestCase, self, result) class IsolatedTestSuite(unittest.TestSuite): """A TestCase which runs its tests in a forked process.""" def run(self, result=None): if result is None: result = unittest.TestResult() run_isolated(unittest.TestSuite, self, result) def run_isolated(klass, self, result): """Run a test suite or case in a subprocess, using the run method on klass. """ c2pread, c2pwrite = os.pipe() # fixme - error -> result # now fork pid = os.fork() if pid == 0: # Child # Close parent's pipe ends os.close(c2pread) # Dup fds for child os.dup2(c2pwrite, 1) # Close pipe fds. os.close(c2pwrite) # at this point, sys.stdin is redirected, now we want # to filter it to escape ]'s. ### XXX: test and write that bit. result = TestProtocolClient(sys.stdout) klass.run(self, result) sys.stdout.flush() sys.stderr.flush() # exit HARD, exit NOW. os._exit(0) else: # Parent # Close child pipe ends os.close(c2pwrite) # hookup a protocol engine protocol = TestProtocolServer(result) protocol.readFrom(os.fdopen(c2pread, 'rU')) os.waitpid(pid, 0) # TODO return code evaluation. return result landscape-client-14.01/landscape/tests/test_schema.py0000644000175000017500000002103212301414317022500 0ustar andreasandreasfrom landscape.tests.helpers import LandscapeTest from landscape.schema import ( InvalidError, Constant, Bool, Int, Float, Bytes, Unicode, List, KeyDict, Dict, Tuple, Any, Message) class DummySchema(object): def coerce(self, value): return "hello!" 
class BasicTypesTest(LandscapeTest): def test_any(self): schema = Any(Constant(None), Unicode()) self.assertEqual(schema.coerce(None), None) self.assertEqual(schema.coerce(u"foo"), u"foo") def test_any_bad(self): schema = Any(Constant(None), Unicode()) self.assertRaises(InvalidError, schema.coerce, object()) def test_constant(self): self.assertEqual(Constant("hello").coerce("hello"), "hello") def test_constant_arbitrary(self): obj = object() self.assertEqual(Constant(obj).coerce(obj), obj) def test_constant_bad(self): self.assertRaises(InvalidError, Constant("foo").coerce, object()) def test_bool(self): self.assertEqual(Bool().coerce(True), True) self.assertEqual(Bool().coerce(False), False) def test_bool_bad(self): self.assertRaises(InvalidError, Bool().coerce, 1) def test_int(self): self.assertEqual(Int().coerce(3), 3) def test_int_accepts_long(self): self.assertEqual(Int().coerce(3L), 3) def test_int_bad_str(self): self.assertRaises(InvalidError, Int().coerce, "3") def test_int_bad_float(self): self.assertRaises(InvalidError, Int().coerce, 3.0) def test_float(self): self.assertEqual(Float().coerce(3.3), 3.3) def test_float_accepts_int(self): self.assertEqual(Float().coerce(3), 3.0) def test_float_accepts_long(self): self.assertEqual(Float().coerce(3L), 3.0) def test_float_bad_str(self): self.assertRaises(InvalidError, Float().coerce, "3.0") def test_string(self): self.assertEqual(Bytes().coerce("foo"), "foo") def test_string_bad_unicode(self): self.assertRaises(InvalidError, Bytes().coerce, u"foo") def test_string_bad_anything(self): self.assertRaises(InvalidError, Bytes().coerce, object()) def test_unicode(self): self.assertEqual(Unicode().coerce(u"foo"), u"foo") def test_unicode_bad_value(self): """Invalid values raise an error.""" self.assertRaises(InvalidError, Unicode().coerce, 32) def test_unicode_with_str(self): """Unicode accepts plain strings and returns a unicode.""" self.assertEqual(Unicode().coerce("foo"), u"foo") def test_unicode_decodes(self): """Unicode should decode plain strings.""" a = u"\N{HIRAGANA LETTER A}" self.assertEqual(Unicode().coerce(a.encode("utf-8")), a) letter = u"\N{LATIN SMALL LETTER A WITH GRAVE}" self.assertEqual( Unicode(encoding="latin-1").coerce(letter.encode("latin-1")), letter) def test_unicode_or_str_bad_encoding(self): """Decoding errors should be converted to InvalidErrors.""" self.assertRaises(InvalidError, Unicode().coerce, "\xff") def test_list(self): self.assertEqual(List(Int()).coerce([1]), [1]) def test_list_bad(self): self.assertRaises(InvalidError, List(Int()).coerce, 32) def test_list_inner_schema_coerces(self): self.assertEqual(List(DummySchema()).coerce([3]), ["hello!"]) def test_list_bad_inner_schema(self): self.assertRaises(InvalidError, List(Int()).coerce, ["hello"]) def test_list_multiple_items(self): a = u"\N{HIRAGANA LETTER A}" schema = List(Unicode()) self.assertEqual(schema.coerce([a, a.encode("utf-8")]), [a, a]) def test_tuple(self): self.assertEqual(Tuple(Int()).coerce((1,)), (1,)) def test_tuple_coerces(self): self.assertEqual(Tuple(Int(), DummySchema()).coerce((23, object())), (23, "hello!")) def test_tuple_bad(self): self.assertRaises(InvalidError, Tuple().coerce, object()) def test_tuple_inner_schema_bad(self): self.assertRaises(InvalidError, Tuple(Int()).coerce, (object(),)) def test_tuple_must_have_all_items(self): self.assertRaises(InvalidError, Tuple(Int(), Int()).coerce, (1,)) def test_tuple_must_have_no_more_items(self): self.assertRaises(InvalidError, Tuple(Int()).coerce, (1, 2)) def test_key_dict(self):
self.assertEqual(KeyDict({"foo": Int()}).coerce({"foo": 1}), {"foo": 1}) def test_key_dict_coerces(self): self.assertEqual(KeyDict({"foo": DummySchema()}).coerce({"foo": 3}), {"foo": "hello!"}) def test_key_dict_bad_inner_schema(self): self.assertRaises(InvalidError, KeyDict({"foo": Int()}).coerce, {"foo": "hello"}) def test_key_dict_unknown_key(self): self.assertRaises(InvalidError, KeyDict({}).coerce, {"foo": 1}) def test_key_dict_bad(self): self.assertRaises(InvalidError, KeyDict({}).coerce, object()) def test_key_dict_multiple_items(self): schema = KeyDict({"one": Int(), "two": List(Float())}) input = {"one": 32, "two": [1.5, 2.3]} self.assertEqual(schema.coerce(input), {"one": 32, "two": [1.5, 2.3]}) def test_key_dict_arbitrary_keys(self): """ KeyDict doesn't actually need to have strings as keys, just any object which hashes the same. """ key = object() self.assertEqual(KeyDict({key: Int()}).coerce({key: 32}), {key: 32}) def test_key_dict_must_have_all_keys(self): """ dicts which are applied to a KeyDict must have all the keys specified in the KeyDict. """ schema = KeyDict({"foo": Int()}) self.assertRaises(InvalidError, schema.coerce, {}) def test_key_dict_optional_keys(self): """KeyDict allows certain keys to be optional. """ schema = KeyDict({"foo": Int(), "bar": Int()}, optional=["bar"]) self.assertEqual(schema.coerce({"foo": 32}), {"foo": 32}) def test_pass_optional_key(self): """Regression test. It should be possible to pass an optional key. """ schema = KeyDict({"foo": Int()}, optional=["foo"]) self.assertEqual(schema.coerce({"foo": 32}), {"foo": 32}) def test_dict(self): self.assertEqual(Dict(Int(), Bytes()).coerce({32: "hello."}), {32: "hello."}) def test_dict_coerces(self): self.assertEqual( Dict(DummySchema(), DummySchema()).coerce({32: object()}), {"hello!": "hello!"}) def test_dict_inner_bad(self): self.assertRaises(InvalidError, Dict(Int(), Int()).coerce, {"32": 32}) def test_dict_wrong_type(self): self.assertRaises(InvalidError, Dict(Int(), Int()).coerce, 32) def test_message(self): """The L{Message} schema should be very similar to KeyDict.""" schema = Message("foo", {"data": Int()}) self.assertEqual( schema.coerce({"type": "foo", "data": 3}), {"type": "foo", "data": 3}) def test_message_timestamp(self): """L{Message} schemas should accept C{timestamp} keys.""" schema = Message("bar", {}) self.assertEqual( schema.coerce({"type": "bar", "timestamp": 0.33}), {"type": "bar", "timestamp": 0.33}) def test_message_api(self): """L{Message} schemas should accept C{api} keys.""" schema = Message("baz", {}) self.assertEqual( schema.coerce({"type": "baz", "api": "whatever"}), {"type": "baz", "api": "whatever"}) def test_message_api_None(self): """L{Message} schemas should accept None for C{api}.""" schema = Message("baz", {}) self.assertEqual( schema.coerce({"type": "baz", "api": None}), {"type": "baz", "api": None}) def test_message_optional(self): """The L{Message} schema should allow additional optional keys.""" schema = Message("foo", {"data": Int()}, optional=["data"]) self.assertEqual(schema.coerce({"type": "foo"}), {"type": "foo"}) def test_message_type(self): """The C{type} should be introspectable on L{Message} objects.""" schema = Message("foo", {}) self.assertEqual(schema.type, "foo") def test_message_with_unknown_fields(self): """ The L{Message} schema discards unknown fields when coercing values. 
""" schema = Message("foo", {}) self.assertEqual({"type": "foo"}, schema.coerce({"type": "foo", "crap": 123})) landscape-client-14.01/landscape/tests/test_watchdog.py0000644000175000017500000015232612301414317023053 0ustar andreasandreasimport stat import time import sys import os import signal import logging from twisted.internet.utils import getProcessOutput from twisted.internet.defer import Deferred, succeed, fail from twisted.internet import reactor from twisted.internet.task import deferLater from landscape.tests.mocker import ARGS, KWARGS, ANY from landscape.tests.clock import Clock from landscape.tests.helpers import ( LandscapeTest, EnvironSaverHelper, FakeBrokerServiceHelper) from landscape.watchdog import ( Daemon, WatchDog, WatchDogService, ExecutableNotFoundError, WatchDogConfiguration, bootstrap_list, MAXIMUM_CONSECUTIVE_RESTARTS, RESTART_BURST_DELAY, run, Broker, Monitor, Manager) from landscape.lib.dns import discover_server from landscape.configuration import ( fetch_base64_ssl_public_certificate, print_text) from landscape.amp import ComponentConnector from landscape.broker.amp import RemoteBrokerConnector from landscape.deployment import Configuration from landscape.reactor import LandscapeReactor import landscape.watchdog class StubDaemon(object): program = "program-name" class WatchDogTest(LandscapeTest): """ Tests for L{landscape.watchdog.WatchDog}. """ def setUp(self): super(WatchDogTest, self).setUp() self.broker_factory = self.mocker.replace( "landscape.watchdog.Broker", passthrough=False) self.monitor_factory = self.mocker.replace( "landscape.watchdog.Monitor", passthrough=False) self.manager_factory = self.mocker.replace( "landscape.watchdog.Manager", passthrough=False) self.config = WatchDogConfiguration() def start_all_daemons(self): self.broker = self.broker_factory(ANY, verbose=False, config=None) self.monitor = self.monitor_factory(ANY, verbose=False, config=None) self.manager = self.manager_factory(ANY, verbose=False, config=None) self.expect(self.broker.program).result("landscape-broker") self.mocker.count(0, None) self.expect(self.manager.program).result("landscape-manager") self.mocker.count(0, None) self.expect(self.monitor.program).result("landscape-monitor") self.mocker.count(0, None) def test_daemon_construction(self): """The WatchDog sets up some daemons when constructed.""" self.start_all_daemons() self.mocker.replay() WatchDog(config=self.config) def test_limited_daemon_construction(self): self.broker_factory(ANY, verbose=False, config=None) self.monitor_factory(ANY, verbose=False, config=None) # The manager should *not* be constructed self.manager_factory(ARGS, KWARGS) self.mocker.count(0) self.mocker.replay() WatchDog(enabled_daemons=[Broker, Monitor], config=self.config) def test_check_running_one(self): self.start_all_daemons() self.expect(self.broker.is_running()).result(succeed(True)) self.expect(self.monitor.is_running()).result(succeed(False)) self.expect(self.manager.is_running()).result(succeed(False)) self.mocker.replay() result = WatchDog(config=self.config).check_running() def got_result(r): self.assertEqual([daemon.program for daemon in r], ["landscape-broker"]) return result.addCallback(got_result) def test_check_running_many(self): self.start_all_daemons() self.expect(self.broker.is_running()).result(succeed(True)) self.expect(self.monitor.is_running()).result(succeed(True)) self.expect(self.manager.is_running()).result(succeed(True)) self.mocker.replay() result = WatchDog(config=self.config).check_running() def got_result(r): 
self.assertEqual([daemon.program for daemon in r], ["landscape-broker", "landscape-monitor", "landscape-manager"]) return result.addCallback(got_result) def test_check_running_limited_daemons(self): """ When the user has explicitly asked not to run some daemons, those daemons which are not being run should not checked. """ self.broker = self.broker_factory(ANY, verbose=False, config=None) self.expect(self.broker.program).result("landscape-broker") self.expect(self.broker.is_running()).result(succeed(True)) self.mocker.replay() result = WatchDog(enabled_daemons=[Broker], config=self.config).check_running() def got_result(r): self.assertEqual(len(r), 1) self.assertEqual(r[0].program, "landscape-broker") return result.addCallback(got_result) def expect_request_exit(self): self.expect(self.broker.prepare_for_shutdown()) self.expect(self.monitor.prepare_for_shutdown()) self.expect(self.manager.prepare_for_shutdown()) self.expect(self.broker.request_exit()).result(succeed(True)) self.expect(self.broker.wait_or_die()).result(succeed(None)) self.expect(self.monitor.wait_or_die()).result(succeed(None)) self.expect(self.manager.wait_or_die()).result(succeed(None)) def test_start_and_stop_daemons(self): """The WatchDog will start all daemons, starting with the broker.""" self.start_all_daemons() self.mocker.order() self.broker.start() self.monitor.start() self.manager.start() self.expect_request_exit() self.mocker.replay() clock = Clock() dog = WatchDog(clock, config=self.config) dog.start() clock.advance(0) return dog.request_exit() def test_start_limited_daemons(self): """ start only starts the daemons which are actually enabled. """ self.broker = self.broker_factory(ANY, verbose=False, config=None) self.expect(self.broker.program).result("landscape-broker") self.mocker.count(0, None) self.broker.start() self.mocker.replay() clock = Clock() dog = WatchDog(clock, enabled_daemons=[Broker], config=self.config) dog.start() def test_request_exit(self): """request_exit() asks the broker to exit. The broker itself is responsible for notifying other plugins to exit. When the deferred returned from request_exit fires, the process should definitely be gone. """ self.start_all_daemons() self.expect_request_exit() self.mocker.replay() return WatchDog(config=self.config).request_exit() def test_ping_reply_after_request_exit_should_not_restart_processes(self): """ When request_exit occurs between a ping request and response, a failing ping response should not cause the process to be restarted. """ self.start_all_daemons() self.mocker.order() self.broker.start() self.monitor.start() self.manager.start() monitor_ping_result = Deferred() self.expect(self.broker.is_running()).result(succeed(True)) self.expect(self.monitor.is_running()).result(monitor_ping_result) self.expect(self.manager.is_running()).result(succeed(True)) self.expect_request_exit() # And the monitor should never be explicitly stopped / restarted. 
self.expect(self.monitor.stop()).count(0) self.expect(self.monitor.start()).count(0) self.mocker.replay() clock = Clock() dog = WatchDog(clock, config=self.config) dog.start() clock.advance(0) clock.advance(5) result = dog.request_exit() monitor_ping_result.callback(False) return result START = "start" STOP = "stop" class BoringDaemon(object): def __init__(self, program): self.program = program self.boots = [] def start(self): self.boots.append(START) def stop(self): self.boots.append(STOP) return succeed(None) def is_running(self): return succeed(True) def request_exit(self): return succeed(True) def wait(self): return succeed(None) def wait_or_die(self): return self.wait() def prepare_for_shutdown(self): pass class AsynchronousPingDaemon(BoringDaemon): pings = 0 deferred = None def is_running(self): self.pings += 1 if self.deferred is not None: raise AssertionError( "is_running called while it's already running!") self.deferred = Deferred() return self.deferred def fire_running(self, value): self.deferred.callback(value) self.deferred = None class NonMockerWatchDogTests(LandscapeTest): def test_ping_is_not_rescheduled_until_pings_complete(self): clock = Clock() dog = WatchDog(clock, broker=AsynchronousPingDaemon("test-broker"), monitor=AsynchronousPingDaemon("test-monitor"), manager=AsynchronousPingDaemon("test-manager")) dog.start_monitoring() clock.advance(5) for daemon in dog.daemons: self.assertEqual(daemon.pings, 1) clock.advance(5) for daemon in dog.daemons: self.assertEqual(daemon.pings, 1) daemon.fire_running(True) clock.advance(5) for daemon in dog.daemons: self.assertEqual(daemon.pings, 2) def test_check_daemons(self): """ The daemons are checked to be running every so often. When N=5 of these checks fail, the daemon will be restarted. """ clock = Clock() dog = WatchDog(clock, broker=AsynchronousPingDaemon("test-broker"), monitor=AsynchronousPingDaemon("test-monitor"), manager=AsynchronousPingDaemon("test-manager")) dog.start_monitoring() for i in range(4): clock.advance(5) dog.broker.fire_running(False) dog.monitor.fire_running(True) dog.manager.fire_running(True) self.assertEqual(dog.broker.boots, []) clock.advance(5) dog.broker.fire_running(False) dog.monitor.fire_running(True) dog.manager.fire_running(True) self.assertEqual(dog.broker.boots, [STOP, START]) def test_counted_ping_failures_reset_on_success(self): """ When a failing ping is followed by a successful ping, it will then require 5 more ping failures to restart the daemon. """ clock = Clock() dog = WatchDog(clock, broker=AsynchronousPingDaemon("test-broker"), monitor=AsynchronousPingDaemon("test-monitor"), manager=AsynchronousPingDaemon("test-manager")) dog.start_monitoring() clock.advance(5) dog.broker.fire_running(False) dog.monitor.fire_running(True) dog.manager.fire_running(True) clock.advance(5) dog.broker.fire_running(True) dog.monitor.fire_running(True) dog.manager.fire_running(True) for i in range(4): clock.advance(5) dog.broker.fire_running(False) dog.monitor.fire_running(True) dog.manager.fire_running(True) self.assertEqual(dog.broker.boots, []) clock.advance(5) dog.broker.fire_running(False) dog.monitor.fire_running(True) dog.manager.fire_running(True) self.assertEqual(dog.broker.boots, [STOP, START]) def test_exiting_during_outstanding_ping_works(self): """ This is a regression test. Some code called .cancel() on a timed call without checking if it was active first. Asynchronous is_running will cause the scheduled call to exist but already fired. 
""" clock = Clock() dog = WatchDog(clock, broker=BoringDaemon("test-broker"), monitor=BoringDaemon("test-monitor"), manager=AsynchronousPingDaemon("test-manager")) dog.start_monitoring() clock.advance(5) return dog.request_exit() def test_wait_for_stop_before_start(self): """ When a daemon times out and the watchdog attempts to kill it, it should not be restarted until the process has fully died. """ clock = Clock() dog = WatchDog(clock, broker=AsynchronousPingDaemon("test-broker"), monitor=BoringDaemon("test-monitor"), manager=BoringDaemon("test-manager")) stop_result = Deferred() dog.broker.stop = lambda: stop_result dog.start_monitoring() for i in range(5): clock.advance(5) dog.broker.fire_running(False) self.assertEqual(dog.broker.boots, []) stop_result.callback(None) self.assertEqual(dog.broker.boots, ["start"]) def test_wait_for_stop_before_ping(self): """ When a daemon times out and the watchdog restarts it, it should not be pinged until after the restart completes. """ clock = Clock() dog = WatchDog(clock, broker=AsynchronousPingDaemon("test-broker"), monitor=BoringDaemon("test-monitor"), manager=BoringDaemon("test-manager")) stop_result = Deferred() dog.broker.stop = lambda: stop_result dog.start_monitoring() for i in range(5): clock.advance(5) dog.broker.fire_running(False) self.assertEqual(dog.broker.boots, []) self.assertEqual(dog.broker.pings, 5) clock.advance(5) # wait some more to see if a ping happens self.assertEqual(dog.broker.pings, 5) stop_result.callback(None) self.assertEqual(dog.broker.boots, ["start"]) clock.advance(5) self.assertEqual(dog.broker.pings, 6) def test_ping_failure_counter_reset_after_restart(self): """ When a daemon stops responding and gets restarted after 5 failed pings, it will wait for another 5 failed pings before it will be restarted again. """ clock = Clock() dog = WatchDog(clock, broker=AsynchronousPingDaemon("test-broker"), monitor=BoringDaemon("test-monitor"), manager=BoringDaemon("test-manager")) dog.start_monitoring() for i in range(5): clock.advance(5) dog.broker.fire_running(False) self.assertEqual(dog.broker.boots, ["stop", "start"]) for i in range(4): clock.advance(5) dog.broker.fire_running(False) self.assertEqual(dog.broker.boots, ["stop", "start"]) clock.advance(5) dog.broker.fire_running(False) self.assertEqual(dog.broker.boots, ["stop", "start", "stop", "start"]) def test_die_when_broker_unavailable(self): """ If the broker is not running, the client should still be able to shut down. """ self.log_helper.ignore_errors( "Couldn't request that broker gracefully shut down; " "killing forcefully.") clock = Clock() dog = WatchDog(clock, broker=BoringDaemon("test-broker"), monitor=BoringDaemon("test-monitor"), manager=BoringDaemon("test-manager")) # request_exit returns False when there's no broker, as tested by # DaemonTest.test_request_exit_without_broker dog.broker.request_exit = lambda: succeed(False) # The manager's wait method never fires its deferred because nothing # told it to die because the broker is dead! 
        manager_result = Deferred()
        dog.manager.wait = lambda: manager_result

        def stop():
            manager_result.callback(True)
            return succeed(True)
        dog.manager.stop = stop

        result = dog.request_exit()
        return result


class StubBroker(object):

    name = "broker"


class RemoteStubBrokerConnector(ComponentConnector):

    component = StubBroker


class DaemonTestBase(LandscapeTest):

    connector_factory = RemoteStubBrokerConnector

    def setUp(self):
        super(DaemonTestBase, self).setUp()

        self.exec_dir = self.makeDir()
        self.exec_name = os.path.join(self.exec_dir, "landscape-broker")

        self.saved_argv = sys.argv
        sys.argv = [os.path.join(self.exec_dir, "arv0_execname")]

        if hasattr(self, "broker_service"):
            # DaemonBrokerTest
            self.broker_service.startService()
            self.config = self.broker_service.config
        else:
            # DaemonTest
            self.config = WatchDogConfiguration()
            self.config.data_path = self.makeDir()
            self.makeDir(path=self.config.sockets_path)

        self.connector = self.connector_factory(LandscapeReactor(),
                                                self.config)
        self.daemon = self.get_daemon()

    def tearDown(self):
        sys.argv = self.saved_argv
        if hasattr(self, "broker_service"):
            # DaemonBrokerTest
            self.broker_service.stopService()
        super(DaemonTestBase, self).tearDown()

    def get_daemon(self, **kwargs):
        if 'username' in kwargs:
            class MyDaemon(Daemon):
                username = kwargs.pop('username')
        else:
            MyDaemon = Daemon
        daemon = MyDaemon(self.connector, **kwargs)
        daemon.program = os.path.basename(self.exec_name)
        daemon.factor = 0.01
        return daemon


class FileChangeWaiter(object):

    def __init__(self, filename):
        os.utime(filename, (0, 0))
        self._mtime = os.path.getmtime(filename)
        self._filename = filename

    def wait(self):
        while self._mtime == os.path.getmtime(self._filename):
            time.sleep(0.1)


class DaemonTest(DaemonTestBase):

    def test_find_executable_works(self):
        self.makeFile("I'm the broker.", path=self.exec_name)
        self.assertEqual(self.daemon.find_executable(), self.exec_name)

    def test_find_executable_cant_find_file(self):
        self.assertRaises(ExecutableNotFoundError,
                          self.daemon.find_executable)

    def test_start_process(self):
        output_filename = self.makeFile("NOT RUN")
        self.makeFile('#!/bin/sh\necho "RUN $@" > %s' % output_filename,
                      path=self.exec_name)
        os.chmod(self.exec_name, 0755)

        waiter = FileChangeWaiter(output_filename)
        self.daemon.start()
        waiter.wait()
        self.assertEqual(open(output_filename).read(),
                         "RUN --ignore-sigint --quiet\n")

        return self.daemon.stop()

    def test_start_process_with_verbose(self):
        output_filename = self.makeFile("NOT RUN")
        self.makeFile('#!/bin/sh\necho "RUN $@" > %s' % output_filename,
                      path=self.exec_name)
        os.chmod(self.exec_name, 0755)

        waiter = FileChangeWaiter(output_filename)
        daemon = self.get_daemon(verbose=True)
        daemon.start()
        waiter.wait()
        self.assertEqual(open(output_filename).read(),
                         "RUN --ignore-sigint\n")

        return daemon.stop()

    def test_kill_process_with_sigterm(self):
        """The stop() method sends SIGTERM to the subprocess."""
        output_filename = self.makeFile("NOT RUN")
        self.makeFile("#!%s\n"
                      "import time\n"
                      "file = open(%r, 'w')\n"
                      "file.write('RUN')\n"
                      "file.close()\n"
                      "time.sleep(1000)\n"
                      % (sys.executable, output_filename),
                      path=self.exec_name)
        os.chmod(self.exec_name, 0755)

        waiter = FileChangeWaiter(output_filename)
        self.daemon.start()
        waiter.wait()
        self.assertEqual(open(output_filename).read(), "RUN")
        return self.daemon.stop()

    def test_kill_process_with_sigkill(self):
        """
        Verify that killing the process really works, even if the process is
        ignoring SIGTERM. In that case, a SIGKILL is performed some time
        after the SIGTERM was issued and didn't work.
""" output_filename = self.makeFile("NOT RUN") self.makeFile("#!%s\n" "import signal, os\n" "signal.signal(signal.SIGTERM, signal.SIG_IGN)\n" "file = open(%r, 'w')\n" "file.write('RUN')\n" "file.close()\n" "os.kill(os.getpid(), signal.SIGSTOP)\n" % (sys.executable, output_filename), path=self.exec_name) os.chmod(self.exec_name, 0755) self.addCleanup(setattr, landscape.watchdog, "SIGKILL_DELAY", landscape.watchdog.SIGKILL_DELAY) landscape.watchdog.SIGKILL_DELAY = 1 waiter = FileChangeWaiter(output_filename) self.daemon.start() waiter.wait() self.assertEqual(open(output_filename).read(), "RUN") return self.daemon.stop() def test_wait_for_process(self): """ The C{wait} method returns a Deferred that fires when the process has died. """ output_filename = self.makeFile("NOT RUN") self.makeFile('#!/bin/sh\necho "RUN" > %s' % output_filename, path=self.exec_name) os.chmod(self.exec_name, 0755) self.daemon.start() def got_result(result): self.assertEqual(open(output_filename).read(), "RUN\n") return self.daemon.wait().addCallback(got_result) def test_wait_or_die_dies_happily(self): """ The C{wait_or_die} method will wait for the process to die for a certain amount of time, just like C{wait}. """ output_filename = self.makeFile("NOT RUN") self.makeFile('#!/bin/sh\necho "RUN" > %s' % output_filename, path=self.exec_name) os.chmod(self.exec_name, 0755) self.daemon.start() def got_result(result): self.assertEqual(open(output_filename).read(), "RUN\n") return self.daemon.wait_or_die().addCallback(got_result) def test_wait_or_die_terminates(self): """wait_or_die eventually terminates the process.""" output_filename = self.makeFile("NOT RUN") self.makeFile("""\ #!%(exe)s import time import signal file = open(%(out)r, 'w') file.write('unsignalled') file.close() def term(frame, sig): file = open(%(out)r, 'w') file.write('TERMINATED') file.close() signal.signal(signal.SIGTERM, term) time.sleep(999) """ % {"exe": sys.executable, "out": output_filename}, path=self.exec_name) os.chmod(self.exec_name, 0755) self.addCleanup(setattr, landscape.watchdog, "GRACEFUL_WAIT_PERIOD", landscape.watchdog.GRACEFUL_WAIT_PERIOD) landscape.watchdog.GRACEFUL_WAIT_PERIOD = 0.2 self.daemon.start() def got_result(result): self.assertEqual(open(output_filename).read(), "TERMINATED") return self.daemon.wait_or_die().addCallback(got_result) def test_wait_or_die_kills(self): """ wait_or_die eventually falls back to KILLing a process, after waiting and terminating don't work. """ output_filename = self.makeFile("NOT RUN") self.makeFile("#!%s\n" "import signal, os\n" "signal.signal(signal.SIGTERM, signal.SIG_IGN)\n" "file = open(%r, 'w')\n" "file.write('RUN')\n" "file.close()\n" "os.kill(os.getpid(), signal.SIGSTOP)\n" % (sys.executable, output_filename), path=self.exec_name) os.chmod(self.exec_name, 0755) self.addCleanup(setattr, landscape.watchdog, "SIGKILL_DELAY", landscape.watchdog.SIGKILL_DELAY) self.addCleanup(setattr, landscape.watchdog, "GRACEFUL_WAIT_PERIOD", landscape.watchdog.GRACEFUL_WAIT_PERIOD) landscape.watchdog.GRACEFUL_WAIT_PERIOD = 1 landscape.watchdog.SIGKILL_DELAY = 1 waiter = FileChangeWaiter(output_filename) self.daemon.start() waiter.wait() self.assertEqual(open(output_filename).read(), "RUN") return self.daemon.wait_or_die() def test_wait_for_unstarted_process(self): """ If a process has never been started, waiting for it is immediately successful. 
""" daemon = self.get_daemon() def assert_wait(is_running): self.assertFalse(is_running) return daemon.wait() result = daemon.is_running() result.addCallback(assert_wait) return result def test_wait_or_die_for_unstarted_process(self): """ If a process has never been started, wait_or_die is immediately successful. """ daemon = self.get_daemon() l = [] daemon.wait_or_die().addCallback(l.append) self.assertEqual(l, [None]) def test_simulate_broker_not_starting_up(self): """ When a daemon repeatedly dies, the watchdog gives up entirely and shuts down. """ self.log_helper.ignore_errors("Can't keep landscape-broker running. " "Exiting.") output_filename = self.makeFile("NOT RUN") self.makeFile("#!/bin/sh\necho RUN >> %s" % output_filename, path=self.exec_name) os.chmod(self.exec_name, 0755) def got_result(result): self.assertEqual(len(list(open(output_filename))), MAXIMUM_CONSECUTIVE_RESTARTS) self.assertTrue("Can't keep landscape-broker running." in self.logfile.getvalue()) reactor_mock = self.mocker.proxy(reactor, passthrough=True) reactor_mock.stop() self.mocker.replay() result = Deferred() result.addCallback(lambda x: self.daemon.stop()) result.addCallback(got_result) reactor.callLater(1, result.callback, None) daemon = self.get_daemon(reactor=reactor_mock) daemon.start() return result def test_simulate_broker_not_starting_up_with_delay(self): """ The watchdog won't shutdown entirely when a daemon dies repeatedly as long as it is not dying too quickly. """ # This test hacks the first time() call to make it return a timestamp # that happend a while ago, and so give the impression that some time # has passed and it's fine to restart more times again. self.log_helper.ignore_errors("Can't keep landscape-broker running. " "Exiting.") output_filename = self.makeFile("NOT RUN") self.makeFile("#!/bin/sh\necho RUN >> %s" % output_filename, path=self.exec_name) os.chmod(self.exec_name, 0755) def got_result(result): # Pay attention to the +1 bellow. It's the reason for this test. self.assertEqual(len(list(open(output_filename))), MAXIMUM_CONSECUTIVE_RESTARTS + 1) self.assertTrue("Can't keep landscape-broker running." in self.logfile.getvalue()) result = Deferred() result.addCallback(lambda x: self.daemon.stop()) result.addCallback(got_result) reactor_mock = self.mocker.proxy(reactor, passthrough=True) reactor_mock.stop() # Make the *first* call to time return 0, so that it will try one # more time, and exercise the burst protection system. time_mock = self.mocker.replace("time.time") self.expect(time_mock()).result(time.time() - RESTART_BURST_DELAY) self.expect(time_mock()).passthrough().count(0, None) self.mocker.replay() # It's important to call start() shortly after the mocking above, # as we don't want anyone else getting the fake time. daemon = self.get_daemon(reactor=reactor_mock) daemon.start() reactor.callLater(1, result.callback, None) return result def test_is_not_running(self): result = self.daemon.is_running() result.addCallback(self.assertFalse) return result def test_spawn_process_with_uid(self): """ When the current UID as reported by os.getuid is not the uid of the username of the daemon, the watchdog explicitly switches to the uid of the username of the daemon. It also specifies the gid as the primary group of that user. 
""" self.makeFile("", path=self.exec_name) getuid = self.mocker.replace("os.getuid") getpwnam = self.mocker.replace("pwd.getpwnam") reactor = self.mocker.mock() self.expect(getuid()).result(0) info = getpwnam("landscape") self.expect(info.pw_uid).result(123) self.expect(info.pw_gid).result(456) self.expect(info.pw_dir).result("/var/lib/landscape") env = os.environ.copy() env["HOME"] = "/var/lib/landscape" env["USER"] = "landscape" env["LOGNAME"] = "landscape" reactor.spawnProcess(ARGS, KWARGS, env=env, uid=123, gid=456) self.mocker.replay() daemon = self.get_daemon(reactor=reactor) daemon.start() def test_spawn_process_without_root(self): """ If the watchdog is not running as root, no uid or gid switching will occur. """ self.makeFile("", path=self.exec_name) getuid = self.mocker.replace("os.getuid") reactor = self.mocker.mock() self.expect(getuid()).result(555) reactor.spawnProcess(ARGS, KWARGS, uid=None, gid=None) self.mocker.replay() daemon = self.get_daemon(reactor=reactor) daemon.start() def test_spawn_process_same_uid(self): """ If the daemon is specified to run as root, and the watchdog is running as root, no uid or gid switching will occur. """ self.makeFile("", path=self.exec_name) getuid = self.mocker.replace("os.getuid") self.expect(getuid()).result(0) getgid = self.mocker.replace("os.getgid") self.expect(getgid()).result(0) reactor = self.mocker.mock() reactor.spawnProcess(ARGS, KWARGS, uid=None, gid=None) self.mocker.replay() daemon = self.get_daemon(reactor=reactor, username="root") daemon.start() def test_request_exit(self): """The request_exit() method calls exit() on the broker process.""" output_filename = self.makeFile("NOT CALLED") socket_filename = os.path.join(self.config.sockets_path, "broker.sock") broker_filename = self.makeFile(STUB_BROKER % {"executable": sys.executable, "path": sys.path, "output_filename": output_filename, "socket": socket_filename}) os.chmod(broker_filename, 0755) process_result = getProcessOutput(broker_filename, env=os.environ, errortoo=True) # Wait until the process starts up, trying the call a few times. self.daemon.factor = 2.8 self.daemon.request_exit() def got_result(result): self.assertEqual(result, "") self.assertEqual(open(output_filename).read(), "CALLED") return process_result.addCallback(got_result) def test_request_exit_without_broker(self): """ The request_exit method returns False when the broker can't be contacted. 
""" result = self.daemon.request_exit() return self.assertSuccess(result, False) class DaemonBrokerTest(DaemonTestBase): helpers = [FakeBrokerServiceHelper] @property def connector_factory(self): return RemoteBrokerConnector def test_is_running(self): self.daemon._connector._reactor = self.broker_service.reactor result = self.daemon.is_running() result.addCallback(self.assertTrue) return result class WatchDogOptionsTest(LandscapeTest): def setUp(self): super(WatchDogOptionsTest, self).setUp() self.config = WatchDogConfiguration() self.config.default_config_filenames = [] def test_daemon(self): self.config.load(["--daemon"]) self.assertTrue(self.config.daemon) def test_daemon_default(self): self.config.load([]) self.assertFalse(self.config.daemon) def test_pid_file(self): self.config.load(["--pid-file", "wubble.txt"]) self.assertEqual(self.config.pid_file, "wubble.txt") def test_pid_file_default(self): self.config.load([]) self.assertEqual(self.config.pid_file, None) def test_monitor_only(self): self.config.load(["--monitor-only"]) self.assertEqual(self.config.get_enabled_daemons(), [Broker, Monitor]) def test_default_daemons(self): self.config.load([]) self.assertEqual(self.config.get_enabled_daemons(), [Broker, Monitor, Manager]) class WatchDogServiceTest(LandscapeTest): def setUp(self): super(WatchDogServiceTest, self).setUp() self.configuration = WatchDogConfiguration() self.data_path = self.makeDir() self.log_dir = self.makeDir() self.config_filename = self.makeFile("[client]\n") self.configuration.load(["--config", self.config_filename, "--data-path", self.data_path, "--log-dir", self.log_dir]) def test_daemonize(self): self.mocker.order() watchdog = self.mocker.patch(WatchDog) watchdog.check_running() self.mocker.result(succeed([])) daemonize = self.mocker.replace("landscape.watchdog.daemonize", passthrough=False) daemonize() watchdog.start() self.mocker.result(succeed(None)) self.mocker.replay() self.configuration.daemon = True service = WatchDogService(self.configuration) service.startService() def test_pid_file(self): pid_file = self.makeFile() watchdog = self.mocker.patch(WatchDog) watchdog.check_running() self.mocker.result(succeed([])) daemonize = self.mocker.replace("landscape.watchdog.daemonize", passthrough=False) daemonize() watchdog.start() self.mocker.result(succeed(None)) self.mocker.replay() self.configuration.daemon = True self.configuration.pid_file = pid_file service = WatchDogService(self.configuration) service.startService() self.assertEqual(int(open(pid_file, "r").read()), os.getpid()) def test_dont_write_pid_file_until_we_really_start(self): """ If the client can't be started because another client is still running, the client shouldn't be daemonized and the pid file shouldn't be written. 
""" self.log_helper.ignore_errors( "ERROR: The following daemons are already running: program-name") pid_file = self.makeFile() daemonize = self.mocker.replace("landscape.watchdog.daemonize", passthrough=False) daemonize() # daemonize should *not* be called self.mocker.count(0) watchdog = self.mocker.patch(WatchDog) watchdog.check_running() self.mocker.result(succeed([StubDaemon()])) watchdog.start() self.mocker.count(0) reactor = self.mocker.replace("twisted.internet.reactor", passthrough=True) reactor.crash() self.mocker.result(None) self.mocker.replay() self.configuration.daemon = True self.configuration.pid_file = pid_file service = WatchDogService(self.configuration) try: service.startService() self.mocker.verify() finally: self.mocker.reset() self.assertFalse(os.path.exists(pid_file)) def test_remove_pid_file(self): """ When the service is stopped, the pid file is removed. """ #don't really daemonize or request an exit daemonize = self.mocker.replace("landscape.watchdog.daemonize", passthrough=False) watchdog_factory = self.mocker.replace("landscape.watchdog.WatchDog", passthrough=False) watchdog = watchdog_factory(ARGS, KWARGS) watchdog.start() self.mocker.result(succeed(None)) watchdog.check_running() self.mocker.result(succeed([])) daemonize() watchdog.request_exit() self.mocker.result(succeed(None)) self.mocker.replay() pid_file = self.makeFile() self.configuration.daemon = True self.configuration.pid_file = pid_file service = WatchDogService(self.configuration) service.startService() self.assertEqual(int(open(pid_file).read()), os.getpid()) service.stopService() self.assertFalse(os.path.exists(pid_file)) def test_remove_pid_file_only_when_ours(self): #don't really request an exit watchdog = self.mocker.patch(WatchDog) watchdog.request_exit() self.mocker.result(succeed(None)) self.mocker.replay() pid_file = self.makeFile() self.configuration.pid_file = pid_file service = WatchDogService(self.configuration) open(pid_file, "w").write("abc") service.stopService() self.assertTrue(os.path.exists(pid_file)) def test_remove_pid_file_doesnt_explode_on_inaccessibility(self): pid_file = self.makeFile() # Make os.access say that the file isn't writable mock_os = self.mocker.replace("os") mock_os.access(pid_file, os.W_OK) self.mocker.result(False) watchdog = self.mocker.patch(WatchDog) watchdog.request_exit() self.mocker.result(succeed(None)) self.mocker.replay() self.configuration.pid_file = pid_file service = WatchDogService(self.configuration) open(pid_file, "w").write(str(os.getpid())) service.stopService() self.assertTrue(os.path.exists(pid_file)) def test_start_service_exits_when_already_running(self): self.log_helper.ignore_errors( "ERROR: The following daemons are already running: program-name") bootstrap_list_mock = self.mocker.patch(bootstrap_list) bootstrap_list_mock.bootstrap(data_path=self.data_path, log_dir=self.log_dir) service = WatchDogService(self.configuration) self.mocker.order() watchdog_mock = self.mocker.replace(service.watchdog) watchdog_mock.check_running() self.mocker.result(succeed([StubDaemon()])) reactor = self.mocker.replace("twisted.internet.reactor", passthrough=False) reactor.crash() self.mocker.replay() try: result = service.startService() self.mocker.verify() finally: self.mocker.reset() self.assertEqual(service.exit_code, 1) return result def test_start_service_exits_when_unknown_errors_occur(self): self.log_helper.ignore_errors(ZeroDivisionError) service = WatchDogService(self.configuration) bootstrap_list_mock = self.mocker.patch(bootstrap_list) 
        bootstrap_list_mock.bootstrap(data_path=self.data_path,
                                      log_dir=self.log_dir)

        self.mocker.order()

        watchdog_mock = self.mocker.replace(service.watchdog)
        watchdog_mock.check_running()
        self.mocker.result(succeed([]))
        watchdog_mock.start()
        deferred = fail(ZeroDivisionError("I'm an unknown error!"))
        self.mocker.result(deferred)

        reactor = self.mocker.replace("twisted.internet.reactor",
                                      passthrough=False)
        reactor.crash()
        self.mocker.replay()

        try:
            result = service.startService()
            self.mocker.verify()
        finally:
            self.mocker.reset()
        self.assertEqual(service.exit_code, 2)
        return result

    def test_autodiscover_config_write_with_pubkey(self):
        """
        When server_autodiscover is set True, and the config.ssl_public_key
        already exists, ensure we update and write the config file with the
        discovered server urls.
        """
        self.configuration.server_autodiscover = True
        self.configuration.ssl_public_key = "/tmp/fakepubkey.ssl"
        service = WatchDogService(self.configuration)

        # Validate appropriate initial config options
        self.assertEquals("https://landscape.canonical.com/message-system",
                          service._config.url)
        self.assertEquals("/tmp/fakepubkey.ssl",
                          service._config.ssl_public_key)
        self.assertTrue(service._config.server_autodiscover)

        bootstrap_list_mock = self.mocker.patch(bootstrap_list)
        bootstrap_list_mock.bootstrap(data_path=self.data_path,
                                      log_dir=self.log_dir)

        self.mocker.order()
        discover_mock = self.mocker.replace(discover_server,
                                            passthrough=False)
        discover_mock(self.configuration.autodiscover_srv_query_string,
                      self.configuration.autodiscover_a_query_string)
        self.mocker.result(succeed("fakehostname"))

        watchdog_mock = self.mocker.replace(service.watchdog)
        watchdog_mock.check_running()
        self.mocker.result(succeed([]))
        watchdog_mock.start()
        self.mocker.result(succeed(None))

        self.mocker.replay()

        # trigger something to ensure autodiscover() is called
        service.startService()

        # Reload config to validate config.write() was called with changes
        config = Configuration()
        config.load(["--config", self.config_filename])
        self.assertFalse(config.server_autodiscover)
        self.assertEquals("https://fakehostname/message-system", config.url)
        self.assertEquals("http://fakehostname/ping", config.ping_url)
        self.assertEquals("/tmp/fakepubkey.ssl", config.ssl_public_key)

    def test_autodiscover_config_write_without_pubkey(self):
        """
        WatchDogService should attempt to fetch the custom CA cert from the
        discovered server if server_autodiscover=True and ssl_public_key is
        undefined. If the discovered server has a custom signed CA cert, that
        should be saved and its file path should be written to the
        configuration file.
        """
        base64_cert = "base64: MTIzNDU2Nzg5MA=="  # encoded from 1234567890
        key_filename = os.path.join(
            self.data_path,
            os.path.basename(self.config_filename + ".ssl_public_key"))

        self.configuration.server_autodiscover = True
        service = WatchDogService(self.configuration)

        # Validate appropriate initial config options
        self.assertEquals(None, self.configuration.ssl_public_key)
        self.assertTrue(self.configuration.server_autodiscover)

        discover_mock = self.mocker.replace(discover_server,
                                            passthrough=False)
        discover_mock(self.configuration.autodiscover_srv_query_string,
                      self.configuration.autodiscover_a_query_string)
        self.mocker.result(succeed("fakehostname"))

        fetch_ca_mock = self.mocker.replace(
            fetch_base64_ssl_public_certificate, passthrough=False)
        fetch_ca_mock("fakehostname", on_info=ANY, on_error=ANY)
        self.mocker.result(base64_cert)

        print_text_mock = self.mocker.replace(print_text)
        print_text_mock("Writing SSL CA certificate to %s..."
% key_filename) watchdog_mock = self.mocker.replace(service.watchdog) watchdog_mock.check_running() self.mocker.result(succeed([])) watchdog_mock.start() self.mocker.result(succeed(None)) self.mocker.replay() service.startService() # Reload config file to validate config.write() was called with changes config = Configuration() config.load(["--config", self.config_filename]) self.assertFalse(config.server_autodiscover) self.assertEquals("https://fakehostname/message-system", config.url) self.assertEquals("http://fakehostname/ping", config.ping_url) self.assertEquals(key_filename, config.ssl_public_key) self.assertEqual("1234567890", open(key_filename, "r").read()) def test_autodiscover_config_write_without_pubkey_no_custom_ca(self): """ When server_autodiscover is set True, and the config does not have an ssl_public_key defined WatchDogService should attempt to fetch the custom CA cert from the discovered server. """ self.configuration.server_autodiscover = True service = WatchDogService(self.configuration) # Validate appropriate initial config options self.assertEquals(None, self.configuration.ssl_public_key) self.assertTrue(self.configuration.server_autodiscover) discover_mock = self.mocker.replace(discover_server, passthrough=False) discover_mock(self.configuration.autodiscover_srv_query_string, self.configuration.autodiscover_a_query_string) self.mocker.result(succeed("fakehostname")) fetch_ca_mock = self.mocker.replace( fetch_base64_ssl_public_certificate, passthrough=False) fetch_ca_mock("fakehostname", on_info=ANY, on_error=ANY) self.mocker.result("") # No Custom CA cert found watchdog_mock = self.mocker.replace(service.watchdog) watchdog_mock.check_running() self.mocker.result(succeed([])) watchdog_mock.start() self.mocker.result(succeed(None)) self.mocker.replay() service.startService() # Reload config file to validate config.write() was called with changes config = Configuration() config.load(["--config", self.config_filename]) self.assertFalse(config.server_autodiscover) self.assertEquals("https://fakehostname/message-system", config.url) self.assertEquals("http://fakehostname/ping", config.ping_url) self.assertEquals(None, config.ssl_public_key) def test_bootstrap(self): data_path = self.makeDir() log_dir = self.makeDir() def path(*suffix): return os.path.join(data_path, *suffix) getuid = self.mocker.replace("os.getuid") getuid() self.mocker.result(0) self.mocker.count(1, None) getpwnam = self.mocker.replace("pwd.getpwnam") value = getpwnam("landscape") self.mocker.count(1, None) value.pw_uid self.mocker.result(1234) self.mocker.count(1, None) getgrnam = self.mocker.replace("grp.getgrnam") value = getgrnam("root") self.mocker.count(1, None) value.gr_gid self.mocker.result(5678) self.mocker.count(1, None) chown = self.mocker.replace("os.chown") chown(path(), 1234, 5678) chown(path("messages"), 1234, 5678) chown(path("sockets"), 1234, 5678) chown(path("package"), 1234, 5678) chown(path("package/hash-id"), 1234, 5678) chown(path("package/binaries"), 1234, 5678) chown(path("package/upgrade-tool"), 1234, 5678) chown(path("custom-graph-scripts"), 1234, 5678) chown(path("package/database"), 1234, 5678) chown(log_dir, 1234, 5678) self.mocker.replay() bootstrap_list.bootstrap(data_path=data_path, log_dir=log_dir) self.assertTrue(os.path.isdir(path())) self.assertTrue(os.path.isdir(path("package"))) self.assertTrue(os.path.isdir(path("messages"))) self.assertTrue(os.path.isdir(path("custom-graph-scripts"))) self.assertTrue(os.path.isdir(log_dir)) 
self.assertTrue(os.path.isfile(path("package/database"))) def mode(*suffix): return stat.S_IMODE(os.stat(path(*suffix)).st_mode) self.assertEqual(mode(), 0755) self.assertEqual(mode("messages"), 0755) self.assertEqual(mode("package"), 0755) self.assertEqual(mode("package/hash-id"), 0755) self.assertEqual(mode("package/binaries"), 0755) self.assertEqual(mode("sockets"), 0750) self.assertEqual(mode("custom-graph-scripts"), 0755) self.assertEqual(mode("package/database"), 0644) def test_log_notification(self): """ SIGUSR1 should cause logs to be reopened. """ logging.getLogger().addHandler(logging.FileHandler(self.makeFile())) WatchDogService(self.configuration) # We expect the Watchdog to delegate to each of the sub-processes daemon_mock = self.mocker.patch(Daemon) daemon_mock.rotate_logs() self.mocker.count(3) self.mocker.replay() # Store the initial set of handlers original_streams = [handler.stream for handler in logging.getLogger().handlers if isinstance(handler, logging.FileHandler)] # We fire the signal os.kill(os.getpid(), signal.SIGUSR1) def check(ign): new_streams = [handler.stream for handler in logging.getLogger().handlers if isinstance(handler, logging.FileHandler)] for stream in new_streams: self.assertTrue(stream not in original_streams) # We need to give some room for the callFromThread to run d = deferLater(reactor, 0, lambda: None) return d.addCallback(check) STUB_BROKER = """\ #!%(executable)s import sys import warnings warnings.filterwarnings("ignore", "Python C API version mismatch", RuntimeWarning) from twisted.internet import reactor sys.path = %(path)r from landscape.lib.amp import MethodCallServerFactory from landscape.broker.server import BrokerServer from landscape.amp import get_remote_methods class StubBroker(object): def exit(self): file = open(%(output_filename)r, "w") file.write("CALLED") file.close() reactor.callLater(1, reactor.stop) stub_broker = StubBroker() methods = get_remote_methods(BrokerServer) factory = MethodCallServerFactory(stub_broker, methods) reactor.listenUNIX(%(socket)r, factory) reactor.run() """ class FakeReactor(Clock): running = False def run(self): self.running = True class WatchDogRunTests(LandscapeTest): helpers = [EnvironSaverHelper] def test_non_root(self): """ The watchdog should print an error message and exit if run by a normal user. """ self.mocker.replace("os.getuid")() self.mocker.count(1, None) self.mocker.result(1000) getpwnam = self.mocker.replace("pwd.getpwnam") getpwnam("landscape").pw_uid self.mocker.result(1001) self.mocker.replay() sys_exit = self.assertRaises(SystemExit, run, ["landscape-client"]) self.assertIn("landscape-client can only be run" " as 'root' or 'landscape'.", str(sys_exit)) def test_landscape_user(self): """ The watchdog *can* be run as the 'landscape' user. """ getpwnam = self.mocker.replace("pwd.getpwnam") getpwnam("landscape").pw_uid self.mocker.result(os.getuid()) self.mocker.replay() reactor = FakeReactor() run(["--log-dir", self.makeFile()], reactor=reactor) self.assertTrue(reactor.running) def test_no_landscape_user(self): """ The watchdog should print an error message and exit if the 'landscape' user doesn't exist. 
""" getpwnam = self.mocker.replace("pwd.getpwnam") getpwnam("landscape") self.mocker.throw(KeyError()) self.mocker.replay() sys_exit = self.assertRaises(SystemExit, run, ["landscape-client"]) self.assertIn("The 'landscape' user doesn't exist!", str(sys_exit)) def test_clean_environment(self): getpwnam = self.mocker.replace("pwd.getpwnam") getpwnam("landscape").pw_uid self.mocker.result(os.getuid()) self.mocker.replay() os.environ["DEBIAN_YO"] = "yo" os.environ["DEBCONF_YO"] = "yo" os.environ["LANDSCAPE_ATTACHMENTS"] = "some attachments" os.environ["MAIL"] = "/some/path" os.environ["UNRELATED"] = "unrelated" reactor = FakeReactor() run(["--log-dir", self.makeFile()], reactor=reactor) self.assertNotIn("DEBIAN_YO", os.environ) self.assertNotIn("DEBCONF_YO", os.environ) self.assertNotIn("LANDSCAPE_ATTACHMENTS", os.environ) self.assertNotIn("MAIL", os.environ) self.assertEqual(os.environ["UNRELATED"], "unrelated") landscape-client-14.01/landscape/manager/0000755000175000017500000000000012301414317020101 5ustar andreasandreaslandscape-client-14.01/landscape/manager/plugin.py0000644000175000017500000000373512301414317021761 0ustar andreasandreasfrom twisted.internet.defer import maybeDeferred from landscape.lib.log import log_failure from landscape.log import format_object from landscape.broker.client import BrokerClientPlugin # Protocol messages! Same constants are defined in the server. FAILED = 5 SUCCEEDED = 6 class ManagerPlugin(BrokerClientPlugin): @property def manager(self): """An alias for the C{client} attribute}.""" return self.client def call_with_operation_result(self, message, callable, *args, **kwargs): """Send an operation-result message after calling C{callable}. If the function returns normally, an operation-result indicating success will be sent. If the function raises an exception, an operation-result indicating failure will be sent. The function can also return a C{Deferred}, and the behavior above still applies. @param message: The original message. @param callable: The function to call to handle the message. C{args} and C{kwargs} are passed to it. """ deferred = maybeDeferred(callable, *args, **kwargs) def success(text): return SUCCEEDED, text def failure(failure): text = "%s: %s" % (failure.type.__name__, failure.value) msg = ("Error occured running message handler %s with " "args %r %r.", format_object(callable), args, kwargs) log_failure(failure, msg=msg) return FAILED, text def send((status, text)): result = {"type": "operation-result", "status": status, "operation-id": message["operation-id"]} if text: result["result-text"] = text return self.manager.broker.send_message( result, self._session_id, urgent=True) deferred.addCallback(success) deferred.addErrback(failure) deferred.addCallback(send) return deferred landscape-client-14.01/landscape/manager/customgraph.py0000644000175000017500000002110612301414317023007 0ustar andreasandreasimport os import time import logging from twisted.internet.defer import fail, DeferredList, succeed from landscape.lib.scriptcontent import generate_script_hash from landscape.accumulate import Accumulator from landscape.manager.plugin import ManagerPlugin from landscape.manager.scriptexecution import ( ProcessFailedError, ScriptRunnerMixin, ProcessTimeLimitReachedError, get_user_info, UnknownUserError) class StoreProxy(object): """ Persist-like interface to store graph-points into SQLite store. 
""" def __init__(self, store): self.store = store def get(self, key, default): graph_accumulate = self.store.get_graph_accumulate(key) if graph_accumulate: return graph_accumulate[1:] else: return default def set(self, key, value): self.store.set_graph_accumulate(key, value[0], value[1]) class InvalidFormatError(Exception): def __init__(self, value): self.value = value Exception.__init__(self, self._get_message()) def _get_message(self): return u"Failed to convert to number: '%s'" % self.value class NoOutputError(Exception): def __init__(self): Exception.__init__(self, u"Script did not output any value") class ProhibitedUserError(Exception): """ Raised when an attempt to run a script as a user that is not allowed. @ivar username: The username that was used """ def __init__(self, username): self.username = username Exception.__init__(self, self._get_message()) def _get_message(self): return (u"Custom graph cannot be run as user %s" % self.username) class CustomGraphPlugin(ManagerPlugin, ScriptRunnerMixin): """ Manage adding and deleting custom graph scripts, and then run the scripts in a loop. @param process_factory: The L{IReactorProcess} provider to run the process with. """ run_interval = 300 size_limit = 1000 time_limit = 10 message_type = "custom-graph" def __init__(self, process_factory=None, create_time=time.time): super(CustomGraphPlugin, self).__init__(process_factory) self._create_time = create_time self._data = {} self.do_send = True def register(self, registry): super(CustomGraphPlugin, self).register(registry) registry.register_message( "custom-graph-add", self._handle_custom_graph_add) registry.register_message( "custom-graph-remove", self._handle_custom_graph_remove) self._persist = StoreProxy(self.registry.store) self._accumulate = Accumulator(self._persist, self.run_interval) def _handle_custom_graph_remove(self, message): """ Handle remove custom-graph operation, deleting the custom graph scripts if found. """ graph_id = int(message["graph-id"]) graph = self.registry.store.get_graph(graph_id) if graph: filename = graph[1] os.unlink(filename) self.registry.store.remove_graph(graph_id) if graph_id in self._data: del self._data[graph_id] def _handle_custom_graph_add(self, message): """ Handle add custom-graph operation, which can also update an existing custom graph script. 
""" user = message["username"] shell = message["interpreter"] code = message["code"] graph_id = int(message["graph-id"]) data_path = self.registry.config.data_path scripts_directory = os.path.join(data_path, "custom-graph-scripts") filename = os.path.join( scripts_directory, "graph-%d" % (graph_id,)) if os.path.exists(filename): os.unlink(filename) try: uid, gid = get_user_info(user)[:2] except UnknownUserError: logging.error(u"Attempt to add graph with unknown user %s" % user) else: script_file = file(filename, "w") self.write_script_file( script_file, filename, shell, code, uid, gid) if graph_id in self._data: del self._data[graph_id] self.registry.store.add_graph(graph_id, filename, user) def _format_exception(self, e): return u"%s: %s" % (e.__class__.__name__, e.args[0]) def exchange(self, urgent=False): self.registry.broker.call_if_accepted( self.message_type, self.send_message, urgent) def send_message(self, urgent): if not self.do_send: return self.do_send = False graphs = list(self.registry.store.get_graphs()) for graph_id, filename, user in graphs: if graph_id not in self._data: if os.path.isfile(filename): script_hash = self._get_script_hash(filename) self._data[graph_id] = { "values": [], "error": u"", "script-hash": script_hash} message = {"type": self.message_type, "data": self._data} new_data = {} for graph_id, item in self._data.iteritems(): script_hash = item["script-hash"] new_data[graph_id] = { "values": [], "error": u"", "script-hash": script_hash} self._data = new_data self.registry.broker.send_message(message, self._session_id, urgent=urgent) def _handle_data(self, output, graph_id, now): if graph_id not in self._data: return try: data = float(output) except ValueError: if output: raise InvalidFormatError(output) else: raise NoOutputError() step_data = self._accumulate(now, data, graph_id) if step_data: self._data[graph_id]["values"].append(step_data) def _handle_error(self, failure, graph_id): if graph_id not in self._data: return if failure.check(ProcessFailedError): failure_value = failure.value.data.decode("utf-8") if failure.value.exit_code: failure_value = ("%s (process exited with code %d)" % (failure_value, failure.value.exit_code)) self._data[graph_id]["error"] = failure_value elif failure.check(ProcessTimeLimitReachedError): self._data[graph_id]["error"] = ( u"Process exceeded the %d seconds limit" % (self.time_limit,)) else: self._data[graph_id]["error"] = self._format_exception( failure.value) def _get_script_hash(self, filename): file_object = file(filename) script_content = file_object.read() file_object.close() return generate_script_hash(script_content) def run(self): """ Iterate all the custom graphs stored and then execute each script and handle the output. 
""" self.do_send = True graphs = list(self.registry.store.get_graphs()) if not graphs: # Shortcut to prevent useless call to call_if_accepted return succeed([]) return self.registry.broker.call_if_accepted( self.message_type, self._continue_run, graphs) def _continue_run(self, graphs): deferred_list = [] now = int(self._create_time()) for graph_id, filename, user in graphs: if os.path.isfile(filename): script_hash = self._get_script_hash(filename) else: script_hash = "" if graph_id not in self._data: self._data[graph_id] = { "values": [], "error": u"", "script-hash": script_hash} else: self._data[graph_id]["script-hash"] = script_hash try: uid, gid, path = get_user_info(user) except UnknownUserError, e: d = fail(e) d.addErrback(self._handle_error, graph_id) deferred_list.append(d) continue if not self.is_user_allowed(user): d = fail(ProhibitedUserError(user)) d.addErrback(self._handle_error, graph_id) deferred_list.append(d) continue if not os.path.isfile(filename): continue result = self._run_script( filename, uid, gid, path, {}, self.time_limit) result.addCallback(self._handle_data, graph_id, now) result.addErrback(self._handle_error, graph_id) deferred_list.append(result) return DeferredList(deferred_list) landscape-client-14.01/landscape/manager/usermanager.py0000644000175000017500000001454512301414317022775 0ustar andreasandreasimport logging from landscape.lib.encoding import encode_dict_if_needed from landscape.amp import ComponentConnector, ComponentPublisher, remote from landscape.user.management import UserManagement from landscape.manager.plugin import ManagerPlugin from landscape.monitor.usermonitor import RemoteUserMonitorConnector class UserManager(ManagerPlugin): name = "usermanager" def __init__(self, management=None, shadow_file="/etc/shadow"): self._management = management or UserManagement() self._shadow_file = shadow_file self._message_types = {"add-user": self._add_user, "edit-user": self._edit_user, "lock-user": self._lock_user, "unlock-user": self._unlock_user, "remove-user": self._remove_user, "add-group": self._add_group, "edit-group": self._edit_group, "remove-group": self._remove_group, "add-group-member": self._add_group_member, "remove-group-member": self._remove_group_member} self._publisher = None def register(self, registry): """ Schedule reactor events for generic L{Plugin} callbacks, user and group management operations, and resynchronization. """ super(UserManager, self).register(registry) self._registry = registry self._publisher = ComponentPublisher(self, self.registry.reactor, self.registry.config) self._publisher.start() for message_type in self._message_types: self._registry.register_message(message_type, self._message_dispatch) def stop(self): """Stop listening for incoming AMP connections.""" if self._publisher: self._publisher.stop() self._publisher = None @remote def get_locked_usernames(self): """Return a list of usernames with locked system accounts.""" locked_users = [] if self._shadow_file: try: shadow_file = open(self._shadow_file, "r") for line in shadow_file: parts = line.split(":") if len(parts) > 1: if parts[1].startswith("!"): locked_users.append(parts[0].strip()) except IOError, e: logging.error("Error reading shadow file. %s" % e) return locked_users def _message_dispatch(self, message): """Dispatch the given user-change request to the correct handler. @param message: The request we got from the server. 
""" user_monitor_connector = RemoteUserMonitorConnector( self.registry.reactor, self.registry.config) def detect_changes(user_monitor): self._user_monitor = user_monitor return user_monitor.detect_changes() result = user_monitor_connector.connect() result.addCallback(detect_changes) result.addCallback(self._perform_operation, message) result.addCallback(self._send_changes, message) result.addCallback(lambda x: user_monitor_connector.disconnect()) return result def _perform_operation(self, result, message): message_type = message["type"] message_method = self._message_types[message_type] message = encode_dict_if_needed(message) return self.call_with_operation_result(message, message_method, message) def _send_changes(self, result, message): return self._user_monitor.detect_changes(message["operation-id"]) def _add_user(self, message): """Run an C{add-user} operation.""" return self._management.add_user(message["username"], message["name"], message["password"], message["require-password-reset"], message["primary-group-name"], message["location"], message["work-number"], message["home-number"]) def _edit_user(self, message): """Run an C{edit-user} operation.""" return self._management.set_user_details( message["username"], password=message["password"], name=message["name"], location=message["location"], work_number=message["work-number"], home_number=message["home-number"], primary_group_name=message["primary-group-name"]) def _lock_user(self, message): """Run a C{lock-user} operation.""" return self._management.lock_user(message["username"]) def _unlock_user(self, message): """Run an C{unlock-user} operation.""" return self._management.unlock_user(message["username"]) def _remove_user(self, message): """Run a C{remove-user} operation.""" return self._management.remove_user(message["username"], message["delete-home"]) def _add_group(self, message): """Run an C{add-group} operation.""" return self._management.add_group(message["groupname"]) def _edit_group(self, message): """Run an C{edit-group} operation.""" return self._management.set_group_details(message["groupname"], message["new-name"]) def _add_group_member(self, message): """Run an C{add-group-member} operation.""" return self._management.add_group_member(message["username"], message["groupname"]) def _remove_group_member(self, message): """Run a C{remove-group-member} operation.""" return self._management.remove_group_member(message["username"], message["groupname"]) def _remove_group(self, message): """Run an C{remove-group} operation.""" return self._management.remove_group(message["groupname"]) class RemoteUserManagerConnector(ComponentConnector): component = UserManager landscape-client-14.01/landscape/manager/processkiller.py0000644000175000017500000000546212301414317023343 0ustar andreasandreasimport os import signal import logging from datetime import datetime from landscape.lib.process import ProcessInformation from landscape.manager.plugin import ManagerPlugin class ProcessNotFoundError(Exception): pass class ProcessMismatchError(Exception): pass class SignalProcessError(Exception): pass class ProcessKiller(ManagerPlugin): """ A management plugin that signals processes upon receiving a message from the server. 
""" def __init__(self, process_info=None): if process_info is None: process_info = ProcessInformation() self.process_info = process_info def register(self, registry): super(ProcessKiller, self).register(registry) registry.register_message("signal-process", self._handle_signal_process) def _handle_signal_process(self, message): self.call_with_operation_result(message, self.signal_process, message["pid"], message["name"], message["start-time"], message["signal"]) def signal_process(self, pid, name, start_time, signame): logging.info("Sending %s signal to the process with PID %d.", signame, pid) process_info = self.process_info.get_process_info(pid) if not process_info: start_time = datetime.utcfromtimestamp(start_time) message = ("The process %s with PID %d that started at %s UTC was " "not found") % (name, pid, start_time) raise ProcessNotFoundError(message) elif abs(process_info["start-time"] - start_time) > 2: # We don't check that the start time matches precisely because # the way we obtain boot times isn't very precise, and this may # cascade into having imprecise process start times. expected_time = datetime.utcfromtimestamp(start_time) actual_time = datetime.utcfromtimestamp(process_info["start-time"]) message = ("The process %s with PID %d that started at " "%s UTC was not found. A process with the same " "PID that started at %s UTC was found and not " "sent the %s signal") % (name, pid, expected_time, actual_time, signame) raise ProcessMismatchError(message) signum = getattr(signal, "SIG%s" % (signame,)) try: os.kill(pid, signum) except: # XXX Nothing is indicating what the problem was. message = ("Attempting to send the %s signal to the process " "%s with PID %d failed") % (signame, name, pid) raise SignalProcessError(message) landscape-client-14.01/landscape/manager/service.py0000644000175000017500000000431312301414317022114 0ustar andreasandreasfrom twisted.python.reflect import namedClass from landscape.service import LandscapeService, run_landscape_service from landscape.manager.config import ManagerConfiguration from landscape.broker.amp import RemoteBrokerConnector from landscape.amp import ComponentPublisher from landscape.manager.manager import Manager class ManagerService(LandscapeService): """ The core Twisted Service which creates and runs all necessary managing components when started. """ service_name = Manager.name def __init__(self, config): super(ManagerService, self).__init__(config) self.plugins = self.get_plugins() self.manager = Manager(self.reactor, self.config) self.publisher = ComponentPublisher(self.manager, self.reactor, self.config) def get_plugins(self): """Return instances of all the plugins enabled in the configuration.""" return [namedClass("landscape.manager.%s.%s" % (plugin_name.lower(), plugin_name))() for plugin_name in self.config.plugin_factories] def startService(self): """Start the manager service. This method does 3 things, in this order: - Start listening for connections on the manager socket. - Connect to the broker. - Add all configured plugins, that will in turn register themselves. 
""" super(ManagerService, self).startService() self.publisher.start() def start_plugins(broker): self.broker = broker self.manager.broker = broker for plugin in self.plugins: self.manager.add(plugin) return self.broker.register_client(self.service_name) self.connector = RemoteBrokerConnector(self.reactor, self.config) connected = self.connector.connect() return connected.addCallback(start_plugins) def stopService(self): """Stop the manager and close the connection with the broker.""" self.connector.disconnect() self.publisher.stop() super(ManagerService, self).stopService() def run(args): run_landscape_service(ManagerConfiguration, ManagerService, args) landscape-client-14.01/landscape/manager/manager.py0000644000175000017500000000100512301414317022061 0ustar andreasandreasfrom landscape.manager.store import ManagerStore from landscape.broker.client import BrokerClient # Protocol messages! Same constants are defined in the server. FAILED = 5 SUCCEEDED = 6 class Manager(BrokerClient): """Central point of integration for the Landscape Manager.""" name = "manager" def __init__(self, reactor, config): super(Manager, self).__init__(reactor) self.reactor = reactor self.config = config self.store = ManagerStore(self.config.store_filename) landscape-client-14.01/landscape/manager/fakepackagemanager.py0000644000175000017500000000357112301414317024236 0ustar andreasandreasimport random from landscape.manager.plugin import ManagerPlugin from landscape.manager.manager import SUCCEEDED class FakePackageManager(ManagerPlugin): run_interval = 1800 randint = random.randint def register(self, registry): super(FakePackageManager, self).register(registry) self.config = registry.config registry.register_message("change-packages", self.handle_change_packages) registry.register_message("change-package-locks", self.handle_change_package_locks) registry.register_message("release-upgrade", self.handle_release_upgrade) def _handle(self, response): delay = self.randint(30, 300) self.registry.reactor.call_later( delay, self.manager.broker.send_message, response, self._session_id, urgent=True) def handle_change_packages(self, message): response = {"type": "change-packages-result", "operation-id": message.get("operation-id"), "result-code": 1, "result-text": "OK done."} return self._handle(response) def handle_change_package_locks(self, message): response = {"type": "operation-result", "operation-id": message.get("operation-id"), "status": SUCCEEDED, "result-text": "Package locks successfully changed.", "result-code": 0} return self._handle(response) def handle_release_upgrade(self, message): response = {"type": "operation-result", "operation-id": message.get("operation-id"), "status": SUCCEEDED, "result-text": "Successful release upgrade.", "result-code": 0} return self._handle(response) landscape-client-14.01/landscape/manager/scriptexecution.py0000644000175000017500000003277212301414317023716 0ustar andreasandreas""" Functionality for running arbitrary shell scripts. @var ALL_USERS: A token indicating all users should be allowed. 
""" import os import pwd import tempfile import operator import shutil from twisted.internet.protocol import ProcessProtocol from twisted.internet.defer import ( Deferred, fail, inlineCallbacks, returnValue, succeed) from twisted.internet.error import ProcessDone from landscape import VERSION from landscape.constants import UBUNTU_PATH from landscape.lib.scriptcontent import build_script from landscape.lib.fetch import fetch_async, HTTPCodeError from landscape.lib.persist import Persist from landscape.lib.encoding import encode_if_needed from landscape.manager.plugin import ManagerPlugin, SUCCEEDED, FAILED ALL_USERS = object() TIMEOUT_RESULT = 102 PROCESS_FAILED_RESULT = 103 FETCH_ATTACHMENTS_FAILED_RESULT = 104 class UnknownUserError(Exception): pass def get_user_info(username=None): uid = None gid = None path = None if username is not None: username_str = encode_if_needed(username) try: info = pwd.getpwnam(username_str) except KeyError: raise UnknownUserError(u"Unknown user '%s'" % username) uid = info.pw_uid gid = info.pw_gid path = info.pw_dir if not os.path.exists(path): path = "/" return (uid, gid, path) class ProcessTimeLimitReachedError(Exception): """ Raised when a process has been running for too long. @ivar data: The data that the process printed before reaching the time limit. """ def __init__(self, data): self.data = data class ProcessFailedError(Exception): """Raised when a process exits with a non-0 exit code. @ivar data: The data that the process printed before reaching the time limit. """ def __init__(self, data, exit_code): self.data = data self.exit_code = exit_code class UnknownInterpreterError(Exception): """Raised when the interpreter specified to run a script is invalid. @ivar interpreter: the interpreter specified for the script. """ def __init__(self, interpreter): self.interpreter = interpreter Exception.__init__(self, self._get_message()) def _get_message(self): return "Unknown interpreter: '%s'" % self.interpreter class ScriptRunnerMixin(object): """ @param process_factory: The L{IReactorProcess} provider to run the process with. """ def __init__(self, process_factory=None): if process_factory is None: from twisted.internet import reactor as process_factory self.process_factory = process_factory def is_user_allowed(self, user): allowed_users = self.registry.config.get_allowed_script_users() return allowed_users == ALL_USERS or user in allowed_users def write_script_file(self, script_file, filename, shell, code, uid, gid): # Chown and chmod it before we write the data in it - the script may # have sensitive content # It would be nice to use fchown(2) and fchmod(2), but they're not # available in python and using it with ctypes is pretty tedious, not # to mention we can't get errno. os.chmod(filename, 0700) if uid is not None: os.chown(filename, uid, gid) script_file.write(build_script(shell, code)) script_file.close() def _run_script(self, filename, uid, gid, path, env, time_limit): if uid == os.getuid(): uid = None if gid == os.getgid(): gid = None pp = ProcessAccumulationProtocol( self.registry.reactor, self.size_limit) self.process_factory.spawnProcess( pp, filename, uid=uid, gid=gid, path=path, env=env) if time_limit is not None: pp.schedule_cancel(time_limit) return pp.result_deferred class ScriptExecutionPlugin(ManagerPlugin, ScriptRunnerMixin): """A plugin which allows execution of arbitrary shell scripts. @ivar size_limit: The number of bytes at which to truncate process output. 
""" size_limit = 500000 def register(self, registry): super(ScriptExecutionPlugin, self).register(registry) registry.register_message( "execute-script", self._handle_execute_script) def _respond(self, status, data, opid, result_code=None): if not isinstance(data, unicode): # Let's decode result-text, replacing non-printable # characters data = data.decode("utf-8", "replace") message = {"type": "operation-result", "status": status, "result-text": data, "operation-id": opid} if result_code: message["result-code"] = result_code return self.registry.broker.send_message( message, self._session_id, True) def _handle_execute_script(self, message): opid = message["operation-id"] try: user = message["username"] if not self.is_user_allowed(user): return self._respond( FAILED, u"Scripts cannot be run as user %s." % (user,), opid) server_supplied_env = message.get("env", None) d = self.run_script(message["interpreter"], message["code"], time_limit=message["time-limit"], user=user, attachments=message["attachments"], server_supplied_env=server_supplied_env) d.addCallback(self._respond_success, opid) d.addErrback(self._respond_failure, opid) return d except Exception, e: self._respond(FAILED, self._format_exception(e), opid) raise def _format_exception(self, e): return u"%s: %s" % (e.__class__.__name__, e.args[0]) def _respond_success(self, data, opid): return self._respond(SUCCEEDED, data, opid) def _respond_failure(self, failure, opid): code = None if failure.check(ProcessTimeLimitReachedError): code = TIMEOUT_RESULT elif failure.check(ProcessFailedError): code = PROCESS_FAILED_RESULT elif failure.check(HTTPCodeError): code = FETCH_ATTACHMENTS_FAILED_RESULT return self._respond( FAILED, str(failure.value), opid, FETCH_ATTACHMENTS_FAILED_RESULT) if code is not None: return self._respond(FAILED, failure.value.data, opid, code) else: return self._respond(FAILED, str(failure), opid) @inlineCallbacks def _save_attachments(self, attachments, uid, gid, computer_id): root_path = self.registry.config.url.rsplit("/", 1)[0] + "/attachment/" attachment_dir = tempfile.mkdtemp() headers = {"User-Agent": "landscape-client/%s" % VERSION, "Content-Type": "application/octet-stream", "X-Computer-ID": computer_id} for filename, attachment_id in attachments.items(): if isinstance(attachment_id, str): # Backward compatible behavior data = attachment_id yield succeed(None) else: data = yield fetch_async( "%s%d" % (root_path, attachment_id), cainfo=self.registry.config.ssl_public_key, headers=headers) full_filename = os.path.join(attachment_dir, filename) attachment = file(full_filename, "wb") os.chmod(full_filename, 0600) if uid is not None: os.chown(full_filename, uid, gid) attachment.write(data) attachment.close() os.chmod(attachment_dir, 0700) if uid is not None: os.chown(attachment_dir, uid, gid) returnValue(attachment_dir) def run_script(self, shell, code, user=None, time_limit=None, attachments=None, server_supplied_env=None): """ Run a script based on a shell and the code. A file will be written with #! as the first line, as executable, and run as the given user. XXX: Handle the 'reboot' and 'killall landscape-client' commands gracefully. @param shell: The interpreter to use. @param code: The code to run. @param user: The username to run the process as. @param time_limit: The number of seconds to allow the process to run before killing it and failing the returned Deferred with a L{ProcessTimeLimitReachedError}. @param attachments: C{dict} of filename/data attached to the script. 
@return: A deferred that will fire with the data printed by the process or fail with a L{ProcessTimeLimitReachedError}. """ if not os.path.exists(shell.split()[0]): return fail( UnknownInterpreterError(shell)) uid, gid, path = get_user_info(user) fd, filename = tempfile.mkstemp() script_file = os.fdopen(fd, "w") self.write_script_file( script_file, filename, shell, code, uid, gid) env = {"PATH": UBUNTU_PATH, "USER": user or "", "HOME": path or ""} if server_supplied_env: env.update(server_supplied_env) old_umask = os.umask(0022) if attachments: persist = Persist( filename=os.path.join(self.registry.config.data_path, "broker.bpickle")) persist = persist.root_at("registration") computer_id = persist.get("secure-id") d = self._save_attachments(attachments, uid, gid, computer_id) else: d = succeed(None) def prepare_script(attachment_dir): if attachment_dir is not None: env["LANDSCAPE_ATTACHMENTS"] = attachment_dir return self._run_script( filename, uid, gid, path, env, time_limit) d.addCallback(prepare_script) return d.addBoth(self._cleanup, filename, env, old_umask) def _cleanup(self, result, filename, env, old_umask): try: os.unlink(filename) except: pass if "LANDSCAPE_ATTACHMENTS" in env: try: shutil.rmtree(env["LANDSCAPE_ATTACHMENTS"]) except: pass os.umask(old_umask) return result class ProcessAccumulationProtocol(ProcessProtocol): """A ProcessProtocol which accumulates output. @ivar size_limit: The number of bytes at which to truncate output. """ def __init__(self, reactor, size_limit): self.data = [] self.result_deferred = Deferred() self._cancelled = False self.size_limit = size_limit self.reactor = reactor self._scheduled_cancel = None def schedule_cancel(self, time_limit): self._scheduled_cancel = self.reactor.call_later( time_limit, self._cancel) def childDataReceived(self, fd, data): """Some data was received from the child. Add it to our buffer, as long as it doesn't go over L{size_limit} bytes. """ current_size = reduce(operator.add, map(len, self.data), 0) self.data.append(data[:self.size_limit - current_size]) def processEnded(self, reason): """Fire back the deferred. The deferred will be fired with the string of data received from the subprocess, or if the subprocess was cancelled, a L{ProcessTimeLimitReachedError} will be fired with data accumulated so far. """ exit_code = reason.value.exitCode data = "".join(self.data) if self._cancelled: self.result_deferred.errback(ProcessTimeLimitReachedError(data)) else: if self._scheduled_cancel is not None: scheduled = self._scheduled_cancel self._scheduled_cancel = None self.reactor.cancel_call(scheduled) if reason.check(ProcessDone): self.result_deferred.callback(data) else: self.result_deferred.errback(ProcessFailedError(data, exit_code)) def _cancel(self): """ Close filedescriptors, kill the process, and indicate that a L{ProcessTimeLimitReachedError} should be fired on the deferred. """ # Sometimes children of the shell we're killing won't die unless their # file descriptors are closed! For example, if /bin/sh -c "cat" is the # process, "cat" won't die when we kill its shell. I'm not sure if this # is really sufficient: maybe there's a way we can walk over all # children of the process we started and kill them all. for i in (0, 1, 2): self.transport.closeChildFD(i) self.transport.signalProcess("KILL") self._cancelled = True class ScriptExecution(ManagerPlugin): """ Meta-plugin wrapping ScriptExecutionPlugin and CustomGraphPlugin. 
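    Adding this plugin to a manager registers both wrapped plugins, so
    each installs its own message handlers (a sketch; C{manager} stands
    for a running L{Manager} instance):

        plugin = ScriptExecution()
        manager.add(plugin)  # add() calls register(), which registers both sub-plugins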
""" def __init__(self): from landscape.manager.customgraph import CustomGraphPlugin self._script_execution = ScriptExecutionPlugin() self._custom_graph = CustomGraphPlugin() def register(self, registry): super(ScriptExecution, self).register(registry) self._script_execution.register(registry) self._custom_graph.register(registry) def exchange(self, urgent=False): self._custom_graph.exchange(urgent) landscape-client-14.01/landscape/manager/store.py0000644000175000017500000000540312301414317021611 0ustar andreasandreastry: import sqlite3 except ImportError: from pysqlite2 import dbapi2 as sqlite3 from landscape.package.store import with_cursor class ManagerStore(object): def __init__(self, filename): self._db = sqlite3.connect(filename) ensure_schema(self._db) @with_cursor def get_graph(self, cursor, graph_id): cursor.execute( "SELECT graph_id, filename, user FROM graph WHERE graph_id=?", (graph_id,)) return cursor.fetchone() @with_cursor def get_graphs(self, cursor): cursor.execute("SELECT graph_id, filename, user FROM graph") return cursor.fetchall() @with_cursor def add_graph(self, cursor, graph_id, filename, user): cursor.execute( "SELECT graph_id FROM graph WHERE graph_id=?", (graph_id,)) if cursor.fetchone(): cursor.execute( "UPDATE graph SET filename=?, user=? WHERE graph_id=?", (filename, user, graph_id)) else: cursor.execute( "INSERT INTO graph (graph_id, filename, user) VALUES (?, ?, ?)", (graph_id, filename, user)) @with_cursor def remove_graph(self, cursor, graph_id): cursor.execute("DELETE FROM graph WHERE graph_id=?", (graph_id,)) @with_cursor def set_graph_accumulate(self, cursor, graph_id, timestamp, value): cursor.execute( "SELECT graph_id, graph_timestamp, graph_value FROM " "graph_accumulate WHERE graph_id=?", (graph_id,)) graph_accumulate = cursor.fetchone() if graph_accumulate: cursor.execute( "UPDATE graph_accumulate SET graph_timestamp = ?, " "graph_value = ? WHERE graph_id=?", (timestamp, value, graph_id)) else: cursor.execute( "INSERT INTO graph_accumulate (graph_id, graph_timestamp, " "graph_value) VALUES (?, ?, ?)", (graph_id, timestamp, value)) @with_cursor def get_graph_accumulate(self, cursor, graph_id): cursor.execute( "SELECT graph_id, graph_timestamp, graph_value FROM " "graph_accumulate WHERE graph_id=?", (graph_id,)) return cursor.fetchone() def ensure_schema(db): cursor = db.cursor() try: cursor.execute("CREATE TABLE graph" " (graph_id INTEGER PRIMARY KEY," " filename TEXT NOT NULL, user TEXT)") cursor.execute("CREATE TABLE graph_accumulate" " (graph_id INTEGER PRIMARY KEY," " graph_timestamp INTEGER, graph_value FLOAT)") except sqlite3.OperationalError: cursor.close() db.rollback() else: cursor.close() db.commit() landscape-client-14.01/landscape/manager/haservice.py0000644000175000017500000001720612301414317022432 0ustar andreasandreasimport logging import os from twisted.python.failure import Failure from twisted.internet.utils import getProcessValue, getProcessOutputAndValue from twisted.internet.defer import succeed from landscape.lib.log import log_failure from landscape.manager.plugin import ManagerPlugin, SUCCEEDED, FAILED class CharmScriptError(Exception): """ Raised when a charm-provided script fails with a non-zero exit code. @ivar script: the name of the failed script @ivar code: the exit code of the failed script """ def __init__(self, script, code): self.script = script self.code = code Exception.__init__(self, self._get_message()) def _get_message(self): return ("Failed charm script: %s exited with return code %d." 
% (self.script, self.code)) class RunPartsError(Exception): """ Raised when a charm-provided health script run-parts directory contains a health script that fails with a non-zero exit code. @ivar stderr: the stderr from the failed run-parts command """ def __init__(self, stderr): self.message = ("%s" % stderr.split(":")[1].strip()) Exception.__init__(self, self._get_message()) def _get_message(self): return "Failed charm script: %s." % self.message class HAService(ManagerPlugin): """ Plugin to manage this computer's active participation in a high-availability cluster. It depends on charms delivering health scripts along with add_to_cluster and remove_from_cluster scripts to function. """ JUJU_UNITS_BASE = "/var/lib/juju/agents" CLUSTER_ONLINE = "add_to_cluster" CLUSTER_STANDBY = "remove_from_cluster" HEALTH_SCRIPTS_DIR = "health_checks.d" STATE_STANDBY = u"standby" STATE_ONLINE = u"online" def register(self, registry): super(HAService, self).register(registry) registry.register_message("change-ha-service", self.handle_change_ha_service) def _respond(self, status, data, operation_id): message = {"type": "operation-result", "status": status, "operation-id": operation_id} if data: message["result-text"] = data.decode("utf-8", "replace") return self.registry.broker.send_message( message, self._session_id, True) def _respond_success(self, data, message, operation_id): logging.info(message) return self._respond(SUCCEEDED, data, operation_id) def _respond_failure(self, failure, operation_id): """Handle exception failures.""" log_failure(failure) return self._respond(FAILED, failure.getErrorMessage(), operation_id) def _respond_failure_string(self, failure_string, operation_id): """Only handle string failures.""" logging.error(failure_string) return self._respond(FAILED, failure_string, operation_id) def _run_health_checks(self, scripts_path): """ Exercise any discovered health check scripts and return a deferred that fires with success or failure. """ health_dir = os.path.join(scripts_path, self.HEALTH_SCRIPTS_DIR) if not os.path.exists(health_dir) or not os.listdir(health_dir): # No scripts, no problem message = ( "Skipping juju charm health checks. No scripts at %s." % health_dir) logging.info(message) return succeed(message) def parse_output((stdout_data, stderr_data, status)): if status != 0: raise RunPartsError(stderr_data) else: return "All health checks succeeded." result = getProcessOutputAndValue( "run-parts", [health_dir], env=os.environ) return result.addCallback(parse_output) def _change_cluster_participation(self, _, scripts_path, service_state): """ Enables or disables a unit's participation in a cluster based on running charm-delivered CLUSTER_ONLINE and CLUSTER_STANDBY scripts if they exist. If the charm doesn't deliver scripts, return succeed(). """ if service_state == u"online": script_name = self.CLUSTER_ONLINE else: script_name = self.CLUSTER_STANDBY script = os.path.join(scripts_path, script_name) if not os.path.exists(script): logging.info("Ignoring juju charm cluster state change to '%s'. " "Charm script does not exist at %s." % (service_state, script)) return succeed( "This computer is always a participant in its high-availability" " cluster. No juju charm cluster settings changed.") def run_script(script): result = getProcessValue(script, env=os.environ) def validate_exit_code(code, script): if code != 0: raise CharmScriptError(script, code) else: return "%s succeeded."
% script return result.addCallback(validate_exit_code, script) return run_script(script) def _perform_state_change(self, scripts_path, service_state, operation_id): """ Handle specific state change requests through calls to available charm scripts like C{CLUSTER_ONLINE}, C{CLUSTER_STANDBY} and any health check scripts. Assume success in any case where no scripts exist for a given task. """ d = succeed(None) if service_state == self.STATE_ONLINE: # Validate health of local service before we bring it online # in the HAcluster d = self._run_health_checks(scripts_path) d.addCallback( self._change_cluster_participation, scripts_path, service_state) return d def handle_change_ha_service(self, message): """Parse incoming change-ha-service messages""" operation_id = message["operation-id"] try: error_message = u"" service_name = message["service-name"] # keystone unit_name = message["unit-name"] # keystone/0 service_state = message["service-state"] # "online" | "standby" change_message = ( "%s high-availability service set to %s" % (service_name, service_state)) if service_state not in [self.STATE_STANDBY, self.STATE_ONLINE]: error_message = ( u"Invalid cluster participation state requested %s." % service_state) unit_path = "unit-" + unit_name.replace("/", "-") charm_path = os.path.join(self.JUJU_UNITS_BASE, unit_path, "charm") if not os.path.exists(self.JUJU_UNITS_BASE): error_message = ( u"This computer is not deployed with juju. " u"Changing high-availability service not supported.") elif not os.path.exists(charm_path): error_message = ( u"This computer is not juju unit %s. Unable to " u"modify high-availability services." % unit_name) if error_message: return self._respond_failure_string( error_message, operation_id) scripts_path = os.path.join(charm_path, "scripts") d = self._perform_state_change( scripts_path, service_state, operation_id) d.addCallback(self._respond_success, change_message, operation_id) d.addErrback(self._respond_failure, operation_id) return d except: return self._respond_failure(Failure(), operation_id) landscape-client-14.01/landscape/manager/config.py0000644000175000017500000000463612301414317021731 0ustar andreasandreasimport os from landscape.deployment import Configuration from landscape.manager.scriptexecution import ALL_USERS ALL_PLUGINS = ["ProcessKiller", "PackageManager", "UserManager", "ShutdownManager", "AptSources", "HardwareInfo", "KeystoneToken", "HAService"] class ManagerConfiguration(Configuration): """Specialized configuration for the Landscape Manager.""" def make_parser(self): """ Specialize L{Configuration.make_parser}, adding many manager-specific options. """ parser = super(ManagerConfiguration, self).make_parser() parser.add_option("--manager-plugins", metavar="PLUGIN_LIST", help="Comma-delimited list of manager plugins to " "use. ALL means use all plugins.", default="ALL") parser.add_option("--include-manager-plugins", metavar="PLUGIN_LIST", help="Comma-delimited list of manager plugins to " "enable, in addition to the defaults.") parser.add_option("--script-users", metavar="USERS", help="Comma-delimited list of usernames that scripts" " may be run as.
Default is to allow all " "users.") return parser @property def plugin_factories(self): plugin_names = [] if self.manager_plugins == "ALL": plugin_names = ALL_PLUGINS[:] elif self.manager_plugins: plugin_names = self.manager_plugins.split(",") if self.include_manager_plugins: plugin_names += self.include_manager_plugins.split(",") return [x.strip() for x in plugin_names] def get_allowed_script_users(self): """ Based on the C{script_users} configuration value, return the users that should be allowed to run scripts. If the value is "ALL", then L{landscape.manager.scriptexecution.ALL_USERS} will be returned. If there is no specified value, then C{nobody} will be allowed. """ if not self.script_users: return ["nobody"] if self.script_users.strip() == "ALL": return ALL_USERS return [x.strip() for x in self.script_users.split(",")] @property def store_filename(self): return os.path.join(self.data_path, "manager.database") landscape-client-14.01/landscape/manager/shutdownmanager.py0000644000175000017500000001307612301414317023670 0ustar andreasandreasimport logging from twisted.internet.defer import Deferred from twisted.internet.protocol import ProcessProtocol from twisted.internet.error import ProcessDone from landscape.manager.plugin import ManagerPlugin, SUCCEEDED, FAILED class ShutdownFailedError(Exception): """Raised when a call to C{/sbin/shutdown} fails. @ivar data: The data that the process printed before failing. """ def __init__(self, data): self.data = data class ShutdownManager(ManagerPlugin): def __init__(self, process_factory=None): if process_factory is None: from twisted.internet import reactor as process_factory self._process_factory = process_factory def register(self, registry): """Add this plugin to C{registry}. The shutdown manager handles C{shutdown} activity messages broadcast from the server. """ super(ShutdownManager, self).register(registry) registry.register_message("shutdown", self.perform_shutdown) def perform_shutdown(self, message): """Request a system restart or shutdown. If the call to C{/sbin/shutdown} runs without errors the activity specified in the message will be responded as succeeded. Otherwise, it will be responded as failed. """ operation_id = message["operation-id"] reboot = message["reboot"] protocol = ShutdownProcessProtocol() protocol.set_timeout(self.registry.reactor) protocol.result.addCallback(self._respond_success, operation_id) protocol.result.addErrback(self._respond_failure, operation_id) command, args = self._get_command_and_args(protocol, reboot) self._process_factory.spawnProcess(protocol, command, args=args) def _respond_success(self, data, operation_id): logging.info("Shutdown request succeeded.") deferred = self._respond(SUCCEEDED, data, operation_id) # After sending the result to the server, stop accepting messages and # wait for the reboot/shutdown. deferred.addCallback( lambda _: self.registry.broker.stop_exchanger()) return deferred def _respond_failure(self, failure, operation_id): logging.info("Shutdown request failed.") return self._respond(FAILED, failure.value.data, operation_id) def _respond(self, status, data, operation_id): message = {"type": "operation-result", "status": status, "result-text": data, "operation-id": operation_id} return self.registry.broker.send_message( message, self._session_id, True) def _get_command_and_args(self, protocol, reboot): """ Returns a C{command, args} 2-tuple suitable for use with L{IReactorProcess.spawnProcess}. 
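        For example, with the default 240 second delay a reboot request
        produces, per the logic below:

            ("/sbin/shutdown",
             ["/sbin/shutdown", "-r", "+4",
              "Landscape is rebooting the system"])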
""" minutes = "+%d" % (protocol.delay // 60,) if reboot: args = ["/sbin/shutdown", "-r", minutes, "Landscape is rebooting the system"] else: args = ["/sbin/shutdown", "-h", minutes, "Landscape is shutting down the system"] return "/sbin/shutdown", args class ShutdownProcessProtocol(ProcessProtocol): """A ProcessProtocol for calling C{/sbin/shutdown}. C{shutdown} doesn't return immediately when a time specification is provided. Failures are reported immediately after it starts and return a non-zero exit code. The process protocol calls C{shutdown} and waits for failures for C{timeout} seconds. If no failures are reported it fires C{result}'s callback with whatever output was received from the process. If failures are reported C{result}'s errback is fired. @ivar result: A L{Deferred} fired when C{shutdown} fails or succeeds. @ivar reboot: A flag indicating whether a shutdown or reboot should be performed. Default is C{False}. @ivar delay: The time in seconds from now to schedule the shutdown. Default is 240 seconds. The time will be converted to minutes using integer division when passed to C{shutdown}. """ def __init__(self, reboot=False, delay=240): self.result = Deferred() self.reboot = reboot self.delay = delay self._data = [] self._waiting = True def get_data(self): """Get the data printed by the subprocess.""" return "".join(self._data) def set_timeout(self, reactor, timeout=10): """ Set the error checking timeout, after which C{result}'s callback will be fired. """ reactor.call_later(timeout, self._succeed) def childDataReceived(self, fd, data): """Some data was received from the child. Add it to our buffer to pass to C{result} when it's fired. """ if self._waiting: self._data.append(data) def processEnded(self, reason): """Fire back the C{result} L{Deferred}. C{result}'s callback will be fired with the string of data received from the subprocess, or if the subprocess failed C{result}'s errback will be fired with the string of data received from the subprocess. """ if self._waiting: if reason.check(ProcessDone): self._succeed() else: self.result.errback(ShutdownFailedError(self.get_data())) self._waiting = False def _succeed(self): """Fire C{result}'s callback with data accumulated from the process.""" if self._waiting: self.result.callback(self.get_data()) self._waiting = False landscape-client-14.01/landscape/manager/keystonetoken.py0000644000175000017500000000340012301414317023352 0ustar andreasandreasimport os import logging from ConfigParser import ConfigParser, NoOptionError from landscape.monitor.plugin import DataWatcher from landscape.lib.persist import Persist KEYSTONE_CONFIG_FILE = "/etc/keystone/keystone.conf" class KeystoneToken(DataWatcher): """ A plugin which pulls the admin_token from the keystone configuration file and sends it to the landscape server. """ message_type = "keystone-token" message_key = "data" run_interval = 60 * 15 scope = "openstack" def __init__(self, keystone_config_file=KEYSTONE_CONFIG_FILE): self._keystone_config_file = keystone_config_file def register(self, client): super(KeystoneToken, self).register(client) self._persist_filename = os.path.join(self.registry.config.data_path, "keystone.bpickle") self._persist = Persist(filename=self._persist_filename) self.registry.reactor.call_every(self.registry.config.flush_interval, self.flush) def _reset(self): """ Reset the persist. """ self._persist.remove("data") def flush(self): self._persist.save(self._persist_filename) def get_data(self): """ Return the Keystone administrative token. 
""" if not os.path.exists(self._keystone_config_file): return None config = ConfigParser() config.read(self._keystone_config_file) try: admin_token = config.get("DEFAULT", "admin_token") except NoOptionError: logging.error("KeystoneToken: No admin_token found in %s" % (self._keystone_config_file)) return None return admin_token landscape-client-14.01/landscape/manager/tests/0000755000175000017500000000000012301414317021243 5ustar andreasandreaslandscape-client-14.01/landscape/manager/tests/test_fakepackagemanager.py0000644000175000017500000000573412301414317026442 0ustar andreasandreasfrom landscape.manager.plugin import SUCCEEDED from landscape.manager.fakepackagemanager import FakePackageManager from landscape.tests.helpers import LandscapeTest, ManagerHelper class FakePackageManagerTest(LandscapeTest): """Tests for the fake package manager plugin.""" helpers = [ManagerHelper] def setUp(self): super(FakePackageManagerTest, self).setUp() self.package_manager = FakePackageManager() self.package_manager.randint = lambda x, y: 0 def test_handle_change_packages(self): """ L{FakePackageManager} is able to handle a C{change-packages} message, creating a C{change-packages-result} in response. """ self.manager.add(self.package_manager) service = self.broker_service service.message_store.set_accepted_types(["change-packages-result"]) message = {"type": "change-packages", "operation-id": 1} self.manager.dispatch_message(message) self.manager.reactor.advance(1) self.assertMessages(service.message_store.get_pending_messages(), [{"type": "change-packages-result", "result-text": "OK done.", "result-code": 1, "operation-id": 1}]) def test_handle_change_package_locks(self): """ L{FakePackageManager} is able to handle a C{change-package-locks} message, creating a C{operation-result} in response. """ self.manager.add(self.package_manager) service = self.broker_service service.message_store.set_accepted_types(["operation-result"]) message = {"type": "change-package-locks", "operation-id": 1} self.manager.dispatch_message(message) self.manager.reactor.advance(1) self.assertMessages(service.message_store.get_pending_messages(), [{"type": "operation-result", "result-text": "Package locks successfully changed.", "result-code": 0, "status": SUCCEEDED, "operation-id": 1}]) def test_handle_release_upgrade(self): """ L{FakePackageManager} is able to handle a C{release-upgrade} message, creating a C{operation-result} in response. 
""" self.manager.add(self.package_manager) service = self.broker_service service.message_store.set_accepted_types(["operation-result"]) message = {"type": "release-upgrade", "operation-id": 1} self.manager.dispatch_message(message) self.manager.reactor.advance(1) self.assertMessages(service.message_store.get_pending_messages(), [{"type": "operation-result", "result-text": "Successful release upgrade.", "result-code": 0, "status": SUCCEEDED, "operation-id": 1}]) landscape-client-14.01/landscape/manager/tests/test_keystonetoken.py0000644000175000017500000001233412301414317025561 0ustar andreasandreasimport os from landscape.tests.helpers import LandscapeTest from landscape.manager.keystonetoken import KeystoneToken from landscape.tests.helpers import ManagerHelper, FakePersist class KeystoneTokenTest(LandscapeTest): helpers = [ManagerHelper] def setUp(self): super(KeystoneTokenTest, self).setUp() self.keystone_file = os.path.join(self.makeDir(), "keystone.conf") self.plugin = KeystoneToken(self.keystone_file) def test_get_keystone_token_nonexistent(self): """ The plugin provides no data when the keystone configuration file doesn't exist. """ self.assertIs(None, self.plugin.get_data()) def test_get_keystone_token_empty(self): """ The plugin provides no data when the keystone configuration file is empty. """ self.log_helper.ignore_errors("KeystoneToken: No admin_token found .*") self.makeFile(path=self.keystone_file, content="") self.assertIs(None, self.plugin.get_data()) def test_get_keystone_token_no_admin_token(self): """ The plugin provides no data when the keystone configuration doesn't have an admin_token field. """ self.log_helper.ignore_errors("KeystoneToken: No admin_token found .*") self.makeFile(path=self.keystone_file, content="[DEFAULT]") self.assertIs(None, self.plugin.get_data()) def test_get_keystone_token(self): """ Finally! Some data is actually there! """ self.makeFile( path=self.keystone_file, content="[DEFAULT]\nadmin_token = foobar") self.assertEqual("foobar", self.plugin.get_data()) def test_get_keystone_token_non_utf8(self): """ The data can be arbitrary bytes. """ content = "[DEFAULT]\nadmin_token = \xff" self.makeFile( path=self.keystone_file, content=content) self.assertEqual("\xff", self.plugin.get_data()) def test_get_message(self): """ L{KeystoneToken.get_message} only returns a message when the keystone token has changed. """ self.makeFile( path=self.keystone_file, content="[DEFAULT]\nadmin_token = foobar") self.plugin.register(self.manager) message = self.plugin.get_message() self.assertEqual( {'type': 'keystone-token', 'data': 'foobar'}, message) message = self.plugin.get_message() self.assertIs(None, message) def test_flush_persists_data_to_disk(self): """ The plugin's C{flush} method is called every C{flush_interval} and creates the perists file. """ flush_interval = self.config.flush_interval persist_filename = os.path.join(self.config.data_path, "keystone.bpickle") self.assertFalse(os.path.exists(persist_filename)) self.manager.add(self.plugin) self.reactor.advance(flush_interval) self.assertTrue(os.path.exists(persist_filename)) def test_resynchronize_message_calls_reset_method(self): """ If the reactor fires a "resynchronize", with 'openstack' scope, the C{_reset} method on the keystone plugin object is called. 
""" self.manager.add(self.plugin) self.plugin._persist = FakePersist() openstack_scope = ["openstack"] self.reactor.fire("resynchronize", openstack_scope) self.assertTrue(self.plugin._persist.called) def test_resynchronize_gets_new_session_id(self): """ If L{KeystoneToken} reacts to a "resynchronize" event it should get a new session id as part of the process. """ self.manager.add(self.plugin) session_id = self.plugin._session_id self.plugin._persist = FakePersist() self.plugin.client.broker.message_store.drop_session_ids() self.reactor.fire("resynchronize") self.assertNotEqual(session_id, self.plugin._session_id) def test_resynchronize_with_global_scope(self): """ If the reactor fires a "resynchronize", with global scope, we act as if it had 'openstack' scope. """ self.manager.add(self.plugin) self.plugin._persist = FakePersist() self.reactor.fire("resynchronize") self.assertTrue(self.plugin._persist.called) def test_do_not_resynchronize_with_other_scope(self): """ If the reactor fires a "resynchronize", with an irrelevant scope, we do nothing. """ self.manager.add(self.plugin) self.plugin._persist = FakePersist() disk_scope = ["disk"] self.reactor.fire("resynchronize", disk_scope) self.assertFalse(self.plugin._persist.called) def test_send_message_with_no_data(self): """ If the plugin could not extract the C{admin_token} from the Keystone config file, upon exchange, C{None} is returned. """ self.makeFile(path=self.keystone_file, content="[DEFAULT]\nadmin_token =") self.manager.add(self.plugin) def check(result): self.assertIs(None, result) return self.plugin.exchange().addCallback(check) landscape-client-14.01/landscape/manager/tests/test_plugin.py0000644000175000017500000000776312301414317024167 0ustar andreasandreasfrom twisted.internet.defer import Deferred from landscape.tests.helpers import LandscapeTest from landscape.tests.helpers import ManagerHelper from landscape.manager.plugin import ManagerPlugin, SUCCEEDED, FAILED class BrokerPluginTest(LandscapeTest): helpers = [ManagerHelper] def test_call_with_operation_result_success(self): """ A helper method exists which calls a function and sends an operation-result message based on the success of that method. """ plugin = ManagerPlugin() plugin.register(self.manager) broker_service = self.broker_service broker_service.message_store.set_accepted_types(["operation-result"]) message = {"operation-id": 12312} operation = lambda: None def assert_messages(ignored): messages = broker_service.message_store.get_pending_messages() self.assertMessages(messages, [{"type": "operation-result", "status": SUCCEEDED, "operation-id": 12312}]) result = plugin.call_with_operation_result(message, operation) return result.addCallback(assert_messages) def test_call_with_operation_result_error(self): """ The helper for operation-results sends an appropriate message when an exception is raised from the given method. """ self.log_helper.ignore_errors(RuntimeError) plugin = ManagerPlugin() plugin.register(self.manager) broker_service = self.broker_service broker_service.message_store.set_accepted_types(["operation-result"]) message = {"operation-id": 12312} def operation(): raise RuntimeError("What the crap!") def assert_messages(ignored): messages = broker_service.message_store.get_pending_messages() self.assertMessages(messages, [{"type": "operation-result", "status": FAILED, "result-text": "RuntimeError: What the " "crap!", "operation-id": 12312}]) logdata = self.logfile.getvalue() self.assertTrue("RuntimeError: What the crap!" 
in logdata, logdata) result = plugin.call_with_operation_result(message, operation) return result.addCallback(assert_messages) def test_call_with_operation_result_exchanges_urgently(self): """ Operation results are reported to the server as quickly as possible. """ plugin = ManagerPlugin() plugin.register(self.manager) broker_service = self.broker_service broker_service.message_store.set_accepted_types(["operation-result"]) message = {"operation-id": 123} operation = lambda: None def assert_urgency(ignored): self.assertTrue(broker_service.exchanger.is_urgent()) result = plugin.call_with_operation_result(message, operation) return result.addCallback(assert_urgency) def test_callable_returning_a_deferred(self): """ The callable parameter can return a C{Deferred}. """ plugin = ManagerPlugin() plugin.register(self.manager) broker_service = self.broker_service broker_service.message_store.set_accepted_types(["operation-result"]) message = {"operation-id": 12312} deferred = Deferred() operation = lambda: deferred def assert_messages(ignored): messages = broker_service.message_store.get_pending_messages() self.assertMessages(messages, [{"type": "operation-result", "result-text": "blah", "status": SUCCEEDED, "operation-id": 12312}]) result = plugin.call_with_operation_result(message, operation) result.addCallback(assert_messages) deferred.callback("blah") return result landscape-client-14.01/landscape/manager/tests/test_store.py0000644000175000017500000000423312301414317024012 0ustar andreasandreasfrom landscape.tests.helpers import LandscapeTest from landscape.manager.store import ManagerStore class ManagerStoreTest(LandscapeTest): def setUp(self): super(ManagerStoreTest, self).setUp() self.filename = self.makeFile() self.store = ManagerStore(self.filename) self.store.add_graph(1, u"file 1", u"user1") self.store.set_graph_accumulate(1, 1234, 1.0) def test_get_unknown_graph(self): graph = self.store.get_graph(1000) self.assertIdentical(graph, None) def test_get_graph(self): graph = self.store.get_graph(1) self.assertEqual(graph, (1, u"file 1", u"user1")) def test_get_graphs(self): graphs = self.store.get_graphs() self.assertEqual(graphs, [(1, u"file 1", u"user1")]) def test_get_no_graphs(self): self.store.remove_graph(1) graphs = self.store.get_graphs() self.assertEqual(graphs, []) def test_add_graph(self): self.store.add_graph(2, u"file 2", u"user2") graph = self.store.get_graph(2) self.assertEqual(graph, (2, u"file 2", u"user2")) def test_add_update_graph(self): self.store.add_graph(1, u"file 2", u"user2") graph = self.store.get_graph(1) self.assertEqual(graph, (1, u"file 2", u"user2")) def test_remove_graph(self): self.store.remove_graph(1) graphs = self.store.get_graphs() self.assertEqual(graphs, []) def test_remove_unknown_graph(self): self.store.remove_graph(2) graphs = self.store.get_graphs() self.assertEqual(graphs, [(1, u"file 1", u"user1")]) def test_get_accumulate_unknown_graph(self): accumulate = self.store.get_graph_accumulate(2) self.assertIdentical(accumulate, None) def test_set_accumulate_graph(self): self.store.set_graph_accumulate(2, 1234, 2.0) accumulate = self.store.get_graph_accumulate(2) self.assertEqual(accumulate, (2, 1234, 2.0)) def test_update_accumulate_graph(self): self.store.set_graph_accumulate(1, 4567, 2.0) accumulate = self.store.get_graph_accumulate(1) self.assertEqual(accumulate, (1, 4567, 2.0)) landscape-client-14.01/landscape/manager/tests/test_packagemanager.py0000644000175000017500000003131312301414317025603 0ustar andreasandreasimport os from 
twisted.internet.defer import Deferred from landscape.package.changer import find_changer_command, PackageChanger from landscape.package.releaseupgrader import ( ReleaseUpgrader, find_release_upgrader_command) from landscape.package.store import PackageStore from landscape.manager.packagemanager import PackageManager from landscape.tests.helpers import ( LandscapeTest, EnvironSaverHelper, ManagerHelper) class PackageManagerTest(LandscapeTest): """Tests for the package manager plugin.""" helpers = [EnvironSaverHelper, ManagerHelper] def setUp(self): """Initialize test helpers and create a sample package store.""" super(PackageManagerTest, self).setUp() self.package_store = PackageStore(os.path.join(self.data_path, "package/database")) self.package_manager = PackageManager() def test_create_default_store_upon_message_handling(self): """ If the package sqlite database file doesn't exist yet, it is created upon message handling. """ filename = os.path.join(self.broker_service.config.data_path, "package/database") os.unlink(filename) self.assertFalse(os.path.isfile(filename)) self.manager.add(self.package_manager) self.package_manager.spawn_handler = lambda x: None message = {"type": "release-upgrade"} self.package_manager.handle_release_upgrade(message) self.assertTrue(os.path.isfile(filename)) def test_dont_spawn_changer_if_message_not_accepted(self): """ The L{PackageManager} spawns a L{PackageChanger} run only if the appropriate message type is accepted. """ self.manager.add(self.package_manager) package_manager_mock = self.mocker.patch(self.package_manager) package_manager_mock.spawn_handler(PackageChanger) self.mocker.count(0) self.mocker.replay() return self.package_manager.run() def test_dont_spawn_release_upgrader_if_message_not_accepted(self): """ The L{PackageManager} spawns a L{ReleaseUpgrader} run only if the appropriate message type is accepted. """ self.manager.add(self.package_manager) package_manager_mock = self.mocker.patch(self.package_manager) package_manager_mock.spawn_handler(ReleaseUpgrader) self.mocker.count(0) self.mocker.replay() return self.package_manager.run() def test_spawn_handler_on_registration_when_already_accepted(self): package_manager_mock = self.mocker.patch(self.package_manager) package_manager_mock.spawn_handler(PackageChanger) # Slightly tricky as we have to wait for the result of run(), # but we don't have its deferred yet. To handle it, we create # our own deferred, and register a callback for when run() # returns, chaining both deferreds at that point. deferred = Deferred() def run_has_run(run_result_deferred): return run_result_deferred.chainDeferred(deferred) package_manager_mock.run() self.mocker.passthrough(run_has_run) self.mocker.replay() service = self.broker_service service.message_store.set_accepted_types(["change-packages-result"]) self.manager.add(self.package_manager) return deferred def test_spawn_changer_on_run_if_message_accepted(self): """ The L{PackageManager} spawns a L{PackageChanger} run if messages of type C{"change-packages-result"} are accepted. """ service = self.broker_service service.message_store.set_accepted_types(["change-packages-result"]) package_manager_mock = self.mocker.patch(self.package_manager) package_manager_mock.spawn_handler(PackageChanger) self.mocker.count(2) # Once for registration, then again explicitly. 
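        # (The calls above are recorded mocker expectations; replay()
        # below switches mocker into verification mode, so the two
        # expected spawn_handler() calls are enforced when the plugin
        # runs.)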
self.mocker.replay() self.manager.add(self.package_manager) return self.package_manager.run() def test_run_on_package_data_changed(self): """ The L{PackageManager} spawns a L{PackageChanger} run if an event of type C{"package-data-changed"} is fired. """ service = self.broker_service service.message_store.set_accepted_types(["change-packages-result"]) package_manager_mock = self.mocker.patch(self.package_manager) package_manager_mock.spawn_handler(PackageChanger) self.mocker.count(2) # Once for registration, then again explicitly. self.mocker.replay() self.manager.add(self.package_manager) return self.manager.reactor.fire("package-data-changed")[0] def test_spawn_release_upgrader_on_run_if_message_accepted(self): """ The L{PackageManager} spawns a L{ReleaseUpgrader} run if messages of type C{"operation-result"} are accepted. """ service = self.broker_service service.message_store.set_accepted_types(["operation-result"]) package_manager_mock = self.mocker.patch(self.package_manager) package_manager_mock.spawn_handler(ReleaseUpgrader) self.mocker.count(2) # Once for registration, then again explicitly. self.mocker.replay() self.manager.add(self.package_manager) return self.package_manager.run() def test_change_packages_handling(self): self.manager.add(self.package_manager) package_manager_mock = self.mocker.patch(self.package_manager) package_manager_mock.spawn_handler(PackageChanger) self.mocker.replay() message = {"type": "change-packages"} self.manager.dispatch_message(message) task = self.package_store.get_next_task("changer") self.assertTrue(task) self.assertEqual(task.data, message) def test_change_packages_handling_with_reboot(self): self.manager.add(self.package_manager) package_manager_mock = self.mocker.patch(self.package_manager) package_manager_mock.spawn_handler(PackageChanger) self.mocker.replay() message = {"type": "change-packages", "reboot-if-necessary": True} self.manager.dispatch_message(message) task = self.package_store.get_next_task("changer") self.assertTrue(task) self.assertEqual(task.data, message) def test_release_upgrade_handling(self): """ The L{PackageManager.handle_release_upgrade} method is registered has handler for messages of type C{"release-upgrade"}, and queues a task in the appropriate queue. """ self.manager.add(self.package_manager) package_manager_mock = self.mocker.patch(self.package_manager) package_manager_mock.spawn_handler(ReleaseUpgrader) self.mocker.replay() message = {"type": "release-upgrade"} self.manager.dispatch_message(message) task = self.package_store.get_next_task("release-upgrader") self.assertTrue(task) self.assertEqual(task.data, message) def test_spawn_changer(self): """ The L{PackageManager.spawn_handler} method executes the correct command when passed the L{PackageChanger} class as argument. """ command = self.makeFile("#!/bin/sh\necho 'I am the changer!' >&2\n") os.chmod(command, 0755) find_command_mock = self.mocker.replace(find_changer_command) find_command_mock() self.mocker.result(command) self.mocker.replay() self.package_store.add_task("changer", "Do something!") self.manager.add(self.package_manager) result = self.package_manager.spawn_handler(PackageChanger) def got_result(result): log = self.logfile.getvalue() self.assertIn("I am the changer!", log) self.assertNotIn(command, log) return result.addCallback(got_result) def test_spawn_release_upgrader(self): """ The L{PackageManager.spawn_handler} method executes the correct command when passed the L{ReleaseUpgrader} class as argument. 
""" command = self.makeFile("#!/bin/sh\necho 'I am the upgrader!' >&2\n") os.chmod(command, 0755) find_command_mock = self.mocker.replace(find_release_upgrader_command) find_command_mock() self.mocker.result(command) self.mocker.replay() self.package_store.add_task("release-upgrader", "Do something!") self.manager.add(self.package_manager) result = self.package_manager.spawn_handler(ReleaseUpgrader) def got_result(result): log = self.logfile.getvalue() self.assertIn("I am the upgrader!", log) self.assertNotIn(command, log) return result.addCallback(got_result) def test_spawn_handler_without_output(self): find_command_mock = self.mocker.replace(find_changer_command) find_command_mock() self.mocker.result("/bin/true") self.mocker.replay() self.package_store.add_task("changer", "Do something!") self.manager.add(self.package_manager) result = self.package_manager.spawn_handler(PackageChanger) def got_result(result): log = self.logfile.getvalue() self.assertNotIn("changer output", log) return result.addCallback(got_result) def test_spawn_handler_copies_environment(self): command = self.makeFile("#!/bin/sh\necho VAR: $VAR\n") os.chmod(command, 0755) find_command_mock = self.mocker.replace(find_changer_command) find_command_mock() self.mocker.result(command) self.mocker.replay() self.manager.add(self.package_manager) self.package_store.add_task("changer", "Do something!") os.environ["VAR"] = "HI!" result = self.package_manager.spawn_handler(PackageChanger) def got_result(result): log = self.logfile.getvalue() self.assertIn("VAR: HI!", log) self.assertNotIn(command, log) return result.addCallback(got_result) def test_spawn_handler_passes_quiet_option(self): command = self.makeFile("#!/bin/sh\necho OPTIONS: $@\n") os.chmod(command, 0755) find_command_mock = self.mocker.replace(find_changer_command) find_command_mock() self.mocker.result(command) self.mocker.replay() self.manager.add(self.package_manager) self.package_store.add_task("changer", "Do something!") result = self.package_manager.spawn_handler(PackageChanger) def got_result(result): log = self.logfile.getvalue() self.assertIn("OPTIONS: --quiet", log) self.assertNotIn(command, log) return result.addCallback(got_result) def test_spawn_handler_wont_run_without_tasks(self): command = self.makeFile("#!/bin/sh\necho RUN!\n") os.chmod(command, 0755) self.manager.add(self.package_manager) result = self.package_manager.spawn_handler(PackageChanger) def got_result(result): log = self.logfile.getvalue() self.assertNotIn("RUN!", log) return result.addCallback(got_result) def test_spawn_handler_doesnt_chdir(self): command = self.makeFile("#!/bin/sh\necho RUN\n") os.chmod(command, 0755) cwd = os.getcwd() self.addCleanup(os.chdir, cwd) dir = self.makeDir() os.chdir(dir) os.chmod(dir, 0) find_command_mock = self.mocker.replace(find_changer_command) find_command_mock() self.mocker.result(command) self.mocker.replay() self.manager.add(self.package_manager) self.package_store.add_task("changer", "Do something!") result = self.package_manager.spawn_handler(PackageChanger) def got_result(result): log = self.logfile.getvalue() self.assertIn("RUN", log) # restore permissions to the dir so tearDown can clean it up os.chmod(dir, 0766) return result.addCallback(got_result) def test_change_package_locks_handling(self): """ The L{PackageManager.handle_change_package_locks} method is registered as handler for messages of type C{"change-package-locks"}, and queues a package-changer task in the appropriate queue. 
""" self.manager.add(self.package_manager) package_manager_mock = self.mocker.patch(self.package_manager) package_manager_mock.spawn_handler(PackageChanger) self.mocker.replay() message = {"type": "change-package-locks"} self.manager.dispatch_message(message) task = self.package_store.get_next_task("changer") self.assertTrue(task) self.assertEqual(task.data, message) landscape-client-14.01/landscape/manager/tests/test_aptsources.py0000644000175000017500000003455612301414317025061 0ustar andreasandreasimport os from twisted.internet.defer import Deferred, succeed from landscape.manager.aptsources import AptSources from landscape.manager.plugin import SUCCEEDED, FAILED from landscape.lib.twisted_util import gather_results from landscape.tests.helpers import LandscapeTest, ManagerHelper from landscape.package.reporter import find_reporter_command class FakeStatResult(object): def __init__(self, st_mode, st_uid, st_gid): self.st_mode = st_mode self.st_uid = st_uid self.st_gid = st_gid class AptSourcesTests(LandscapeTest): helpers = [ManagerHelper] def setUp(self): super(AptSourcesTests, self).setUp() self.sourceslist = AptSources() self.sources_path = self.makeDir() self.sourceslist.SOURCES_LIST = os.path.join(self.sources_path, "sources.list") sources_d = os.path.join(self.sources_path, "sources.list.d") os.mkdir(sources_d) self.sourceslist.SOURCES_LIST_D = sources_d self.manager.add(self.sourceslist) sources = file(self.sourceslist.SOURCES_LIST, "w") sources.write("\n") sources.close() service = self.broker_service service.message_store.set_accepted_types(["operation-result"]) self.sourceslist._run_process = lambda *args, **kwargs: succeed(None) self.log_helper.ignore_errors(".*") def test_comment_sources_list(self): """ When getting a repository message, L{AptSources} comments the whole sources.list file. """ sources = file(self.sourceslist.SOURCES_LIST, "w") sources.write("oki\n\ndoki\n#comment\n # other comment\n") sources.close() self.manager.dispatch_message( {"type": "apt-sources-replace", "sources": [], "gpg-keys": [], "operation-id": 1}) self.assertEqual( "#oki\n\n#doki\n#comment\n # other comment\n", file(self.sourceslist.SOURCES_LIST).read()) service = self.broker_service self.assertMessages(service.message_store.get_pending_messages(), [{"type": "operation-result", "status": SUCCEEDED, "operation-id": 1}]) def test_sources_list_permissions(self): """ When getting a repository message, L{AptSources} keeps sources.list permissions. 
""" sources = file(self.sourceslist.SOURCES_LIST, "w") sources.write("oki\n\ndoki\n#comment\n # other comment\n") sources.close() # change file mode from default to check it's restored os.chmod(self.sourceslist.SOURCES_LIST, 0400) sources_stat_orig = os.stat(self.sourceslist.SOURCES_LIST) fake_stats = FakeStatResult(st_mode=sources_stat_orig.st_mode, st_uid=30, st_gid=30) os_stat = self.mocker.replace("os.stat") os_stat(self.sourceslist.SOURCES_LIST) self.mocker.result(fake_stats) self.mocker.count(1, max=10) os_chown = self.mocker.replace("os.chown") os_chown(self.sourceslist.SOURCES_LIST, fake_stats.st_uid, fake_stats.st_gid) self.mocker.replay() self.manager.dispatch_message( {"type": "apt-sources-replace", "sources": [], "gpg-keys": [], "operation-id": 1}) service = self.broker_service self.assertMessages(service.message_store.get_pending_messages(), [{"type": "operation-result", "status": SUCCEEDED, "operation-id": 1}]) sources_stat_after = os.stat(self.sourceslist.SOURCES_LIST) self.assertEqual(sources_stat_orig.st_mode, sources_stat_after.st_mode) def test_random_failures(self): """ If a failure happens during the manipulation of sources, the activity is reported as FAILED with the error message. """ self.sourceslist.SOURCES_LIST = "/doesntexist" self.manager.dispatch_message( {"type": "apt-sources-replace", "sources": [], "gpg-keys": [], "operation-id": 1}) msg = "IOError: [Errno 2] No such file or directory: '/doesntexist'" service = self.broker_service self.assertMessages(service.message_store.get_pending_messages(), [{"type": "operation-result", "result-text": msg, "status": FAILED, "operation-id": 1}]) def test_rename_sources_list_d(self): """ The sources files in sources.list.d are renamed to .save when a message is received. """ sources1 = file( os.path.join(self.sourceslist.SOURCES_LIST_D, "file1.list"), "w") sources1.write("ok\n") sources1.close() sources2 = file( os.path.join(self.sourceslist.SOURCES_LIST_D, "file2.list.save"), "w") sources2.write("ok\n") sources2.close() self.manager.dispatch_message( {"type": "apt-sources-replace", "sources": [], "gpg-keys": [], "operation-id": 1}) self.assertFalse( os.path.exists( os.path.join(self.sourceslist.SOURCES_LIST_D, "file1.list"))) self.assertTrue( os.path.exists( os.path.join(self.sourceslist.SOURCES_LIST_D, "file1.list.save"))) self.assertTrue( os.path.exists( os.path.join(self.sourceslist.SOURCES_LIST_D, "file2.list.save"))) def test_create_landscape_sources(self): """ For every sources listed in the sources field of the message, C{AptSources} creates a file with the content in sources.list.d. """ sources = [{"name": "dev", "content": "oki\n"}, {"name": "lucid", "content": "doki\n"}] self.manager.dispatch_message( {"type": "apt-sources-replace", "sources": sources, "gpg-keys": [], "operation-id": 1}) dev_file = os.path.join(self.sourceslist.SOURCES_LIST_D, "landscape-dev.list") self.assertTrue(os.path.exists(dev_file)) self.assertEqual("oki\n", file(dev_file).read()) lucid_file = os.path.join(self.sourceslist.SOURCES_LIST_D, "landscape-lucid.list") self.assertTrue(os.path.exists(lucid_file)) self.assertEqual("doki\n", file(lucid_file).read()) def test_import_gpg_keys(self): """ C{AptSources} runs a process with apt-key for every keys in the message. 
""" deferred = Deferred() def _run_process(command, args, env={}, path=None, uid=None, gid=None): self.assertEqual("/usr/bin/apt-key", command) self.assertEqual("add", args[0]) filename = args[1] self.assertEqual("Some key content", file(filename).read()) deferred.callback(("ok", "", 0)) return deferred self.sourceslist._run_process = _run_process self.manager.dispatch_message( {"type": "apt-sources-replace", "sources": [], "gpg-keys": ["Some key content"], "operation-id": 1}) return deferred def test_import_delete_temporary_files(self): """ The files created to be imported by C{apt-key} are removed after the import. """ deferred = Deferred() filenames = [] def _run_process(command, args, env={}, path=None, uid=None, gid=None): if not filenames: filenames.append(args[1]) deferred.callback(("ok", "", 0)) return deferred self.sourceslist._run_process = _run_process self.manager.dispatch_message( {"type": "apt-sources-replace", "sources": [], "gpg-keys": ["Some key content"], "operation-id": 1}) self.assertFalse(os.path.exists(filenames[0])) return deferred def test_failed_import_delete_temporary_files(self): """ The files created to be imported by C{apt-key} are removed after the import, even if there is a failure. """ deferred = Deferred() filenames = [] def _run_process(command, args, env={}, path=None, uid=None, gid=None): filenames.append(args[1]) deferred.callback(("error", "", 1)) return deferred self.sourceslist._run_process = _run_process self.manager.dispatch_message( {"type": "apt-sources-replace", "sources": [], "gpg-keys": ["Some key content"], "operation-id": 1}) self.assertFalse(os.path.exists(filenames[0])) return deferred def test_failed_import_reported(self): """ If the C{apt-key} command failed for some reasons, the output of the command is reported and the activity fails. """ deferred = Deferred() def _run_process(command, args, env={}, path=None, uid=None, gid=None): deferred.callback(("nok", "some error", 1)) return deferred self.sourceslist._run_process = _run_process self.manager.dispatch_message( {"type": "apt-sources-replace", "sources": [], "gpg-keys": ["key"], "operation-id": 1}) service = self.broker_service msg = "ProcessError: nok\nsome error" self.assertMessages(service.message_store.get_pending_messages(), [{"type": "operation-result", "result-text": msg, "status": FAILED, "operation-id": 1}]) return deferred def test_signaled_import_reported(self): """ If the C{apt-key} fails with a signal, the output of the command is reported and the activity fails. """ deferred = Deferred() def _run_process(command, args, env={}, path=None, uid=None, gid=None): deferred.errback(("nok", "some error", 1)) return deferred self.sourceslist._run_process = _run_process self.manager.dispatch_message( {"type": "apt-sources-replace", "sources": [], "gpg-keys": ["key"], "operation-id": 1}) service = self.broker_service msg = "ProcessError: nok\nsome error" self.assertMessages(service.message_store.get_pending_messages(), [{"type": "operation-result", "result-text": msg, "status": FAILED, "operation-id": 1}]) return deferred def test_failed_import_no_changes(self): """ If the C{apt-key} command failed for some reasons, the current repositories aren't changed. 
""" deferred = Deferred() def _run_process(command, args, env={}, path=None, uid=None, gid=None): deferred.callback(("nok", "some error", 1)) return deferred self.sourceslist._run_process = _run_process sources = file(self.sourceslist.SOURCES_LIST, "w") sources.write("oki\n\ndoki\n#comment\n") sources.close() self.manager.dispatch_message( {"type": "apt-sources-replace", "sources": [], "gpg-keys": ["key"], "operation-id": 1}) self.assertEqual( "oki\n\ndoki\n#comment\n", file(self.sourceslist.SOURCES_LIST).read()) return deferred def test_multiple_import_sequential(self): """ If multiple keys are specified, the imports run sequentially, not in parallel. """ deferred1 = Deferred() deferred2 = Deferred() deferreds = [deferred1, deferred2] def _run_process(command, args, env={}, path=None, uid=None, gid=None): if not deferreds: return succeed(None) return deferreds.pop(0) self.sourceslist._run_process = _run_process self.manager.dispatch_message( {"type": "apt-sources-replace", "sources": [], "gpg-keys": ["key1", "key2"], "operation-id": 1}) self.assertEqual(1, len(deferreds)) deferred1.callback(("ok", "", 0)) self.assertEqual(0, len(deferreds)) deferred2.callback(("ok", "", 0)) service = self.broker_service self.assertMessages(service.message_store.get_pending_messages(), [{"type": "operation-result", "status": SUCCEEDED, "operation-id": 1}]) return gather_results(deferreds) def test_multiple_import_failure(self): """ If multiple keys are specified, and that the first one fails, the error is correctly reported. """ deferred1 = Deferred() deferred2 = Deferred() deferreds = [deferred1, deferred2] def _run_process(command, args, env={}, path=None, uid=None, gid=None): return deferreds.pop(0) self.sourceslist._run_process = _run_process self.manager.dispatch_message( {"type": "apt-sources-replace", "sources": [], "gpg-keys": ["key1", "key2"], "operation-id": 1}) deferred1.callback(("error", "", 1)) deferred2.callback(("error", "", 1)) msg = "ProcessError: error\n" service = self.broker_service self.assertMessages(service.message_store.get_pending_messages(), [{"type": "operation-result", "result-text": msg, "status": FAILED, "operation-id": 1}]) return gather_results(deferreds) def test_run_reporter(self): """ After receiving a message, L{AptSources} triggers a reporter run to have the new packages reported to the server. """ deferred = Deferred() def _run_process(command, args, env={}, path=None, uid=None, gid=None): self.assertEqual(find_reporter_command(), command) self.assertEqual(["--force-apt-update", "--config=%s" % self.manager.config.config], args) deferred.callback(("ok", "", 0)) return deferred self.sourceslist._run_process = _run_process self.manager.dispatch_message( {"type": "apt-sources-replace", "sources": [], "gpg-keys": [], "operation-id": 1}) return deferred landscape-client-14.01/landscape/manager/tests/test_hardwareinfo.py0000644000175000017500000000407212301414317025330 0ustar andreasandreasfrom landscape.tests.helpers import LandscapeTest, ManagerHelper from landscape.manager.hardwareinfo import HardwareInfo class HardwareInfoTests(LandscapeTest): helpers = [ManagerHelper] def setUp(self): super(HardwareInfoTests, self).setUp() self.info = HardwareInfo() self.info.command = "/bin/echo" self.manager.add(self.info) service = self.broker_service service.message_store.set_accepted_types(["hardware-info"]) def test_message(self): """ L{HardwareInfo} sends the output of its command when running. 
""" deferred = self.info.send_message() def check(ignored): self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"data": u"-xml -quiet\n", "type": "hardware-info"}]) return deferred.addCallback(check) def test_run_upgraded_system(self): """ L{HardwareInfo} sends the output of its command when running on a system that has been upgraded to include this plugin, i.e. where the client already knows that it can send the hardware-info message. """ deferred = self.info.run() def check(ignored): self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"data": u"-xml -quiet\n", "type": "hardware-info"}]) return deferred.addCallback(check) def test_only_on_register(self): """ C{call_on_accepted} is only called at register time, to not accumulate callbacks to the "message-type-acceptance-changed" event. """ calls = [] self.info.call_on_accepted = lambda x, y: calls.append((x, y)) deferred = self.info.run() def check(ignored): self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"data": u"-xml -quiet\n", "type": "hardware-info"}]) self.assertEqual([], calls) return deferred.addCallback(check) landscape-client-14.01/landscape/manager/tests/test_shutdownmanager.py0000644000175000017500000001454512301414317026073 0ustar andreasandreasfrom twisted.python.failure import Failure from twisted.internet.error import ProcessTerminated, ProcessDone from landscape import SERVER_API from landscape.manager.plugin import SUCCEEDED, FAILED from landscape.manager.shutdownmanager import ( ShutdownManager, ShutdownProcessProtocol) from landscape.tests.helpers import ( LandscapeTest, ManagerHelper, StubProcessFactory) class ShutdownManagerTest(LandscapeTest): helpers = [ManagerHelper] def setUp(self): super(ShutdownManagerTest, self).setUp() self.broker_service.message_store.set_accepted_types( ["shutdown", "operation-result"]) self.broker_service.pinger.start() self.process_factory = StubProcessFactory() self.plugin = ShutdownManager(process_factory=self.process_factory) self.manager.add(self.plugin) def test_restart(self): """ C{shutdown} processes run until the shutdown is to be performed. The L{ShutdownProcessProtocol} watches a process for errors, for 10 seconds by default, and if none occur the activity is marked as L{SUCCEEDED}. Data printed by the process is included in the activity's result text. """ message = {"type": "shutdown", "reboot": True, "operation-id": 100} self.plugin.perform_shutdown(message) [arguments] = self.process_factory.spawns protocol = arguments[0] self.assertTrue(isinstance(protocol, ShutdownProcessProtocol)) self.assertEqual( arguments[1:3], ("/sbin/shutdown", ["/sbin/shutdown", "-r", "+4", "Landscape is rebooting the system"])) def restart_performed(ignore): self.assertTrue(self.broker_service.exchanger.is_urgent()) self.assertEqual( self.broker_service.message_store.get_pending_messages(), [{"type": "operation-result", "api": SERVER_API, "operation-id": 100, "timestamp": 10, "status": SUCCEEDED, "result-text": u"Data may arrive in batches."}]) protocol.result.addCallback(restart_performed) protocol.childDataReceived(0, "Data may arrive ") protocol.childDataReceived(0, "in batches.") # We need to advance both reactors to simulate that fact they # are loosely in sync with each other self.broker_service.reactor.advance(10) self.manager.reactor.advance(10) return protocol.result def test_shutdown(self): """ C{shutdown} messages have a flag that indicates whether a reboot or shutdown has been requested. 
The C{shutdown} command is called appropriately. """ message = {"type": "shutdown", "reboot": False, "operation-id": 100} self.plugin.perform_shutdown(message) [arguments] = self.process_factory.spawns self.assertEqual( arguments[1:3], ("/sbin/shutdown", ["/sbin/shutdown", "-h", "+4", "Landscape is shutting down the system"])) def test_restart_fails(self): """ If an error occurs before the error-checking timeout, the activity will be failed. Data printed by the process prior to the failure is included in the activity's result text. """ message = {"type": "shutdown", "reboot": False, "operation-id": 100} self.plugin.perform_shutdown(message) def restart_failed(message_id): self.assertTrue(self.broker_service.exchanger.is_urgent()) self.assertEqual( self.broker_service.message_store.get_pending_messages(), [{"type": "operation-result", "api": SERVER_API, "operation-id": 100, "timestamp": 0, "status": FAILED, "result-text": u"Failure text is reported."}]) [arguments] = self.process_factory.spawns protocol = arguments[0] protocol.result.addCallback(restart_failed) protocol.childDataReceived(0, "Failure text is reported.") protocol.processEnded(Failure(ProcessTerminated(exitCode=1))) return protocol.result def test_process_ends_after_timeout(self): """ If the process ends after the error-checking timeout has passed, C{result} will not be re-fired. """ message = {"type": "shutdown", "reboot": False, "operation-id": 100} self.plugin.perform_shutdown(message) stash = [] def restart_performed(ignore): self.assertEqual(stash, []) stash.append(True) [arguments] = self.process_factory.spawns protocol = arguments[0] protocol.result.addCallback(restart_performed) self.manager.reactor.advance(10) protocol.processEnded(Failure(ProcessTerminated(exitCode=1))) return protocol.result def test_process_data_is_not_collected_after_firing_result(self): """ Data printed in the sub-process is not collected after C{result} has been fired. """ message = {"type": "shutdown", "reboot": False, "operation-id": 100} self.plugin.perform_shutdown(message) [arguments] = self.process_factory.spawns protocol = arguments[0] protocol.childDataReceived(0, "Data may arrive ") protocol.childDataReceived(0, "in batches.") self.manager.reactor.advance(10) self.assertEqual(protocol.get_data(), "Data may arrive in batches.") protocol.childDataReceived(0, "Even when you least expect it.") self.assertEqual(protocol.get_data(), "Data may arrive in batches.") def test_restart_stops_exchanger(self): """ After a successful shutdown, the broker stops processing new messages. """ message = {"type": "shutdown", "reboot": False, "operation-id": 100} self.plugin.perform_shutdown(message) [arguments] = self.process_factory.spawns protocol = arguments[0] protocol.processEnded(Failure(ProcessDone(status=0))) self.broker_service.reactor.advance(100) self.manager.reactor.advance(100) # New messages will not be exchanged while a reboot is in progress.
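        # A sketch of the property asserted below: once the shutdown
        # process has exited successfully, scheduling an exchange must
        # produce no outgoing payloads, i.e.
        #     exchanger.schedule_exchange()
        #     assert exchanger._transport.payloads == []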
self.manager.broker.exchanger.schedule_exchange() payloads = self.manager.broker.exchanger._transport.payloads self.assertEqual(0, len(payloads)) return protocol.result landscape-client-14.01/landscape/manager/tests/test_scriptexecution.py0000644000175000017500000010646712301414317026122 0ustar andreasandreasimport pwd import os import sys import tempfile import stat from twisted.internet.defer import gatherResults, succeed, fail from twisted.internet.error import ProcessDone from twisted.python.failure import Failure from landscape import VERSION from landscape.lib.fetch import HTTPCodeError from landscape.lib.persist import Persist from landscape.manager.scriptexecution import ( ScriptExecutionPlugin, ProcessTimeLimitReachedError, PROCESS_FAILED_RESULT, UBUNTU_PATH, get_user_info, UnknownInterpreterError, UnknownUserError, FETCH_ATTACHMENTS_FAILED_RESULT) from landscape.manager.manager import SUCCEEDED, FAILED from landscape.tests.helpers import ( LandscapeTest, ManagerHelper, StubProcessFactory, DummyProcess) from landscape.tests.mocker import ANY, ARGS def get_default_environment(): username = pwd.getpwuid(os.getuid())[0] uid, gid, home = get_user_info(username) return { "PATH": UBUNTU_PATH, "USER": username, "HOME": home} class RunScriptTests(LandscapeTest): helpers = [ManagerHelper] def setUp(self): super(RunScriptTests, self).setUp() self.plugin = ScriptExecutionPlugin() self.manager.add(self.plugin) def test_basic_run(self): """ The plugin returns a Deferred resulting in the output of basic commands. """ result = self.plugin.run_script("/bin/sh", "echo hi") result.addCallback(self.assertEqual, "hi\n") return result def test_other_interpreter(self): """Non-shell interpreters can be specified.""" result = self.plugin.run_script("/usr/bin/python", "print 'hi'") result.addCallback(self.assertEqual, "hi\n") return result def test_other_interpreter_env(self): """ Non-shell interpreters don't have their paths set by the shell, so we need to check that other interpreters have environment variables set. """ result = self.plugin.run_script( sys.executable, "import os\nprint os.environ") def check_environment(results): for string in get_default_environment(): self.assertIn(string, results) result.addCallback(check_environment) return result def test_server_supplied_env(self): """ Server-supplied environment variables are merged with default variables then passed to script. """ server_supplied_env = {"DOG": "Woof", "CAT": "Meow"} result = self.plugin.run_script( sys.executable, "import os\nprint os.environ", server_supplied_env=server_supplied_env) def check_environment(results): for string in get_default_environment(): self.assertIn(string, results) for name, value in server_supplied_env.items(): self.assertIn(name, results) self.assertIn(value, results) result.addCallback(check_environment) return result def test_server_supplied_env_overrides_client(self): """ Server-supplied environment variables override client default values if the server provides them. """ server_supplied_env = {"PATH": "server-path", "USER": "server-user", "HOME": "server-home"} result = self.plugin.run_script( sys.executable, "import os\nprint os.environ", server_supplied_env=server_supplied_env) def check_environment(results): for name, value in server_supplied_env.items(): self.assertIn(name, results) self.assertIn(value, results) result.addCallback(check_environment) return result def test_concurrent(self): """ Scripts run with the ScriptExecutionPlugin plugin are run concurrently. 
""" fifo = self.makeFile() os.mkfifo(fifo) self.addCleanup(os.remove, fifo) # If the first process is blocking on a fifo, and the second process # wants to write to the fifo, the only way this will complete is if # run_script is truly async d1 = self.plugin.run_script("/bin/sh", "cat " + fifo) d2 = self.plugin.run_script("/bin/sh", "echo hi > " + fifo) d1.addCallback(self.assertEqual, "hi\n") d2.addCallback(self.assertEqual, "") return gatherResults([d1, d2]) def test_accented_run_in_code(self): """ Scripts can contain accented data both in the code and in the result. """ accented_content = u"\N{LATIN SMALL LETTER E WITH ACUTE}" result = self.plugin.run_script( u"/bin/sh", u"echo %s" % (accented_content,)) result.addCallback( self.assertEqual, "%s\n" % (accented_content.encode("utf-8"),)) return result def test_accented_run_in_interpreter(self): """ Scripts can also contain accents in the interpreter. """ accented_content = u"\N{LATIN SMALL LETTER E WITH ACUTE}" result = self.plugin.run_script( u"/bin/echo %s" % (accented_content,), u"") def check(result): self.assertTrue( "%s " % (accented_content.encode("utf-8"),) in result) result.addCallback(check) return result def test_set_umask_appropriately(self): """ We should be setting the umask to 0022 before executing a script, and restoring it to the previous value when finishing. """ # Get original umask. old_umask = os.umask(0) os.umask(old_umask) mock_umask = self.mocker.replace("os.umask") mock_umask(0022) self.mocker.result(old_umask) mock_umask(old_umask) self.mocker.replay() result = self.plugin.run_script("/bin/sh", "umask") result.addCallback(self.assertEqual, "%04o\n" % old_umask) return result def test_restore_umask_in_event_of_error(self): """ We set the umask before executing the script, in the event that there's an error setting up the script, we want to restore the umask. """ mock_umask = self.mocker.replace("os.umask") mock_umask(0022) self.mocker.result(0077) mock_mkdtemp = self.mocker.replace("tempfile.mkdtemp", passthrough=False) mock_mkdtemp() self.mocker.throw(OSError("Fail!")) mock_umask(0077) self.mocker.replay() result = self.plugin.run_script("/bin/sh", "umask", attachments={u"file1": "some data"}) return self.assertFailure(result, OSError) def test_run_with_attachments(self): result = self.plugin.run_script( u"/bin/sh", u"ls $LANDSCAPE_ATTACHMENTS && cat $LANDSCAPE_ATTACHMENTS/file1", attachments={u"file1": "some data"}) def check(result): self.assertEqual(result, "file1\nsome data") result.addCallback(check) return result def test_run_with_attachment_ids(self): """ The most recent protocol for script message doesn't include the attachment body inside the message itself, but instead gives an attachment ID, and the plugin fetches the files separately. 
""" self.manager.config.url = "https://localhost/message-system" persist = Persist( filename=os.path.join(self.config.data_path, "broker.bpickle")) registration_persist = persist.root_at("registration") registration_persist.set("secure-id", "secure_id") persist.save() mock_fetch = self.mocker.replace("landscape.lib.fetch.fetch_async", passthrough=False) headers = {"User-Agent": "landscape-client/%s" % VERSION, "Content-Type": "application/octet-stream", "X-Computer-ID": "secure_id"} mock_fetch("https://localhost/attachment/14", headers=headers, cainfo=None) self.mocker.result(succeed("some other data")) self.mocker.replay() result = self.plugin.run_script( u"/bin/sh", u"ls $LANDSCAPE_ATTACHMENTS && cat $LANDSCAPE_ATTACHMENTS/file1", attachments={u"file1": 14}) def check(result): self.assertEqual(result, "file1\nsome other data") result.addCallback(check) return result def test_run_with_attachment_ids_and_ssl(self): """ When fetching attachments, L{ScriptExecution} passes the optional ssl certificate file if the configuration specifies it. """ self.manager.config.url = "https://localhost/message-system" self.manager.config.ssl_public_key = "/some/key" persist = Persist( filename=os.path.join(self.config.data_path, "broker.bpickle")) registration_persist = persist.root_at("registration") registration_persist.set("secure-id", "secure_id") persist.save() mock_fetch = self.mocker.replace("landscape.lib.fetch.fetch_async", passthrough=False) headers = {"User-Agent": "landscape-client/%s" % VERSION, "Content-Type": "application/octet-stream", "X-Computer-ID": "secure_id"} mock_fetch("https://localhost/attachment/14", headers=headers, cainfo="/some/key") self.mocker.result(succeed("some other data")) self.mocker.replay() result = self.plugin.run_script( u"/bin/sh", u"ls $LANDSCAPE_ATTACHMENTS && cat $LANDSCAPE_ATTACHMENTS/file1", attachments={u"file1": 14}) def check(result): self.assertEqual(result, "file1\nsome other data") result.addCallback(check) return result def test_self_remove_script(self): """ If a script removes itself, it doesn't create an error when the script execution plugin tries to remove the script file. """ result = self.plugin.run_script("/bin/sh", "echo hi && rm $0") result.addCallback(self.assertEqual, "hi\n") return result def test_self_remove_attachments(self): """ If a script removes its attachments, it doesn't create an error when the script execution plugin tries to remove the attachments directory. """ result = self.plugin.run_script( u"/bin/sh", u"ls $LANDSCAPE_ATTACHMENTS && rm -r $LANDSCAPE_ATTACHMENTS", attachments={u"file1": "some data"}) def check(result): self.assertEqual(result, "file1\n") result.addCallback(check) return result def _run_script(self, username, uid, gid, path): # ignore the call to chown! 
mock_chown = self.mocker.replace("os.chown", passthrough=False) mock_chown(ARGS) expected_uid = uid if uid != os.getuid() else None expected_gid = gid if gid != os.getgid() else None factory = StubProcessFactory() self.plugin.process_factory = factory self.mocker.replay() result = self.plugin.run_script("/bin/sh", "echo hi", user=username) self.assertEqual(len(factory.spawns), 1) spawn = factory.spawns[0] self.assertEqual(spawn[4], path) self.assertEqual(spawn[5], expected_uid) self.assertEqual(spawn[6], expected_gid) result.addCallback(self.assertEqual, "foobar") protocol = spawn[0] protocol.childDataReceived(1, "foobar") for fd in (0, 1, 2): protocol.childConnectionLost(fd) protocol.processEnded(Failure(ProcessDone(0))) return result def test_user(self): """ Running a script as a particular user calls C{IReactorProcess.spawnProcess} with an appropriate C{uid} argument, with the user's primary group as the C{gid} argument and with the user home as C{path} argument. """ uid = os.getuid() info = pwd.getpwuid(uid) username = info.pw_name gid = info.pw_gid path = info.pw_dir return self._run_script(username, uid, gid, path) def test_user_no_home(self): """ When the user specified to C{run_script} doesn't have a home, the script executes in '/'. """ mock_getpwnam = self.mocker.replace("pwd.getpwnam", passthrough=False) class pwnam(object): pw_uid = 1234 pw_gid = 5678 pw_dir = self.makeFile() self.expect(mock_getpwnam("user")).result(pwnam) return self._run_script("user", 1234, 5678, "/") def test_user_with_attachments(self): uid = os.getuid() info = pwd.getpwuid(uid) username = info.pw_name gid = info.pw_gid mock_chown = self.mocker.replace("os.chown", passthrough=False) mock_chown(ANY, uid, gid) self.mocker.count(3) factory = StubProcessFactory() self.plugin.process_factory = factory self.mocker.replay() result = self.plugin.run_script("/bin/sh", "echo hi", user=username, attachments={u"file 1": "some data"}) self.assertEqual(len(factory.spawns), 1) spawn = factory.spawns[0] self.assertIn("LANDSCAPE_ATTACHMENTS", spawn[3]) attachment_dir = spawn[3]["LANDSCAPE_ATTACHMENTS"] self.assertEqual(stat.S_IMODE(os.stat(attachment_dir).st_mode), 0700) filename = os.path.join(attachment_dir, "file 1") self.assertEqual(stat.S_IMODE(os.stat(filename).st_mode), 0600) protocol = spawn[0] protocol.childDataReceived(1, "foobar") for fd in (0, 1, 2): protocol.childConnectionLost(fd) protocol.processEnded(Failure(ProcessDone(0))) def check(data): self.assertEqual(data, "foobar") self.assertFalse(os.path.exists(attachment_dir)) return result.addCallback(check) def test_limit_size(self): """Data returned from the command is limited.""" factory = StubProcessFactory() self.plugin.process_factory = factory self.plugin.size_limit = 100 result = self.plugin.run_script("/bin/sh", "") result.addCallback(self.assertEqual, "x" * 100) protocol = factory.spawns[0][0] protocol.childDataReceived(1, "x" * 200) for fd in (0, 1, 2): protocol.childConnectionLost(fd) protocol.processEnded(Failure(ProcessDone(0))) return result def test_limit_time(self): """ The process only lasts for a certain number of seconds. """ result = self.plugin.run_script("/bin/sh", "cat", time_limit=500) self.manager.reactor.advance(501) self.assertFailure(result, ProcessTimeLimitReachedError) return result def test_limit_time_accumulates_data(self): """ Data from processes that time out should still be accumulated and available from the exception object that is raised. 
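Roughly, the errback below is expected to see something like:

    def got_error(failure):
        failure.trap(ProcessTimeLimitReachedError)
        partial = failure.value.data  # output gathered before the kill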
""" factory = StubProcessFactory() self.plugin.process_factory = factory result = self.plugin.run_script("/bin/sh", "", time_limit=500) protocol = factory.spawns[0][0] protocol.makeConnection(DummyProcess()) protocol.childDataReceived(1, "hi\n") self.manager.reactor.advance(501) protocol.processEnded(Failure(ProcessDone(0))) def got_error(f): self.assertTrue(f.check(ProcessTimeLimitReachedError)) self.assertEqual(f.value.data, "hi\n") result.addErrback(got_error) return result def test_time_limit_canceled_after_success(self): """ The timeout call is cancelled after the script terminates. """ factory = StubProcessFactory() self.plugin.process_factory = factory self.plugin.run_script("/bin/sh", "", time_limit=500) protocol = factory.spawns[0][0] transport = DummyProcess() protocol.makeConnection(transport) protocol.childDataReceived(1, "hi\n") protocol.processEnded(Failure(ProcessDone(0))) self.manager.reactor.advance(501) self.assertEqual(transport.signals, []) def test_cancel_doesnt_blow_after_success(self): """ When the process ends successfully and is immediately followed by the timeout, the output should still be in the failure and nothing bad will happen! [regression test: killing of the already-dead process would blow up.] """ factory = StubProcessFactory() self.plugin.process_factory = factory result = self.plugin.run_script("/bin/sh", "", time_limit=500) protocol = factory.spawns[0][0] protocol.makeConnection(DummyProcess()) protocol.childDataReceived(1, "hi") protocol.processEnded(Failure(ProcessDone(0))) self.manager.reactor.advance(501) def got_result(output): self.assertEqual(output, "hi") result.addCallback(got_result) return result def test_script_is_owned_by_user(self): """ This is a very white-box test. When a script is generated, it must be created such that data NEVER gets into it before the file has the correct permissions. Therefore os.chmod and os.chown must be called before data is written. """ username = pwd.getpwuid(os.getuid())[0] uid, gid, home = get_user_info(username) mock_chown = self.mocker.replace("os.chown", passthrough=False) mock_chmod = self.mocker.replace("os.chmod", passthrough=False) mock_mkstemp = self.mocker.replace("tempfile.mkstemp", passthrough=False) mock_fdopen = self.mocker.replace("os.fdopen", passthrough=False) process_factory = self.mocker.mock() self.plugin.process_factory = process_factory self.mocker.order() self.expect(mock_mkstemp()).result((99, "tempo!")) script_file = mock_fdopen(99, "w") mock_chmod("tempo!", 0700) mock_chown("tempo!", uid, gid) # The contents are written *after* the permissions have been set up! script_file.write("#!/bin/sh\ncode") script_file.close() process_factory.spawnProcess( ANY, ANY, uid=None, gid=None, path=ANY, env=get_default_environment()) self.mocker.replay() # We don't really care about the deferred that's returned, as long as # those things happened in the correct order. self.plugin.run_script("/bin/sh", "code", user=pwd.getpwuid(uid)[0]) def test_script_removed(self): """ The script is removed after it is finished. """ mock_mkstemp = self.mocker.replace("tempfile.mkstemp", passthrough=False) fd, filename = tempfile.mkstemp() self.expect(mock_mkstemp()).result((fd, filename)) self.mocker.replay() d = self.plugin.run_script("/bin/sh", "true") d.addCallback(lambda ign: self.assertFalse(os.path.exists(filename))) return d def test_unknown_interpreter(self): """ If the script is run with an unknown interpreter, it raises a meaningful error instead of crashing in execvpe. 
""" d = self.plugin.run_script("/bin/cantpossiblyexist", "stuff") def cb(ignore): self.fail("Should not be there") def eb(failure): failure.trap(UnknownInterpreterError) self.assertEqual( failure.value.interpreter, "/bin/cantpossiblyexist") return d.addCallback(cb).addErrback(eb) class ScriptExecutionMessageTests(LandscapeTest): helpers = [ManagerHelper] def setUp(self): super(ScriptExecutionMessageTests, self).setUp() self.broker_service.message_store.set_accepted_types( ["operation-result"]) self.manager.config.script_users = "ALL" def _verify_script(self, executable, interp, code): """ Given spawnProcess arguments, check to make sure that the temporary script has the correct content. """ data = open(executable, "r").read() self.assertEqual(data, "#!%s\n%s" % (interp, code)) def _send_script(self, interpreter, code, operation_id=123, user=pwd.getpwuid(os.getuid())[0], time_limit=None, attachments={}, server_supplied_env=None): message = {"type": "execute-script", "interpreter": interpreter, "code": code, "operation-id": operation_id, "username": user, "time-limit": time_limit, "attachments": dict(attachments)} if server_supplied_env: message["env"] = server_supplied_env return self.manager.dispatch_message(message) def test_success(self): """ When a C{execute-script} message is received from the server, the specified script will be run and an operation-result will be sent back to the server. """ # Let's use a stub process factory, because otherwise we don't have # access to the deferred. factory = StubProcessFactory() # ignore the call to chown! mock_chown = self.mocker.replace("os.chown", passthrough=False) mock_chown(ARGS) self.manager.add(ScriptExecutionPlugin(process_factory=factory)) self.mocker.replay() result = self._send_script(sys.executable, "print 'hi'") self._verify_script(factory.spawns[0][1], sys.executable, "print 'hi'") self.assertMessages( self.broker_service.message_store.get_pending_messages(), []) # Now let's simulate the completion of the process factory.spawns[0][0].childDataReceived(1, "hi!\n") factory.spawns[0][0].processEnded(Failure(ProcessDone(0))) def got_result(r): self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"type": "operation-result", "operation-id": 123, "status": SUCCEEDED, "result-text": u"hi!\n"}]) result.addCallback(got_result) return result def test_success_with_server_supplied_env(self): """ When a C{execute-script} message is received from the server, the specified script will be run with the supplied environment and an operation-result will be sent back to the server. """ # Let's use a stub process factory, because otherwise we don't have # access to the deferred. factory = StubProcessFactory() # ignore the call to chown! 
mock_chown = self.mocker.replace("os.chown", passthrough=False) mock_chown(ARGS) self.manager.add(ScriptExecutionPlugin(process_factory=factory)) self.mocker.replay() result = self._send_script(sys.executable, "print 'hi'", server_supplied_env={"Dog": "Woof"}) self._verify_script(factory.spawns[0][1], sys.executable, "print 'hi'") # Verify environment was passed self.assertIn("HOME", factory.spawns[0][3]) self.assertIn("USER", factory.spawns[0][3]) self.assertIn("PATH", factory.spawns[0][3]) self.assertIn("Dog", factory.spawns[0][3]) self.assertEqual("Woof", factory.spawns[0][3]["Dog"]) self.assertMessages( self.broker_service.message_store.get_pending_messages(), []) # Now let's simulate the completion of the process factory.spawns[0][0].childDataReceived(1, "Woof\n") factory.spawns[0][0].processEnded(Failure(ProcessDone(0))) def got_result(r): self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"type": "operation-result", "operation-id": 123, "status": SUCCEEDED, "result-text": u"Woof\n"}]) result.addCallback(got_result) return result def test_user(self): """A user can be specified in the message.""" username = pwd.getpwuid(os.getuid())[0] uid, gid, home = get_user_info(username) # ignore the call to chown! mock_chown = self.mocker.replace("os.chown", passthrough=False) mock_chown(ARGS) def spawn_called(protocol, filename, uid, gid, path, env): protocol.childDataReceived(1, "hi!\n") protocol.processEnded(Failure(ProcessDone(0))) self._verify_script(filename, sys.executable, "print 'hi'") process_factory = self.mocker.mock() process_factory.spawnProcess( ANY, ANY, uid=None, gid=None, path=ANY, env=get_default_environment()) self.mocker.call(spawn_called) self.mocker.replay() self.manager.add( ScriptExecutionPlugin(process_factory=process_factory)) result = self._send_script(sys.executable, "print 'hi'", user=username) return result def test_unknown_user_with_unicode(self): """ If an error happens because an unknown user is selected, and this user name happens to contain unicode characters, the error message is correctly encoded and reported. This test mainly ensures that unicode error messages work, using an unknown user as an easy way to test it. """ self.log_helper.ignore_errors(UnknownUserError) username = u"non-existent-f\N{LATIN SMALL LETTER E WITH ACUTE}e" self.manager.add( ScriptExecutionPlugin()) self._send_script(sys.executable, "print 'hi'", user=username) self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"type": "operation-result", "operation-id": 123, "result-text": u"UnknownUserError: Unknown user '%s'" % username, "status": FAILED}]) def test_timeout(self): """ If a L{ProcessTimeLimitReachedError} is fired back, the operation-result should have a failed status. """ factory = StubProcessFactory() self.manager.add(ScriptExecutionPlugin(process_factory=factory)) # ignore the call to chown!
mock_chown = self.mocker.replace("os.chown", passthrough=False) mock_chown(ARGS) self.mocker.replay() result = self._send_script(sys.executable, "bar", time_limit=30) self._verify_script(factory.spawns[0][1], sys.executable, "bar") protocol = factory.spawns[0][0] protocol.makeConnection(DummyProcess()) protocol.childDataReceived(2, "ONOEZ") self.manager.reactor.advance(31) protocol.processEnded(Failure(ProcessDone(0))) def got_result(r): self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"type": "operation-result", "operation-id": 123, "status": FAILED, "result-text": u"ONOEZ", "result-code": 102}]) result.addCallback(got_result) return result def test_configured_users(self): """ Messages which try to run a script as a user that is not allowed should be rejected. """ self.manager.add(ScriptExecutionPlugin()) self.manager.config.script_users = "landscape, nobody" result = self._send_script(sys.executable, "bar", user="whatever") def got_result(r): self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"type": "operation-result", "operation-id": 123, "status": FAILED, "result-text": u"Scripts cannot be run as user whatever."}]) result.addCallback(got_result) return result def test_urgent_response(self): """Responses to script execution messages are urgent.""" # ignore the call to chown! mock_chown = self.mocker.replace("os.chown", passthrough=False) mock_chown(ARGS) def spawn_called(protocol, filename, uid, gid, path, env): protocol.childDataReceived(1, "hi!\n") protocol.processEnded(Failure(ProcessDone(0))) self._verify_script(filename, sys.executable, "print 'hi'") process_factory = self.mocker.mock() process_factory.spawnProcess( ANY, ANY, uid=None, gid=None, path=ANY, env=get_default_environment()) self.mocker.call(spawn_called) self.mocker.replay() self.manager.add( ScriptExecutionPlugin(process_factory=process_factory)) def got_result(r): self.assertTrue(self.broker_service.exchanger.is_urgent()) self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"type": "operation-result", "operation-id": 123, "result-text": u"hi!\n", "status": SUCCEEDED}]) result = self._send_script(sys.executable, "print 'hi'") result.addCallback(got_result) return result def test_binary_output(self): """ If a script outputs non-printable characters not handled by utf-8, they are replaced during the encoding phase but the script succeeds. """ mock_chown = self.mocker.replace("os.chown", passthrough=False) mock_chown(ARGS) def spawn_called(protocol, filename, uid, gid, path, env): protocol.childDataReceived(1, "\x7fELF\x01\x01\x01\x00\x00\x00\x95\x01") protocol.processEnded(Failure(ProcessDone(0))) self._verify_script(filename, sys.executable, "print 'hi'") process_factory = self.mocker.mock() process_factory.spawnProcess( ANY, ANY, uid=None, gid=None, path=ANY, env=get_default_environment()) self.mocker.call(spawn_called) self.mocker.replay() self.manager.add( ScriptExecutionPlugin(process_factory=process_factory)) def got_result(r): self.assertTrue(self.broker_service.exchanger.is_urgent()) [message] = ( self.broker_service.message_store.get_pending_messages()) self.assertEqual( message["result-text"], u"\x7fELF\x01\x01\x01\x00\x00\x00\ufffd\x01") result = self._send_script(sys.executable, "print 'hi'") result.addCallback(got_result) return result def test_parse_error_causes_operation_failure(self): """ If there is an error parsing the message, an operation-result will be sent (assuming operation-id *is* successfully parsed). 
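For example, a message lacking the required C{username} key, such as:

    {"type": "execute-script", "operation-id": 444}

is expected to produce an operation-result whose text carries the KeyError, as asserted below.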
""" self.log_helper.ignore_errors(KeyError) self.manager.add(ScriptExecutionPlugin()) self.manager.dispatch_message( {"type": "execute-script", "operation-id": 444}) expected_message = [{"type": "operation-result", "operation-id": 444, "result-text": u"KeyError: username", "status": FAILED}] self.assertMessages( self.broker_service.message_store.get_pending_messages(), expected_message) self.assertTrue("KeyError: 'username'" in self.logfile.getvalue()) def test_non_zero_exit_fails_operation(self): """ If a script exits with a nen-zero exit code, the operation associated with it should fail, but the data collected should still be sent. """ # Mock a bunch of crap so that we can run a real process self.mocker.replace("os.chown", passthrough=False)(ARGS) self.mocker.replace("os.setuid", passthrough=False)(ARGS) self.mocker.count(0, None) self.mocker.replace("os.setgid", passthrough=False)(ARGS) self.mocker.count(0, None) self.mocker.count(0, None) self.mocker.replay() self.manager.add(ScriptExecutionPlugin()) result = self._send_script("/bin/sh", "echo hi; exit 1") def got_result(ignored): self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"type": "operation-result", "operation-id": 123, "result-text": "hi\n", "result-code": PROCESS_FAILED_RESULT, "status": FAILED}]) return result.addCallback(got_result) def test_unknown_error(self): """ When a completely unknown error comes back from the process protocol, the operation fails and the formatted failure is included in the response message. """ factory = StubProcessFactory() # ignore the call to chown! mock_chown = self.mocker.replace("os.chown", passthrough=False) mock_chown(ARGS) self.manager.add(ScriptExecutionPlugin(process_factory=factory)) self.mocker.replay() result = self._send_script(sys.executable, "print 'hi'") self._verify_script(factory.spawns[0][1], sys.executable, "print 'hi'") self.assertMessages( self.broker_service.message_store.get_pending_messages(), []) failure = Failure(RuntimeError("Oh noes!")) factory.spawns[0][0].result_deferred.errback(failure) def got_result(r): self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"type": "operation-result", "operation-id": 123, "status": FAILED, "result-text": str(failure)}]) result.addCallback(got_result) return result def test_fetch_attachment_failure(self): """ If the plugin fails to retrieve the attachments with a L{HTTPCodeError}, a specific error code is shown. 
""" self.manager.config.url = "https://localhost/message-system" persist = Persist( filename=os.path.join(self.config.data_path, "broker.bpickle")) registration_persist = persist.root_at("registration") registration_persist.set("secure-id", "secure_id") persist.save() mock_fetch = self.mocker.replace("landscape.lib.fetch.fetch_async", passthrough=False) headers = {"User-Agent": "landscape-client/%s" % VERSION, "Content-Type": "application/octet-stream", "X-Computer-ID": "secure_id"} mock_fetch("https://localhost/attachment/14", headers=headers, cainfo=None) self.mocker.result(fail(HTTPCodeError(404, "Not found"))) self.mocker.replay() self.manager.add(ScriptExecutionPlugin()) result = self._send_script( "/bin/sh", "echo hi", attachments={u"file1": 14}) def got_result(ignored): self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"type": "operation-result", "operation-id": 123, "result-text": "Server returned HTTP code 404", "result-code": FETCH_ATTACHMENTS_FAILED_RESULT, "status": FAILED}]) return result.addCallback(got_result) landscape-client-14.01/landscape/manager/tests/test_service.py0000644000175000017500000000440512301414317024317 0ustar andreasandreasfrom landscape.tests.helpers import ( LandscapeTest, FakeBrokerServiceHelper) from landscape.reactor import FakeReactor from landscape.manager.config import ManagerConfiguration, ALL_PLUGINS from landscape.manager.service import ManagerService from landscape.manager.processkiller import ProcessKiller class ManagerServiceTest(LandscapeTest): helpers = [FakeBrokerServiceHelper] def setUp(self): super(ManagerServiceTest, self).setUp() config = ManagerConfiguration() config.load(["-c", self.config_filename]) class FakeManagerService(ManagerService): reactor_factory = FakeReactor self.service = FakeManagerService(config) def test_plugins(self): """ By default the L{ManagerService.plugins} list holds an instance of every enabled manager plugin. """ self.assertEqual(len(self.service.plugins), len(ALL_PLUGINS)) def test_get_plugins(self): """ If the C{--manager-plugins} command line option is specified, only the given plugins will be enabled. """ self.service.config.load(["--manager-plugins", "ProcessKiller"]) [plugin] = self.service.get_plugins() self.assertTrue(isinstance(plugin, ProcessKiller)) def test_start_service(self): """ The L{ManagerService.startService} method connects to the broker, starts the plugins and register the manager as broker client. 
""" def stop_service(ignored): for plugin in self.service.plugins: if getattr(plugin, "stop", None) is not None: plugin.stop() [connector] = self.broker_service.broker.get_connectors() connector.disconnect() self.service.stopService() self.broker_service.stopService() def assert_broker_connection(ignored): self.assertEqual(len(self.broker_service.broker.get_clients()), 1) self.assertIs(self.service.broker, self.service.manager.broker) result = self.service.broker.ping() return result.addCallback(stop_service) self.broker_service.startService() started = self.service.startService() return started.addCallback(assert_broker_connection) landscape-client-14.01/landscape/manager/tests/test_usermanager.py0000644000175000017500000020406112301414317025170 0ustar andreasandreas# -*- coding: utf-8 -*- import os from landscape.lib.persist import Persist from landscape.lib.twisted_util import gather_results from landscape.manager.plugin import SUCCEEDED, FAILED from landscape.monitor.usermonitor import UserMonitor from landscape.manager.usermanager import ( UserManager, RemoteUserManagerConnector) from landscape.user.tests.helpers import FakeUserProvider, FakeUserManagement from landscape.tests.helpers import LandscapeTest, ManagerHelper from landscape.user.provider import UserManagementError class UserGroupTestBase(LandscapeTest): helpers = [ManagerHelper] def setUp(self): super(UserGroupTestBase, self).setUp() self.shadow_file = self.makeFile("""\ jdoe:$1$xFlQvTqe$cBtrNEDOIKMy/BuJoUdeG0:13348:0:99999:7::: psmith:!:13348:0:99999:7::: sbarnes:$1$q7sz09uw$q.A3526M/SHu8vUb.Jo1A/:13349:0:99999:7::: """) accepted_types = ["operation-result", "users"] self.broker_service.message_store.set_accepted_types(accepted_types) def tearDown(self): super(UserGroupTestBase, self).tearDown() for plugin in self.plugins: plugin.stop() def setup_environment(self, users, groups, shadow_file): provider = FakeUserProvider(users=users, groups=groups, shadow_file=shadow_file) user_monitor = UserMonitor(provider=provider) management = FakeUserManagement(provider=provider) user_manager = UserManager(management=management, shadow_file=shadow_file) self.manager.persist = Persist() user_monitor.register(self.manager) user_manager.register(self.manager) self.plugins = [user_monitor, user_manager] return user_monitor class UserOperationsMessagingTest(UserGroupTestBase): def test_add_user_event(self): """ When an C{add-user} event is received the user should be added. Two messages should be generated: a C{users} message with details about the change and an C{operation-result} with details of the outcome of the operation. 
""" def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertMessages(messages, [{"type": "operation-result", "status": SUCCEEDED, "operation-id": 123, "timestamp": 0, "result-text": "add_user succeeded"}, {"timestamp": 0, "type": "users", "operation-id": 123, "create-users": [{"home-phone": None, "username": "jdoe", "uid": 1000, "enabled": True, "location": "Room 101", "work-phone": "+12345", "name": u"John Doe", "primary-gid": 1000}]}]) self.setup_environment([], [], None) result = self.manager.dispatch_message( {"username": "jdoe", "name": "John Doe", "password": "password", "operation-id": 123, "require-password-reset": False, "primary-group-name": None, "location": "Room 101", "work-number": "+12345", "home-number": None, "type": "add-user"}) result.addCallback(handle_callback) return result def test_add_user_event_utf8(self): """ When an C{add-user} event with utf-8 unicode strings is received the user should be added. """ def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertMessages(messages, [{"type": "operation-result", "status": SUCCEEDED, "operation-id": 123, "timestamp": 0, "result-text": "add_user succeeded"}, {"timestamp": 0, "type": "users", "operation-id": 123, "create-users": [{"home-phone": None, "username": "jdoe", "uid": 1000, "enabled": True, "location": "Room 101", "work-phone": "+12345", "name": u"請不要刪除", "primary-gid": 1000}]}]) self.setup_environment([], [], None) result = self.manager.dispatch_message( {"username": "jdoe", "name": "請不要刪除", "password": "password", "operation-id": 123, "require-password-reset": False, "primary-group-name": None, "location": "Room 101", "work-number": "+12345", "home-number": None, "type": "add-user"}) result.addCallback(handle_callback) return result def test_add_user_event_utf8_wire_data(self): """ When an C{add-user} event with utf-8 decoded unicode string is received the user should be added. This is what the server is sending over the wire in the real-world. """ def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertMessages(messages, [{"type": "operation-result", "status": SUCCEEDED, "operation-id": 123, "timestamp": 0, "result-text": "add_user succeeded"}, {"timestamp": 0, "type": "users", "operation-id": 123, "create-users": [ {"home-phone": u"請不要刪除", "username": u"請不要刪除", "uid": 1000, "enabled": True, "location": u"請不要刪除", "work-phone": u"請不要刪除", "name": u"請不要刪除", "primary-gid": 1000}]}]) self.setup_environment([], [], None) result = self.manager.dispatch_message( {'username': u'\u8acb\u4e0d\u8981\u522a\u9664', 'work-number': u'\u8acb\u4e0d\u8981\u522a\u9664', 'home-number': u'\u8acb\u4e0d\u8981\u522a\u9664', 'name': u'\u8acb\u4e0d\u8981\u522a\u9664', 'operation-id': 123, 'require-password-reset': False, 'password': u'\u8acb\u4e0d\u8981\u522a\u9664', 'type': 'add-user', 'primary-group-name': u'\u8acb\u4e0d\u8981\u522a\u9664', 'location': u'\u8acb\u4e0d\u8981\u522a\u9664'}) result.addCallback(handle_callback) return result def test_failing_add_user_event(self): """ When an C{add-user} event is received the user should be added. If not enough information is provided, we expect a single error, containing details of the failure. 
""" self.log_helper.ignore_errors(KeyError) def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertMessages(messages, [{"type": "operation-result", "status": FAILED, "operation-id": 123, "timestamp": 0, "result-text": "KeyError: 'username'"}]) self.setup_environment([], [], None) result = self.manager.dispatch_message( {"name": "John Doe", "password": "password", "operation-id": 123, "require-password-reset": False, "type": "add-user"}) result.addCallback(handle_callback) return result def test_add_user_event_in_sync(self): """ The client and server should be in sync after an C{add-user} event is received and processed. In other words, a snapshot should have been taken after the operation was handled. """ def handle_callback1(result): message_store = self.broker_service.message_store messages = message_store.get_pending_messages() self.assertTrue(messages) result = plugin.run() result.addCallback(handle_callback2, messages) return result def handle_callback2(result, messages): message_store = self.broker_service.message_store new_messages = message_store.get_pending_messages() self.assertEqual(messages, new_messages) return result plugin = self.setup_environment([], [], None) result = self.manager.dispatch_message( {"username": "jdoe", "name": "John Doe", "password": "password", "operation-id": 123, "require-password-reset": False, "primary-group-name": None, "type": "add-user", "location": None, "home-number": "+123456", "work-number": None}) result.addCallback(handle_callback1) return result def test_add_user_event_with_external_changes(self): """ If external user changes have been made but not detected by the client before an C{add-user} event is received, the client should first detect changes and then perform the operation. The results should be reported in separate messages. """ def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3) messages = [messages[0], messages[2]] self.assertMessages(messages, [{"type": "users", "create-users": [{"home-phone": None, "name": "Bo", "username": "bo", "uid": 1000, "enabled": True, "location": None, "primary-gid": 1000, "work-phone": None}]}, {"type": "users", "operation-id": 123, "create-users": [{"home-phone": "+123456", "username": "jdoe", "uid": 1001, "enabled": True, "location": None, "work-phone": None, "name": "John Doe", "primary-gid": 1001}]}]) users = [("bo", "x", 1000, 1000, "Bo,,,,", "/home/bo", "/bin/zsh")] self.setup_environment(users, [], None) result = self.manager.dispatch_message( {"username": "jdoe", "name": "John Doe", "password": "password", "operation-id": 123, "require-password-reset": False, "type": "add-user", "primary-group-name": None, "location": None, "work-number": None, "home-number": "+123456"}) result.addCallback(handle_callback) return result def test_edit_user_event(self): """ When a C{edit-user} message is received the user should be updated. Two messages should be generated: a C{users} message with details about the change and an C{operation-result} with details of the outcome of the operation. """ def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3) # Ignore the message created by plugin.run. 
self.assertMessages(messages[1:], [{"type": "operation-result", "status": SUCCEEDED, "operation-id": 99, "timestamp": 0, "result-text": "set_user_details succeeded"}, {"update-users": [{"username": "jdoe", "uid": 1001, "enabled": True, "work-phone": "789WORK", "home-phone": "123HOME", "location": "Everywhere", "name": "John Doe", "primary-gid": 1001}], "timestamp": 0, "type": "users", "operation-id": 99}]) users = [("jdoe", "x", 1001, 1000, "John Doe,,,,", "/home/bo", "/bin/zsh")] groups = [("users", "x", 1001, [])] self.setup_environment(users, groups, None) result = self.manager.dispatch_message( {"uid": 1001, "username": "jdoe", "password": "password", "name": "John Doe", "location": "Everywhere", "work-number": "789WORK", "home-number": "123HOME", "operation-id": 99, "primary-group-name": u"users", "type": "edit-user"}) result.addCallback(handle_callback) return result def test_edit_user_event_in_sync(self): """ The client and server should be in sync after a C{edit-user} event is received and processed. In other words, a snapshot should have been taken after the operation was handled. """ def handle_callback1(result): messages = self.broker_service.message_store.get_pending_messages() self.assertTrue(messages) result = plugin.run() result.addCallback(handle_callback2, messages) return result def handle_callback2(result, messages): mstore = self.broker_service.message_store new_messages = mstore.get_pending_messages() self.assertEqual(messages, new_messages) return result users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/home/bo", "/bin/zsh")] plugin = self.setup_environment(users, [], None) result = self.manager.dispatch_message( {"username": "jdoe", "password": "password", "name": "John Doe", "location": "Everywhere", "work-number": "789WORK", "home-number": "123HOME", "primary-group-name": None, "type": "edit-user", "operation-id": 99}) result.addCallback(handle_callback1) return result def test_edit_user_event_with_external_changes(self): """ If external user changes have been made but not detected by the client before a C{edit-user} event is received, the client should first detect changes and then perform the operation. The results should be reported in separate messages. 
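The same three-message pattern recurs in the other *_with_external_changes tests below; roughly:

    messages[0]  # "users": changes detected outside Landscape
    messages[1]  # "operation-result": outcome of this operation
    messages[2]  # "users" with an operation-id: the requested change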
""" def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3) self.assertMessages([messages[0], messages[2]], [{"type": "users", "create-group-members": {u"users": [u"jdoe"]}, "create-groups": [{"gid": 1001, "name": u"users"}], "create-users": [{"home-phone": None, "work-phone": None, "username": "jdoe", "uid": 1000, "enabled": True, "location": None, "name": "John Doe", "primary-gid": 1000}]}, {"type": "users", "operation-id": 99, "update-users": [{"username": "jdoe", "uid": 1000, "enabled": True, "work-phone": "789WORK", "home-phone": "123HOME", "location": "Everywhere", "primary-gid": 1001, "name": "John Doe"}]}]) users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/home/bo", "/bin/zsh")] groups = [("users", "x", 1001, ["jdoe"])] self.setup_environment(users, groups, None) result = self.manager.dispatch_message( {"username": "jdoe", "password": "password", "name": "John Doe", "location": "Everywhere", "work-number": "789WORK", "home-number": "123HOME", "primary-group-name": u"users", "type": "edit-user", "operation-id": 99}) result.addCallback(handle_callback) return result def test_remove_user_event(self): """ When a C{remove-user} event is received, with the C{delete-home} parameter set to C{True}, the user and her home directory should be removed. Two messages should be generated: a C{users} message with details about the change and an C{operation-result} with details of the outcome of the operation. """ def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3) # Ignore the message created by plugin.run. self.assertMessages([messages[2], messages[1]], [{"timestamp": 0, "delete-users": ["jdoe"], "type": "users", "operation-id": 39}, {"type": "operation-result", "status": SUCCEEDED, "operation-id": 39, "timestamp": 0, "result-text": "remove_user succeeded"}]) users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/home/bo", "/bin/zsh")] self.setup_environment(users, [], None) result = self.manager.dispatch_message( {"username": "jdoe", "delete-home": True, "type": "remove-user", "operation-id": 39}) result.addCallback(handle_callback) return result def test_many_remove_user_events(self): """ The L{UserManager} can handle multiple remove-user events at the same time. """ users = [("foo", "x", 1000, 1000, "Foo,,,,", "/home/foo", "/bin/zsh"), ("bar", "x", 1001, 1001, "Bar,,,,", "/home/bar", "/bin/zsh")] self.setup_environment(users, [], None) def handle_callback(ignored): messages = self.broker_service.message_store.get_pending_messages() # Ignore the message created by plugin.run. messages = sorted([messages[1], messages[3]], key=lambda message: message["operation-id"]) self.assertMessages(messages, [{"type": "operation-result", "status": SUCCEEDED, "operation-id": 39, "timestamp": 0, "result-text": "remove_user succeeded"}, {"type": "operation-result", "status": SUCCEEDED, "operation-id": 40, "timestamp": 0, "result-text": "remove_user succeeded"}]) results = [] results.append(self.manager.dispatch_message({"username": "foo", "delete-home": True, "type": "remove-user", "operation-id": 39})) results.append(self.manager.dispatch_message({"username": "bar", "delete-home": True, "type": "remove-user", "operation-id": 40})) return gather_results(results).addCallback(handle_callback) def test_failing_remove_user_event(self): """ When a C{remove-user} event is received, and the user doesn't exist, we expect a single message with the failure message. 
""" self.log_helper.ignore_errors(UserManagementError) def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 1) failure_string = "UserManagementError: remove_user failed" self.assertMessages(messages, [{"type": "operation-result", "status": FAILED, "operation-id": 39, "timestamp": 0, "result-text": failure_string}]) self.setup_environment([], [], None) result = self.manager.dispatch_message( {"username": "jdoe", "delete-home": True, "type": "remove-user", "operation-id": 39}) result.addCallback(handle_callback) return result def test_remove_user_event_leave_home(self): """ When a C{remove-user} event is received, with the C{delete-home} parameter set to C{False}, the user should be removed without deleting the user's home directory. Two messages should be generated: a C{users} message with details about the change and an C{operation-result} with details of the outcome of the operation. """ def handle_callback(result): messages = ( self.broker_service.message_store.get_pending_messages()) self.assertEqual(len(messages), 3) # Ignore the message created by plugin.run. self.assertMessages([messages[2], messages[1]], [{"timestamp": 0, "delete-users": ["jdoe"], "type": "users", "operation-id": 39}, {"type": "operation-result", "status": SUCCEEDED, "operation-id": 39, "timestamp": 0, "result-text": "remove_user succeeded"}]) users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/home/bo", "/bin/zsh")] self.setup_environment(users, [], None) result = self.manager.dispatch_message( {"username": "jdoe", "delete-home": False, "type": "remove-user", "operation-id": 39}) result.addCallback(handle_callback) return result def test_remove_user_event_in_sync(self): """ The client and server should be in sync after a C{remove-user} event is received and processed. In other words, a snapshot should have been taken after the operation was handled. """ def handle_callback1(result): message_store = self.broker_service.message_store messages = message_store.get_pending_messages() self.assertTrue(messages) result = plugin.run() result.addCallback(handle_callback2, messages) return result def handle_callback2(result, messages): message_store = self.broker_service.message_store new_messages = message_store.get_pending_messages() self.assertEqual(messages, new_messages) users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/home/bo", "/bin/zsh")] plugin = self.setup_environment(users, [], self.shadow_file) result = self.manager.dispatch_message( {"username": "jdoe", "delete-home": True, "type": "remove-user", "operation-id": 39}) result.addCallback(handle_callback1) return result def test_remove_user_event_with_external_changes(self): """ If external user changes have been made but not detected by the client before a C{remove-user} event is received, the client should first detect changes and then perform the operation. The results should be reported in separate messages. 
""" def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3) self.assertMessages([messages[0], messages[2]], [{"type": "users", "create-users": [{"home-phone": None, "username": "jdoe", "uid": 1000, "enabled": True, "location": None, "work-phone": None, "primary-gid": 1000, "name": "John Doe"}]}, {"type": "users", "delete-users": ["jdoe"], "operation-id": 39}]) users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/home/bo", "/bin/zsh")] self.setup_environment(users, [], None) result = self.manager.dispatch_message( {"username": "jdoe", "delete-home": True, "type": "remove-user", "operation-id": 39}) result.addCallback(handle_callback) return result def test_lock_user_event(self): """ When a C{lock-user} event is received the user should be locked out. Two messages should be generated: a C{users} message with details about the change and an C{operation-result} with details of the outcome of the operation. """ def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3, messages) # Ignore the message created by plugin.run. self.assertMessages([messages[2], messages[1]], [{"timestamp": 0, "type": "users", "operation-id": 99, "update-users": [{"home-phone": None, "username": "jdoe", "uid": 1000, "enabled": False, "location": None, "work-phone": None, "primary-gid": 1000, "name": u"John Doe"}]}, {"type": "operation-result", "status": SUCCEEDED, "operation-id": 99, "timestamp": 0, "result-text": "lock_user succeeded"}]) users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/home/bo", "/bin/zsh")] self.setup_environment(users, [], self.shadow_file) result = self.manager.dispatch_message( {"username": "jdoe", "operation-id": 99, "type": "lock-user"}) result.addCallback(handle_callback) return result def test_failing_lock_user_event(self): """ When a C{lock-user} event is received the user should be locked out. However, if the user doesn't exist in the user database, we expect only a single failure message to be generated. """ self.log_helper.ignore_errors(UserManagementError) def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 1) failure_string = "UserManagementError: lock_user failed" self.assertMessages(messages, [{"type": "operation-result", "status": FAILED, "operation-id": 99, "timestamp": 0, "result-text": failure_string}]) self.setup_environment([], [], None) result = self.manager.dispatch_message( {"username": "jdoe", "operation-id": 99, "type": "lock-user"}) result.addCallback(handle_callback) return result def test_lock_user_event_in_sync(self): """ The client and server should be in sync after a C{lock-user} event is received and processed. In other words, a snapshot should have been taken after the operation was handled. 
""" def handle_callback1(result): message_store = self.broker_service.message_store messages = message_store.get_pending_messages() self.assertTrue(messages) result = plugin.run() result.addCallback(handle_callback2, messages) return result def handle_callback2(result, messages): message_store = self.broker_service.message_store new_messages = message_store.get_pending_messages() self.assertEqual(messages, new_messages) users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/home/bo", "/bin/zsh")] plugin = self.setup_environment(users, [], self.shadow_file) result = self.manager.dispatch_message( {"username": "jdoe", "type": "lock-user", "operation-id": 99}) result.addCallback(handle_callback1) return result def test_lock_user_event_with_external_changes(self): """ If external user changes have been made but not detected by the client before a C{lock-user} event is received, the client should first detect changes and then perform the operation. The results should be reported in separate messages. """ def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3) self.assertMessages([messages[0], messages[2]], [{"type": "users", "create-users": [{"home-phone": None, "username": "jdoe", "uid": 1000, "enabled": True, "location": None, "work-phone": None, "primary-gid": 1000, "name": "John Doe"}]}, {"type": "users", "operation-id": 99, "update-users": [{"home-phone": None, "username": "jdoe", "uid": 1000, "enabled": False, "location": None, "work-phone": None, "primary-gid": 1000, "name": "John Doe"}]}]) users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/home/bo", "/bin/zsh")] self.setup_environment(users, [], self.shadow_file) result = self.manager.dispatch_message( {"username": "jdoe", "type": "lock-user", "operation-id": 99}) result.addCallback(handle_callback) return result def test_unlock_user_event(self): """ When an C{unlock-user} event is received the user should be enabled. Two messages should be generated: a C{users} message with details about the change and an C{operation-result} with details of the outcome of the operation. """ def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3) # Ignore the message created by plugin.run. self.assertMessages([messages[2], messages[1]], [{"timestamp": 0, "type": "users", "operation-id": 99, "update-users": [{"home-phone": None, "username": "psmith", "uid": 1000, "enabled": True, "location": None, "work-phone": None, "primary-gid": 1000, "name": u"Paul Smith"}]}, {"type": "operation-result", "status": SUCCEEDED, "operation-id": 99, "timestamp": 0, "result-text": "unlock_user succeeded"}]) users = [("psmith", "x", 1000, 1000, "Paul Smith,,,,", "/home/psmith", "/bin/zsh")] self.setup_environment(users, [], self.shadow_file) result = self.manager.dispatch_message( {"username": "psmith", "type": "unlock-user", "operation-id": 99}) result.addCallback(handle_callback) return result def test_failing_unlock_user_event(self): """ When an C{unlock-user} event is received the user should be enabled. However, when the user doesn't exist in the user database, an error should be generated. 
""" self.log_helper.ignore_errors(UserManagementError) def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 1) failure_string = "UserManagementError: unlock_user failed" self.assertMessages(messages, [{"type": "operation-result", "status": FAILED, "operation-id": 99, "timestamp": 0, "result-text": failure_string}]) self.setup_environment([], [], None) result = self.manager.dispatch_message( {"username": "jdoe", "operation-id": 99, "type": "unlock-user"}) result.addCallback(handle_callback) return result def test_unlock_user_event_in_sync(self): """ The client and server should be in sync after an C{unlock-user} event is received and processed. In other words, a snapshot should have been taken after the operation was handled. """ def handle_callback(result): message_store = self.broker_service.message_store messages = message_store.get_pending_messages() self.assertTrue(messages) result = plugin.run() result.addCallback(handle_callback2, messages) return result def handle_callback2(result, messages): message_store = self.broker_service.message_store new_messages = message_store.get_pending_messages() self.assertEqual(messages, new_messages) users = [("psmith", "x", 1000, 1000, "Paul Smith,,,,", "/home/psmith", "/bin/zsh")] plugin = self.setup_environment(users, [], self.shadow_file) result = self.manager.dispatch_message( {"username": "psmith", "operation-id": 99, "type": "unlock-user"}) result.addCallback(handle_callback) return result def test_unlock_user_event_with_external_changes(self): """ If external user changes have been made but not detected by the client before a C{unlock-user} event is received, the client should first detect changes and then perform the operation. The results should be reported in separate messages. """ def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3) self.assertMessages([messages[0], messages[2]], [{"type": "users", "create-users": [{"home-phone": None, "username": "psmith", "uid": 1000, "enabled": False, "location": None, "work-phone": None, "primary-gid": 1000, "name": "Paul Smith"}]}, {"type": "users", "operation-id": 99, "update-users": [{"home-phone": None, "username": "psmith", "uid": 1000, "enabled": True, "location": None, "work-phone": None, "primary-gid": 1000, "name": "Paul Smith"}]}]) users = [("psmith", "x", 1000, 1000, "Paul Smith,,,,", "/home/psmith", "/bin/zsh")] self.setup_environment(users, [], self.shadow_file) result = self.manager.dispatch_message( {"username": "psmith", "operation-id": 99, "type": "unlock-user"}) result.addCallback(handle_callback) return result class GroupOperationsMessagingTest(UserGroupTestBase): def test_add_group_event(self): """ When an C{add-group} message is received the group should be created. Two messages should be generated: a C{users} message with details about the change and an C{operation-result} with details of the outcome of the operation. """ def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 2) # Ignore the message created by plugin.run. 
self.assertMessages([messages[1], messages[0]], [{"type": "users", "timestamp": 0, "operation-id": 123, "create-groups": [{"gid": 1000, "name": "bizdev"}]}, {"type": "operation-result", "status": SUCCEEDED, "operation-id": 123, "timestamp": 0, "result-text": "add_group succeeded"}]) self.setup_environment([], [], None) result = self.manager.dispatch_message( {"groupname": "bizdev", "type": "add-group", "operation-id": 123}) result.addCallback(handle_callback) return result def test_add_group_event_in_sync(self): """ The client and server should be in sync after an C{add-group} event is received and processed. In other words, a snapshot should have been taken after the operation was handled. """ def handle_callback1(result): message_store = self.broker_service.message_store messages = message_store.get_pending_messages() self.assertTrue(messages) result = plugin.run() result.addCallback(handle_callback2, messages) return result def handle_callback2(result, messages): message_store = self.broker_service.message_store new_messages = message_store.get_pending_messages() self.assertEqual(messages, new_messages) plugin = self.setup_environment([], [], None) result = self.manager.dispatch_message( {"groupname": "bizdev", "operation-id": 123, "type": "add-group"}) result.addCallback(handle_callback1) return result def test_add_group_event_with_external_changes(self): """ If external user changes have been made but not detected by the client before an C{add-group} event is received, the client should first detect changes and then perform the operation. The results should be reported in separate messages. """ def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3) # We skip the operation-result message. self.assertMessages([messages[0], messages[2]], [{"type": "users", "create-groups": [{"gid": 1001, "name": "sales"}]}, {"type": "users", "operation-id": 123, "create-groups": [{"gid": 1002, "name": "bizdev"}]}]) groups = [("sales", "x", 1001, [])] self.setup_environment([], groups, None) result = self.manager.dispatch_message( {"groupname": "bizdev", "type": "add-group", "operation-id": 123}) result.addCallback(handle_callback) return result def test_edit_group_event(self): """ When an C{edit-group} message is received the specified group should be edited. This causes the originally named group to be removed and replaced by a group with the new name. This generates a C{users} message with details about the change and an C{operation-result} with details of the outcome of the operation. """ def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3) # Ignore the message created when the initial snapshot was # taken before the operation was performed.
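# The rename is reported as a delete of the old group name plus a # create of a group with the new name, as the third expected message # below shows.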
expected = [{"create-groups": [{"gid": 50, "name": "sales"}], "timestamp": 0, "type": "users"}, {"type": "operation-result", "status": SUCCEEDED, "operation-id": 123, "timestamp": 0, "result-text": "set_group_details succeeded"}, {"delete-groups": ["sales"], "create-groups": [{"gid": 50, "name": "bizdev"}], "timestamp": 0, "operation-id": 123, "type": "users"}, ] self.assertMessages(messages, expected) groups = [("sales", "x", 50, [])] self.setup_environment([], groups, None) result = self.manager.dispatch_message( {"groupname": "sales", "new-name": "bizdev", "type": "edit-group", "operation-id": 123}) result.addCallback(handle_callback) return result def test_edit_group_event_in_sync(self): """ The client and server should be in sync after an C{edit-group} event is received and processed. In other words, a snapshot should have been taken after the operation was handled. """ def handle_callback1(result): message_store = self.broker_service.message_store messages = message_store.get_pending_messages() self.assertTrue(messages) result = plugin.run() result.addCallback(handle_callback2, messages) return result def handle_callback2(result, messages): message_store = self.broker_service.message_store new_messages = message_store.get_pending_messages() self.assertEqual(messages, new_messages) groups = [("sales", "x", 50, [])] plugin = self.setup_environment([], groups, None) result = self.manager.dispatch_message( {"gid": 50, "groupname": "sales", "new-name": "bizdev", "operation-id": 123, "type": "edit-group"}) result.addCallback(handle_callback1) return result def test_edit_group_event_with_external_changes(self): """ If external user changes have been made but not detected by the client before an C{edit-group} event is received, the client should first detect changes and then perform the operation. The results should be reported in separate messages. """ def handle_callback1(result): result = self.manager.dispatch_message( {"groupname": "sales", "new-name": "webdev", "operation-id": 123, "type": "edit-group"}) result.addCallback(handle_callback2) return result def handle_callback2(result): message_store = self.broker_service.message_store messages = message_store.get_pending_messages() self.assertEqual(len(messages), 3) self.assertMessages([messages[0], messages[2]], [{"type": "users", "create-groups": [{"gid": 1001, "name": "sales"}]}, {"type": "users", "operation-id": 123, "delete-groups": ["sales"], "create-groups": [{"gid": 1001, "name": "webdev"}]}]) groups = [("sales", "x", 1001, [])] plugin = self.setup_environment([], groups, None) result = plugin.run() result.addCallback(handle_callback1) return result def test_add_group_member_event(self): """ When an C{add-group-member} message is received the new user should be added to the group. Two messages should be generated: a C{users} message with details about the change and an C{operation-result} with details of the outcome of the operation. """ def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3) # Ignore the message created when the initial snapshot was # taken before the operation was performed. 
expected = [{"type": "users", "timestamp": 0, "operation-id": 123, "create-group-members": {"bizdev": ["jdoe"]}}, {"type": "operation-result", "timestamp": 0, "status": SUCCEEDED, "operation-id": 123, "result-text": "add_group_member succeeded"}] self.assertMessages([messages[2], messages[1]], expected) users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/bin/sh", "/home/jdoe")] groups = [("bizdev", "x", 1001, [])] self.setup_environment(users, groups, None) result = self.manager.dispatch_message( {"username": "jdoe", "groupname": "bizdev", "operation-id": 123, "type": "add-group-member"}) result.addCallback(handle_callback) return result def test_add_group_member_with_username_and_groupname_event(self): """ When an C{add-group-member} message is received with a username and group name, instead of a UID and GID, the new user should be added to the group. Two messages should be generated: a C{users} message with details about the change and an C{operation-result} with details of the outcome of the operation. """ def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3) # Ignore the message created when the initial snapshot was # taken before the operation was performed. expected = [{"type": "users", "timestamp": 0, "operation-id": 123, "create-group-members": {"bizdev": ["jdoe"]}}, {"type": "operation-result", "timestamp": 0, "status": SUCCEEDED, "operation-id": 123, "result-text": "add_group_member succeeded"}] self.assertMessages([messages[2], messages[1]], expected) users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/bin/sh", "/home/jdoe")] groups = [("bizdev", "x", 1001, [])] self.setup_environment(users, groups, None) result = self.manager.dispatch_message( {"username": "jdoe", "groupname": "bizdev", "type": "add-group-member", "operation-id": 123}) result.addCallback(handle_callback) return result def test_add_group_member_event_in_sync(self): """ The client and server should be in sync after an C{add-group-member} event is received and processed. In other words, a snapshot should have been taken after the operation was handled. """ def handle_callback(result): message_store = self.broker_service.message_store messages = message_store.get_pending_messages() self.assertTrue(messages) result = plugin.run() result.addCallback(handle_callback2, messages) return result def handle_callback2(result, messages): message_store = self.broker_service.message_store new_messages = message_store.get_pending_messages() self.assertEqual(messages, new_messages) users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/bin/sh", "/home/jdoe")] groups = [("bizdev", "x", 1001, ["jdoe"])] plugin = self.setup_environment(users, groups, None) result = self.manager.dispatch_message( {"username": u"jdoe", "groupname": u"bizdev", "type": "add-group-member", "operation-id": 123}) result.addCallback(handle_callback) return result def test_add_group_member_event_with_external_changes(self): """ If external user changes have been made but not detected by the client before an C{add-group-member} event is received, the client should first detect changes and then perform the operation. The results should be reported in separate messages. 
""" def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3) expected = [{"type": "users", "create-users": [{"home-phone": None, "username": "jdoe", "uid": 1000, "enabled": True, "location": None, "work-phone": None, "primary-gid": 1000, "name": "John Doe"}], "create-groups": [{"gid": 1001, "name": "bizdev"}]}, {"type": "users", "operation-id": 123, "create-group-members": {"bizdev": ["jdoe"]}}] self.assertMessages([messages[0], messages[2]], expected) users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/bin/sh", "/home/jdoe")] groups = [("bizdev", "x", 1001, [])] self.setup_environment(users, groups, None) result = self.manager.dispatch_message( {"username": "jdoe", "groupname": "bizdev", "type": "add-group-member", "operation-id": 123}) result.addCallback(handle_callback) return result def test_remove_group_member_event(self): """ When an C{add-group-member} message is received the user should be removed from the group. Two messages should be generated: a C{users} message with details about the change and an C{operation-result} with details of the outcome of the operation. """ def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3) # Ignore the message created by plugin.run. self.assertMessages( [messages[2], messages[1]], [{"type": "users", "timestamp": 0, "operation-id": 123, "delete-group-members": {"bizdev": ["jdoe"]}}, {"type": "operation-result", "status": SUCCEEDED, "operation-id": 123, "timestamp": 0, "result-text": "remove_group_member succeeded"}]) users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/bin/sh", "/home/jdoe")] groups = [("bizdev", "x", 1001, ["jdoe"])] self.setup_environment(users, groups, None) result = self.manager.dispatch_message( {"username": "jdoe", "groupname": "bizdev", "type": "remove-group-member", "operation-id": 123}) result.addCallback(handle_callback) return result def test_remove_group_member_event_in_sync(self): """ The client and server should be in sync after an C{remove-group-member} event is received and processed. In other words, a snapshot should have been taken after the operation was handled. """ def handle_callback1(result): messages = self.broker_service.message_store.get_pending_messages() self.assertTrue(messages) result = plugin.run() result.addCallback(handle_callback2, messages) return result def handle_callback2(result, messages): message_store = self.broker_service.message_store new_messages = message_store.get_pending_messages() self.assertEqual(messages, new_messages) users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/bin/sh", "/home/jdoe")] groups = [("bizdev", "x", 1001, ["jdoe"])] plugin = self.setup_environment(users, groups, None) result = self.manager.dispatch_message( {"username": "jdoe", "groupname": "bizdev", "type": "remove-group-member", "operation-id": 123}) result.addCallback(handle_callback1) return result def test_remove_group_member_event_with_external_changes(self): """ If external user changes have been made but not detected by the client before a C{remove-group-member} event is received, the client should first detect changes and then perform the operation. The results should be reported in separate messages. 
""" def handle_callback(result): messages = self.broker_service.message_store.get_pending_messages() self.assertEqual(len(messages), 3) expected = [{"timestamp": 0, "type": "users", "create-users": [{"home-phone": None, "username": "jdoe", "uid": 1000, "enabled": True, "location": None, "work-phone": None, "primary-gid": 1000, "name": "John Doe"}], "create-groups": [{"gid": 1001, "name": "bizdev"}], "create-group-members": {"bizdev": ["jdoe"]}}, {"type": "users", "operation-id": 123, "delete-group-members": {"bizdev": ["jdoe"]}}] self.assertMessages([messages[0], messages[2]], expected) users = [("jdoe", "x", 1000, 1000, "John Doe,,,,", "/bin/sh", "/home/jdoe")] groups = [("bizdev", "x", 1001, ["jdoe"])] self.setup_environment(users, groups, None) result = self.manager.dispatch_message( {"groupname": "bizdev", "username": "jdoe", "type": "remove-group-member", "operation-id": 123}) result.addCallback(handle_callback) return result def test_remove_group_event(self): """ When a C{remove-group} message is received the specified group should be removeed. Two messages should be generated: a C{users} message with details about the change and an C{operation-result} with details of the outcome of the operation. """ def handle_callback1(result): result = self.manager.dispatch_message( {"groupname": "sales", "type": "remove-group", "operation-id": 123}) result.addCallback(handle_callback2) return result def handle_callback2(result): message_store = self.broker_service.message_store messages = message_store.get_pending_messages() self.assertEqual(len(messages), 3) # Ignore the message created when the initial snapshot was # taken before the operation was performed. self.assertMessages([messages[2], messages[1]], [{"type": "users", "timestamp": 0, "operation-id": 123, "delete-groups": ["sales"]}, {"type": "operation-result", "status": SUCCEEDED, "operation-id": 123, "timestamp": 0, "result-text": "remove_group succeeded"}]) groups = [("sales", "x", 1001, ["jdoe"])] plugin = self.setup_environment([], groups, None) result = plugin.run() result.addCallback(handle_callback1) return result def test_remove_group_event_in_sync(self): """ The client and server should be in sync after a C{remove-group} event is received and processed. In other words, a snapshot should have been taken after the operation was handled. """ def handle_callback1(result): message_store = self.broker_service.message_store messages = message_store.get_pending_messages() self.assertTrue(messages) result = plugin.run() result.addCallback(handle_callback2, messages) return result def handle_callback2(result, messages): message_store = self.broker_service.message_store new_messages = message_store.get_pending_messages() self.assertEqual(messages, new_messages) groups = [("sales", "x", 50, [])] plugin = self.setup_environment([], groups, None) result = self.manager.dispatch_message( {"groupname": "sales", "operation-id": 123, "type": "remove-group"}) result.addCallback(handle_callback1) return result def test_remove_group_event_with_external_changes(self): """ If external user changes have been made but not detected by the client before a C{remove-group} event is received, the client should first detect changes and then perform the operation. The results should be reported in separate messages. 
""" def handle_callback1(result): result = self.manager.dispatch_message( {"groupname": "sales", "operation-id": 123, "type": "remove-group"}) result.addCallback(handle_callback2) return result def handle_callback2(result): message_store = self.broker_service.message_store messages = message_store.get_pending_messages() self.assertEqual(len(messages), 3) self.assertMessages([messages[0], messages[2]], [{"type": "users", "create-groups": [{"gid": 1001, "name": "sales"}]}, {"type": "users", "delete-groups": ["sales"], "operation-id": 123}]) groups = [("sales", "x", 1001, [])] plugin = self.setup_environment([], groups, None) result = plugin.run() result.addCallback(handle_callback1) return result class UserManagerTest(LandscapeTest): def setUp(self): super(UserManagerTest, self).setUp() self.shadow_file = self.makeFile() self.user_manager = UserManager(shadow_file=self.shadow_file) def test_get_locked_usernames(self): """ The L{UserManager.get_locked_usernames} method returns only user names of locked users. """ fd = open(self.shadow_file, "w") fd.write("jdoe:$1$xFlQvTqe$cBtrNEDOIKMy/BuJoUdeG0:13348:0:99999:7:::\n" "psmith:!:13348:0:99999:7:::\n" "yo:$1$q7sz09uw$q.A3526M/SHu8vUb.Jo1A/:13349:0:99999:7:::\n") fd.close() self.assertEqual(self.user_manager.get_locked_usernames(), ["psmith"]) def test_get_locked_usernames_with_empty_shadow_file(self): """ The L{UserManager.get_locked_usernames} method returns an empty C{list} if the shadow file is empty. """ fd = open(self.shadow_file, "w") fd.write("\n") fd.close() self.assertEqual(self.user_manager.get_locked_usernames(), []) def test_get_locked_usernames_with_non_existing_shadow_file(self): """ The L{UserManager.get_locked_usernames} method returns an empty C{list} if the shadow file can't be read. An error message is logged as well. """ self.log_helper.ignore_errors("Error reading shadow file.*") self.assertFalse(os.path.exists(self.shadow_file)) self.assertEqual(self.user_manager.get_locked_usernames(), []) self.assertIn("Error reading shadow file. [Errno 2] No such file or " "directory", self.logfile.getvalue()) class RemoteUserManagerTest(LandscapeTest): helpers = [ManagerHelper] def setUp(self): super(RemoteUserManagerTest, self).setUp() def set_remote(remote): self.remote_user_manager = remote self.shadow_file = self.makeFile() self.user_manager = UserManager(shadow_file=self.shadow_file) self.user_manager_connector = RemoteUserManagerConnector(self.reactor, self.config) self.user_manager.register(self.manager) connected = self.user_manager_connector.connect() return connected.addCallback(set_remote) def tearDown(self): self.user_manager_connector.disconnect() self.user_manager.stop() return super(RemoteUserManagerTest, self).tearDown() def test_get_locked_usernames(self): """ The L{get_locked_usernames} method forwards the request to the remote L{UserManager} object. """ self.user_manager.get_locked_usernames = self.mocker.mock() self.expect(self.user_manager.get_locked_usernames()).result(["fred"]) self.mocker.replay() result = self.remote_user_manager.get_locked_usernames() return self.assertSuccess(result, ["fred"]) landscape-client-14.01/landscape/manager/tests/test_manager.py0000644000175000017500000000166212301414317024273 0ustar andreasandreasfrom landscape.manager.store import ManagerStore from landscape.tests.helpers import LandscapeTest, ManagerHelper class ManagerTest(LandscapeTest): helpers = [ManagerHelper] def test_reactor(self): """ A L{Manager} instance has a proper C{reactor} attribute. 
""" self.assertIs(self.manager.reactor, self.reactor) def test_broker(self): """ A L{Manager} instance has a proper C{broker} attribute referencing a connected L{RemoteBroker}. """ return self.assertSuccess(self.manager.broker.ping(), True) def test_config(self): """ A L{Manager} instance has a proper C{config} attribute. """ self.assertIs(self.manager.config, self.config) def test_store(self): """ A L{Manager} instance has a proper C{store} attribute. """ self.assertTrue(isinstance(self.manager.store, ManagerStore)) landscape-client-14.01/landscape/manager/tests/test_processkiller.py0000644000175000017500000002236212301414317025542 0ustar andreasandreasfrom datetime import datetime import signal import subprocess from landscape.tests.helpers import (LandscapeTest, ManagerHelper, ProcessDataBuilder) from landscape.lib.process import ProcessInformation from landscape.manager.plugin import SUCCEEDED, FAILED from landscape.manager.processkiller import ( ProcessKiller, ProcessNotFoundError, ProcessMismatchError, SignalProcessError) def get_active_process(): return subprocess.Popen(["python", "-c", "raw_input()"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) def get_missing_pid(): popen = subprocess.Popen(["hostname"], stdout=subprocess.PIPE) popen.wait() return popen.pid class ProcessKillerTests(LandscapeTest): """Tests for L{ProcessKiller}.""" helpers = [ManagerHelper] def setUp(self): LandscapeTest.setUp(self) self.sample_dir = self.makeDir() self.builder = ProcessDataBuilder(self.sample_dir) self.process_info = ProcessInformation(proc_dir=self.sample_dir, jiffies=1, boot_time=10) self.signaller = ProcessKiller(process_info=self.process_info) service = self.broker_service service.message_store.set_accepted_types(["operation-result"]) def _test_signal_name(self, signame, signum): self.manager.add(self.signaller) self.builder.create_data(100, self.builder.RUNNING, uid=1000, gid=1000, started_after_boot=10, process_name="ooga") kill = self.mocker.replace("os.kill", passthrough=False) kill(100, signum) self.mocker.replay() self.manager.dispatch_message( {"type": "signal-process", "operation-id": 1, "pid": 100, "name": "ooga", "start-time": 20, "signal": signame}) def test_kill_process_signal(self): """ When specifying the signal name as 'KILL', os.kill should be passed the KILL signal. """ self._test_signal_name("KILL", signal.SIGKILL) def test_end_process_signal(self): """ When specifying the signal name as 'TERM', os.kill should be passed the TERM signal. """ self._test_signal_name("TERM", signal.SIGTERM) def _test_signal_real_process(self, signame): """ When a 'signal-process' message is received the plugin should signal the appropriate process and generate an operation-result message with details of the outcome. Data is gathered from internal plugin methods to get the start time of the test process being signalled. """ process_info_factory = ProcessInformation() signaller = ProcessKiller() signaller.register(self.manager) popen = get_active_process() process_info = process_info_factory.get_process_info(popen.pid) self.assertNotEquals(process_info, None) start_time = process_info["start-time"] self.manager.dispatch_message( {"type": "signal-process", "operation-id": 1, "pid": popen.pid, "name": "python", "start-time": start_time, "signal": signame}) # We're waiting on the child process here so that we (the # parent process) consume it's return code; this prevents it # from becoming a zombie and makes the test do a better job of # reflecting the real world. 
return_code = popen.wait() # The return code is negative if the process was terminated by # a signal. self.assertTrue(return_code < 0) process_info = process_info_factory.get_process_info(popen.pid) self.assertEqual(process_info, None) service = self.broker_service self.assertMessages(service.message_store.get_pending_messages(), [{"type": "operation-result", "status": SUCCEEDED, "operation-id": 1}]) def test_kill_real_process(self): self._test_signal_real_process("KILL") def test_end_real_process(self): self._test_signal_real_process("TERM") def test_signal_missing_process(self): """ When a 'signal-process' message is received for a process that no longer exists the plugin should generate an error. """ self.log_helper.ignore_errors(ProcessNotFoundError) self.manager.add(self.signaller) pid = get_missing_pid() self.manager.dispatch_message( {"operation-id": 1, "type": "signal-process", "pid": pid, "name": "zsh", "start-time": 110, "signal": "KILL"}) expected_text = ("ProcessNotFoundError: The process zsh with PID %d " "that started at 1970-01-01 00:01:50 UTC was not " "found" % (pid,)) service = self.broker_service self.assertMessages(service.message_store.get_pending_messages(), [{"type": "operation-result", "operation-id": 1, "status": FAILED, "result-text": expected_text}]) self.assertTrue("ProcessNotFoundError" in self.logfile.getvalue()) def test_signal_process_start_time_mismatch(self): """ When a 'signal-process' message is received with a mismatched start time the plugin should generate an error. """ self.log_helper.ignore_errors(ProcessMismatchError) self.manager.add(self.signaller) pid = get_missing_pid() self.builder.create_data(pid, self.builder.RUNNING, uid=1000, gid=1000, started_after_boot=10, process_name="hostname") self.manager.dispatch_message( {"operation-id": 1, "type": "signal-process", "pid": pid, "name": "python", "start-time": 11, "signal": "KILL"}) expected_time = datetime.utcfromtimestamp(11) # boot time + proc start time = 20 actual_time = datetime.utcfromtimestamp(20) expected_text = ("ProcessMismatchError: The process python with " "PID %d that started at %s UTC was not found. A " "process with the same PID that started at %s UTC " "was found and not sent the KILL signal" % (pid, expected_time, actual_time)) service = self.broker_service self.assertMessages(service.message_store.get_pending_messages(), [{"type": "operation-result", "operation-id": 1, "status": FAILED, "result-text": expected_text}]) self.assertTrue("ProcessMismatchError" in self.logfile.getvalue()) def test_signal_process_race(self): """ Before trying to signal a process it first checks to make sure a process with a matching PID and name exists. It's possible for the process to disappear after checking the process exists and before sending the signal; a generic error should be raised in that case.
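The test below simulates the race by building /proc-style data for a PID that is no longer alive, so the existence check passes while the actual kill attempt fails.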
""" self.log_helper.ignore_errors(SignalProcessError) pid = get_missing_pid() self.builder.create_data(pid, self.builder.RUNNING, uid=1000, gid=1000, started_after_boot=10, process_name="hostname") self.assertRaises(SignalProcessError, self.signaller.signal_process, pid, "hostname", 20, "KILL") self.manager.add(self.signaller) self.manager.dispatch_message( {"operation-id": 1, "type": "signal-process", "pid": pid, "name": "hostname", "start-time": 20, "signal": "KILL"}) expected_text = ("SignalProcessError: Attempting to send the KILL " "signal to the process hostname with PID %d failed" % (pid,)) service = self.broker_service self.assertMessages(service.message_store.get_pending_messages(), [{"type": "operation-result", "operation-id": 1, "status": FAILED, "result-text": expected_text}]) self.assertTrue("SignalProcessError" in self.logfile.getvalue()) def test_accept_small_start_time_skews(self): """ The boot time isn't very precise, so accept small skews in the computed process start time. """ self.manager.add(self.signaller) self.builder.create_data(100, self.builder.RUNNING, uid=1000, gid=1000, started_after_boot=10, process_name="ooga") kill = self.mocker.replace("os.kill", passthrough=False) kill(100, signal.SIGKILL) self.mocker.replay() self.manager.dispatch_message( {"type": "signal-process", "operation-id": 1, "pid": 100, "name": "ooga", "start-time": 21, "signal": "KILL"}) landscape-client-14.01/landscape/manager/tests/__init__.py0000644000175000017500000000000012301414317023342 0ustar andreasandreaslandscape-client-14.01/landscape/manager/tests/test_customgraph.py0000644000175000017500000006423712301414317025224 0ustar andreasandreasimport os import pwd import logging from twisted.internet.error import ProcessDone from twisted.python.failure import Failure from landscape import SERVER_API from landscape.manager.customgraph import CustomGraphPlugin from landscape.manager.store import ManagerStore from landscape.tests.helpers import ( LandscapeTest, ManagerHelper, StubProcessFactory, DummyProcess) from landscape.tests.mocker import ANY class CustomGraphManagerTests(LandscapeTest): helpers = [ManagerHelper] def setUp(self): super(CustomGraphManagerTests, self).setUp() self.store = ManagerStore(":memory:") self.manager.store = self.store self.broker_service.message_store.set_accepted_types( ["custom-graph"]) self.data_path = self.makeDir() self.manager.config.data_path = self.data_path os.makedirs(os.path.join(self.data_path, "custom-graph-scripts")) self.manager.config.script_users = "ALL" self.graph_manager = CustomGraphPlugin( create_time=range(1500, 0, -300).pop) self.manager.add(self.graph_manager) def _exit_process_protocol(self, protocol, stdout): protocol.childDataReceived(1, stdout) for fd in (0, 1, 2): protocol.childConnectionLost(fd) protocol.processEnded(Failure(ProcessDone(0))) def test_add_graph(self): uid = os.getuid() info = pwd.getpwuid(uid) username = info.pw_name self.manager.dispatch_message( {"type": "custom-graph-add", "interpreter": "/bin/sh", "code": "echo hi!", "username": username, "graph-id": 123}) self.assertEqual( self.store.get_graphs(), [(123, os.path.join(self.data_path, "custom-graph-scripts", "graph-123"), username)]) def test_add_graph_unknown_user(self): """ Attempting to add a graph with an unknown user should not result in an error, instead a message should be logged, the error will be picked up when the graph executes. 
""" mock_getpwnam = self.mocker.replace("pwd.getpwnam", passthrough=False) mock_getpwnam("foo") self.mocker.throw(KeyError("foo")) self.mocker.replay() error_message = "Attempt to add graph with unknown user foo" self.log_helper.ignore_errors(error_message) self.logger.setLevel(logging.ERROR) self.manager.dispatch_message( {"type": "custom-graph-add", "interpreter": "/bin/sh", "code": "echo hi!", "username": "foo", "graph-id": 123}) graph = self.store.get_graph(123) self.assertEqual(graph[0], 123) self.assertEqual(graph[2], u"foo") self.assertTrue(error_message in self.logfile.getvalue()) def test_add_graph_for_user(self): mock_chown = self.mocker.replace("os.chown", passthrough=False) mock_chown(ANY, 1234, 5678) mock_chmod = self.mocker.replace("os.chmod", passthrough=False) mock_chmod(ANY, 0700) mock_getpwnam = self.mocker.replace("pwd.getpwnam", passthrough=False) class pwnam(object): pw_uid = 1234 pw_gid = 5678 pw_dir = self.makeFile() self.expect(mock_getpwnam("bar")).result(pwnam) self.mocker.replay() self.manager.dispatch_message( {"type": "custom-graph-add", "interpreter": "/bin/sh", "code": "echo hi!", "username": "bar", "graph-id": 123}) self.assertEqual( self.store.get_graphs(), [(123, os.path.join(self.data_path, "custom-graph-scripts", "graph-123"), "bar")]) def test_remove_unknown_graph(self): self.manager.dispatch_message( {"type": "custom-graph-remove", "graph-id": 123}) def test_remove_graph(self): filename = self.makeFile() tempfile = file(filename, "w") tempfile.write("foo") tempfile.close() self.store.add_graph(123, filename, u"user") self.manager.dispatch_message( {"type": "custom-graph-remove", "graph-id": 123}) self.assertFalse(os.path.exists(filename)) def test_run(self): filename = self.makeFile() tempfile = file(filename, "w") tempfile.write("#!/bin/sh\necho 1") tempfile.close() os.chmod(filename, 0777) self.store.add_graph(123, filename, None) def check(ignore): self.graph_manager.exchange() script_hash = "483f2304b49063680c75e3c9e09cf6d0" self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"data": {123: {"error": u"", "values": [(300, 1.0)], "script-hash": script_hash}}, "type": "custom-graph"}]) return self.graph_manager.run().addCallback(check) def test_run_multiple(self): filename = self.makeFile() tempfile = file(filename, "w") tempfile.write("#!/bin/sh\necho 1") tempfile.close() os.chmod(filename, 0777) self.store.add_graph(123, filename, None) filename = self.makeFile() tempfile = file(filename, "w") tempfile.write("#!/bin/sh\necho 2") tempfile.close() os.chmod(filename, 0777) self.store.add_graph(124, filename, None) def check(ignore): self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"data": {123: {"error": u"", "values": [(300, 1.0)], "script-hash": "483f2304b49063680c75e3c9e09cf6d0" }, 124: {"error": u"", "values": [(300, 2.0)], "script-hash": "73a74b1530b2256db7edacb9b9cc385e" } }, "type": "custom-graph"}]) return self.graph_manager.run().addCallback(check) def test_run_with_nonzero_exit_code(self): filename = self.makeFile() tempfile = file(filename, "w") tempfile.write("#!/bin/sh\nexit 1") tempfile.close() os.chmod(filename, 0777) self.store.add_graph(123, filename, None) def check(ignore): self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"data": {123: {"error": u" (process exited with code 1)", "values": [], "script-hash": "eaca3ba1a3bf1948876eba320148c5e9" } }, "type": "custom-graph"}]) return 
self.graph_manager.run().addCallback(check) def test_run_cast_result_error(self): filename = self.makeFile("some_content") self.store.add_graph(123, filename, None) factory = StubProcessFactory() self.graph_manager.process_factory = factory result = self.graph_manager.run() self.assertEqual(len(factory.spawns), 1) spawn = factory.spawns[0] self.assertEqual(spawn[1], filename) self._exit_process_protocol(spawn[0], "foobar") def check(ignore): self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"data": {123: {"error": u"InvalidFormatError: Failed to convert to " "number: 'foobar'", "values": [], "script-hash": "baab6c16d9143523b7865d46896e4596"}}, "type": "custom-graph"}]) return result.addCallback(check) def test_run_no_output_error(self): filename = self.makeFile("some_content") self.store.add_graph(123, filename, None) factory = StubProcessFactory() self.graph_manager.process_factory = factory result = self.graph_manager.run() self.assertEqual(len(factory.spawns), 1) spawn = factory.spawns[0] self.assertEqual(spawn[1], filename) self._exit_process_protocol(spawn[0], "") def check(ignore): self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"data": {123: {"error": u"NoOutputError: Script did not output " "any value", "values": [], "script-hash": "baab6c16d9143523b7865d46896e4596"}}, "type": "custom-graph"}]) return result.addCallback(check) def test_run_no_output_error_with_other_result(self): filename1 = self.makeFile("some_content") self.store.add_graph(123, filename1, None) filename2 = self.makeFile("some_content") self.store.add_graph(124, filename2, None) factory = StubProcessFactory() self.graph_manager.process_factory = factory result = self.graph_manager.run() self.assertEqual(len(factory.spawns), 2) spawn = factory.spawns[0] self._exit_process_protocol(spawn[0], "") spawn = factory.spawns[1] self._exit_process_protocol(spawn[0], "0.5") def check(ignore): self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"data": {123: {"error": u"NoOutputError: Script did not output " "any value", "script-hash": "baab6c16d9143523b7865d46896e4596", "values": []}, 124: {"error": u"", "script-hash": "baab6c16d9143523b7865d46896e4596", "values": [(300, 0.5)]}}, "type": "custom-graph"}]) return result.addCallback(check) def test_multiple_errors(self): filename1 = self.makeFile("some_content") self.store.add_graph(123, filename1, None) filename2 = self.makeFile("some_content") self.store.add_graph(124, filename2, None) factory = StubProcessFactory() self.graph_manager.process_factory = factory result = self.graph_manager.run() self.assertEqual(len(factory.spawns), 2) spawn = factory.spawns[0] self._exit_process_protocol(spawn[0], "foo") spawn = factory.spawns[1] self._exit_process_protocol(spawn[0], "") def check(ignore): self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"data": {123: {"error": u"InvalidFormatError: Failed to convert " "to number: 'foo'", "script-hash": "baab6c16d9143523b7865d46896e4596", "values": []}, 124: {"error": u"NoOutputError: Script did not output " "any value", "script-hash": "baab6c16d9143523b7865d46896e4596", "values": []}}, "type": "custom-graph"}]) return result.addCallback(check) def test_run_user(self): filename = self.makeFile("some content") self.store.add_graph(123, filename, "bar") factory = StubProcessFactory() 
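# Substitute a stub factory so no real process is spawned; the recorded # spawn arguments (path, args, env, cwd, uid, gid) can then be inspected # directly, as the assertions below do.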
self.graph_manager.process_factory = factory mock_getpwnam = self.mocker.replace("pwd.getpwnam", passthrough=False) class pwnam(object): pw_uid = 1234 pw_gid = 5678 pw_dir = self.makeFile() self.expect(mock_getpwnam("bar")).result(pwnam) self.mocker.replay() result = self.graph_manager.run() self.assertEqual(len(factory.spawns), 1) spawn = factory.spawns[0] self.assertEqual(spawn[1], filename) self.assertEqual(spawn[2], ()) self.assertEqual(spawn[3], {}) self.assertEqual(spawn[4], "/") self.assertEqual(spawn[5], 1234) self.assertEqual(spawn[6], 5678) self._exit_process_protocol(spawn[0], "spam") return result def test_run_disallowed_user(self): uid = os.getuid() info = pwd.getpwuid(uid) username = info.pw_name self.manager.config.script_users = "foo" self.store.add_graph(123, "filename", username) factory = StubProcessFactory() self.graph_manager.process_factory = factory result = self.graph_manager.run() self.assertEqual(len(factory.spawns), 0) def check(ignore): self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"data": {123: {"error": u"ProhibitedUserError: Custom graph cannot be run as " "user %s" % (username,), "script-hash": "", "values": []}}, "type": "custom-graph"}]) return result.addCallback(check) def test_run_unknown_user(self): mock_getpwnam = self.mocker.replace("pwd.getpwnam", passthrough=False) mock_getpwnam("foo") self.mocker.throw(KeyError("foo")) self.mocker.replay() self.manager.config.script_users = "foo" self.store.add_graph(123, "filename", "foo") factory = StubProcessFactory() self.graph_manager.process_factory = factory result = self.graph_manager.run() self.assertEqual(len(factory.spawns), 0) def check(ignore): self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"data": {123: {"error": u"UnknownUserError: Unknown user 'foo'", "script-hash": "", "values": []}}, "type": "custom-graph"}]) return result.addCallback(check) def test_run_timeout(self): filename = self.makeFile("some content") self.store.add_graph(123, filename, None) factory = StubProcessFactory() self.graph_manager.process_factory = factory result = self.graph_manager.run() self.assertEqual(len(factory.spawns), 1) spawn = factory.spawns[0] protocol = spawn[0] protocol.makeConnection(DummyProcess()) self.assertEqual(spawn[1], filename) self.manager.reactor.advance(110) protocol.processEnded(Failure(ProcessDone(0))) def check(ignore): self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"data": {123: {"error": u"Process exceeded the 10 seconds limit", "script-hash": "9893532233caff98cd083a116b013c0b", "values": []}}, "type": "custom-graph"}]) return result.addCallback(check) def test_run_removed_file(self): """ If run is called on a script file that has been removed, it doesn't try to run it, but reports it with an empty hash value. """ self.store.add_graph(123, "/nonexistent", None) factory = StubProcessFactory() self.graph_manager.process_factory = factory self.graph_manager.run() self.assertEqual(len(factory.spawns), 0) self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"data": {123: {"error": u"", "script-hash": "", "values": []}}, "type": "custom-graph"}]) def test_send_message_add_stored_graph(self): """ C{send_message} sends the graph with no data, to notify the server of the existence of the script, even if the script hasn't been run yet.
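The message should therefore carry the script hash but an empty values list, roughly {"data": {123: {"error": u"", "script-hash": "...", "values": []}}, "type": "custom-graph"}, as asserted below.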
""" uid = os.getuid() info = pwd.getpwuid(uid) username = info.pw_name self.manager.dispatch_message( {"type": "custom-graph-add", "interpreter": "/bin/sh", "code": "echo hi!", "username": username, "graph-id": 123}) self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"api": SERVER_API, "data": {123: {"error": u"", "script-hash": "e00a2f44dbc7b6710ce32af2348aec9b", "values": []}}, "timestamp": 0, "type": "custom-graph"}]) def test_send_message_check_not_present_graph(self): """C{send_message} checks the presence of the custom-graph script.""" uid = os.getuid() info = pwd.getpwuid(uid) username = info.pw_name self.manager.dispatch_message( {"type": "custom-graph-add", "interpreter": "/bin/sh", "code": "echo hi!", "username": username, "graph-id": 123}) filename = self.store.get_graph(123)[1] os.unlink(filename) self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"api": SERVER_API, "data": {}, "timestamp": 0, "type": "custom-graph"}]) def test_send_message_dont_rehash(self): """ C{send_message} uses hash already stored if still no data has been found. """ uid = os.getuid() info = pwd.getpwuid(uid) username = info.pw_name self.manager.dispatch_message( {"type": "custom-graph-add", "interpreter": "/bin/sh", "code": "echo hi!", "username": username, "graph-id": 123}) self.graph_manager.exchange() self.graph_manager._get_script_hash = lambda x: 1 / 0 self.graph_manager.do_send = True self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"api": SERVER_API, "data": {123: {"error": u"", "script-hash": "e00a2f44dbc7b6710ce32af2348aec9b", "values": []}}, "timestamp": 0, "type": "custom-graph"}, {"api": SERVER_API, "data": {123: {"error": u"", "script-hash": "e00a2f44dbc7b6710ce32af2348aec9b", "values": []}}, "timestamp": 0, "type": "custom-graph"}]) def test_send_message_rehash_if_necessary(self): uid = os.getuid() info = pwd.getpwuid(uid) username = info.pw_name self.manager.dispatch_message( {"type": "custom-graph-add", "interpreter": "/bin/sh", "code": "echo hi!", "username": username, "graph-id": 123}) self.graph_manager.exchange() self.manager.dispatch_message( {"type": "custom-graph-add", "interpreter": "/bin/sh", "code": "echo bye!", "username": username, "graph-id": 123}) self.graph_manager.do_send = True self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"api": SERVER_API, "data": {123: {"error": u"", "script-hash": "e00a2f44dbc7b6710ce32af2348aec9b", "values": []}}, "timestamp": 0, "type": "custom-graph"}, {"api": SERVER_API, "data": {123: {"error": u"", "script-hash": "d483816dc0fbb51ede42502a709b0e2a", "values": []}}, "timestamp": 0, "type": "custom-graph"}]) def test_run_with_script_updated(self): """ If a script is updated while a data point is being retrieved, the data point is discarded and no value is sent, but the new script is mentioned. 
""" uid = os.getuid() info = pwd.getpwuid(uid) username = info.pw_name self.manager.dispatch_message( {"type": "custom-graph-add", "interpreter": "/bin/sh", "code": "echo 1.0", "username": username, "graph-id": 123}) factory = StubProcessFactory() self.graph_manager.process_factory = factory result = self.graph_manager.run() self.assertEqual(len(factory.spawns), 1) spawn = factory.spawns[0] self.manager.dispatch_message( {"type": "custom-graph-add", "interpreter": "/bin/sh", "code": "echo 2.0", "username": username, "graph-id": 123}) self._exit_process_protocol(spawn[0], "1.0") def check(ignore): self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"api": SERVER_API, "data": {123: {"error": u"", "script-hash": "991e15a81929c79fe1d243b2afd99c62", "values": []}}, "timestamp": 0, "type": "custom-graph"}]) return result.addCallback(check) def test_run_with_script_removed(self): """ If a script is removed while a data point is being retrieved, the data point is discarded and no data is sent at all. """ uid = os.getuid() info = pwd.getpwuid(uid) username = info.pw_name self.manager.dispatch_message( {"type": "custom-graph-add", "interpreter": "/bin/sh", "code": "echo 1.0", "username": username, "graph-id": 123}) factory = StubProcessFactory() self.graph_manager.process_factory = factory result = self.graph_manager.run() self.assertEqual(len(factory.spawns), 1) spawn = factory.spawns[0] self.manager.dispatch_message( {"type": "custom-graph-remove", "graph-id": 123}) self._exit_process_protocol(spawn[0], "1.0") def check(ignore): self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"api": SERVER_API, "data": {}, "timestamp": 0, "type": "custom-graph"}]) return result.addCallback(check) def test_run_not_accepted_types(self): """ If "custom-graph" is not an accepted message-type anymore, C{CustomGraphPlugin.run} shouldn't even run the graph scripts. """ self.broker_service.message_store.set_accepted_types([]) uid = os.getuid() info = pwd.getpwuid(uid) username = info.pw_name self.manager.dispatch_message( {"type": "custom-graph-add", "interpreter": "/bin/sh", "code": "echo 1.0", "username": username, "graph-id": 123}) factory = StubProcessFactory() self.graph_manager.process_factory = factory result = self.graph_manager.run() self.assertEqual(len(factory.spawns), 0) return result.addCallback(self.assertIdentical, None) def test_run_without_graph(self): """ If no graph is available, C{CustomGraphPlugin.run} doesn't even call C{call_if_accepted} on the broker and return immediately an empty list of results. """ self.graph_manager.registry.broker.call_if_accepted = ( lambda *args: 1 / 0) factory = StubProcessFactory() self.graph_manager.process_factory = factory result = self.graph_manager.run() self.assertEqual(len(factory.spawns), 0) return result.addCallback(self.assertEqual, []) def test_run_unknown_user_with_unicode(self): """ Using a non-existent user containing unicode characters fails with the appropriate error message. 
""" username = u"non-existent-f\N{LATIN SMALL LETTER E WITH ACUTE}e" self.manager.config.script_users = "ALL" filename = self.makeFile("some content") self.store.add_graph(123, filename, username) factory = StubProcessFactory() self.graph_manager.process_factory = factory result = self.graph_manager.run() self.assertEqual(len(factory.spawns), 0) def check(ignore): self.graph_manager.exchange() self.assertMessages( self.broker_service.message_store.get_pending_messages(), [{"data": {123: {"error": u"UnknownUserError: Unknown user '%s'" % username, "script-hash": "9893532233caff98cd083a116b013c0b", "values": []}}, "type": "custom-graph"}]) return result.addCallback(check) landscape-client-14.01/landscape/manager/tests/test_config.py0000644000175000017500000000450712301414317024127 0ustar andreasandreasfrom landscape.tests.helpers import LandscapeTest from landscape.manager.config import ManagerConfiguration, ALL_PLUGINS from landscape.manager.scriptexecution import ALL_USERS class ManagerConfigurationTest(LandscapeTest): def setUp(self): super(ManagerConfigurationTest, self).setUp() self.config = ManagerConfiguration() def test_plugin_factories(self): """By default all plugins are enabled.""" self.assertEqual(["ProcessKiller", "PackageManager", "UserManager", "ShutdownManager", "AptSources", "HardwareInfo", "KeystoneToken", "HAService"], ALL_PLUGINS) self.assertEqual(ALL_PLUGINS, self.config.plugin_factories) def test_plugin_factories_with_manager_plugins(self): """ The C{--manager-plugins} command line option can be used to specify which plugins should be active. """ self.config.load(["--manager-plugins", "ProcessKiller"]) self.assertEqual(self.config.plugin_factories, ["ProcessKiller"]) def test_include_script_execution(self): """ Extra plugins can be specified with the C{--include-manager-plugins} command line option. """ self.config.load(["--include-manager-plugins", "ScriptExecution"]) self.assertEqual(len(self.config.plugin_factories), len(ALL_PLUGINS) + 1) self.assertTrue('ScriptExecution' in self.config.plugin_factories) def test_get_allowed_script_users(self): """ If no script users are specified, the default is 'nobody'. """ self.assertEqual(self.config.get_allowed_script_users(), ["nobody"]) def test_get_allowed_script_users_all(self): """ When script_users is C{ALL}, C{get_allowed_script_users} returns L{ALL_USERS}. """ self.config.load(["--script-users", "\tALL "]) self.assertIs(self.config.get_allowed_script_users(), ALL_USERS) def test_get_allowed_script_users_with_users(self): """ It's possible to specify a list of usernames to allow scripts to run as. 
""" self.config.load(["--script-users", "foo, bar,baz"]) self.assertEqual(self.config.get_allowed_script_users(), ["foo", "bar", "baz"]) landscape-client-14.01/landscape/manager/tests/test_haservice.py0000644000175000017500000003155312301414317024634 0ustar andreasandreasimport os from twisted.internet.defer import Deferred from landscape.manager.haservice import HAService from landscape.manager.plugin import SUCCEEDED, FAILED from landscape.tests.helpers import LandscapeTest, ManagerHelper from landscape.tests.mocker import ANY class HAServiceTests(LandscapeTest): helpers = [ManagerHelper] def setUp(self): super(HAServiceTests, self).setUp() self.ha_service = HAService() self.ha_service.JUJU_UNITS_BASE = self.makeDir() self.unit_name = "my-service/9" self.unit_path = "unit-" + self.unit_name.replace("/", "-") self.scripts_dir = os.path.join( self.ha_service.JUJU_UNITS_BASE, self.unit_path, "charm/scripts") self.health_check_d = os.path.join( self.scripts_dir, self.ha_service.HEALTH_SCRIPTS_DIR) # create entire dir path os.makedirs(self.health_check_d) self.manager.add(self.ha_service) cluster_online = self.makeFile( content="#!/bin/bash\nexit 0", basename="add_to_cluster", dirname=self.scripts_dir) os.chmod(cluster_online, 0755) cluster_standby = self.makeFile( content="#!/bin/bash\nexit 0", basename="remove_from_cluster", dirname=self.scripts_dir) os.chmod(cluster_standby, 0755) service = self.broker_service service.message_store.set_accepted_types(["operation-result"]) def test_invalid_server_service_state_request(self): """ When the landscape server requests a C{service-state} other than 'online' or 'standby' the client responds with the appropriate error. """ logging_mock = self.mocker.replace("logging.error") logging_mock("Invalid cluster participation state requested BOGUS.") self.mocker.replay() self.manager.dispatch_message( {"type": "change-ha-service", "service-name": "my-service", "unit-name": self.unit_name, "service-state": "BOGUS", "operation-id": 1}) service = self.broker_service self.assertMessages( service.message_store.get_pending_messages(), [{"type": "operation-result", "result-text": u"Invalid cluster participation state requested BOGUS.", "status": FAILED, "operation-id": 1}]) def test_not_a_juju_computer(self): """ When not a juju charmed computer, L{HAService} reponds with an error due to missing JUJU_UNITS_BASE dir. """ self.ha_service.JUJU_UNITS_BASE = "/I/don't/exist" logging_mock = self.mocker.replace("logging.error") logging_mock("This computer is not deployed with juju. " "Changing high-availability service not supported.") self.mocker.replay() self.manager.dispatch_message( {"type": "change-ha-service", "service-name": "my-service", "unit-name": self.unit_name, "service-state": self.ha_service.STATE_STANDBY, "operation-id": 1}) service = self.broker_service self.assertMessages( service.message_store.get_pending_messages(), [{"type": "operation-result", "result-text": u"This computer is not deployed with juju. Changing " u"high-availability service not supported.", "status": FAILED, "operation-id": 1}]) def test_incorrect_juju_unit(self): """ When not the specific juju charmed computer, L{HAService} reponds with an error due to missing the JUJU_UNITS_BASE/$JUJU_UNIT dir. """ logging_mock = self.mocker.replace("logging.error") logging_mock("This computer is not juju unit some-other-service-0. 
" "Unable to modify high-availability services.") self.mocker.replay() self.manager.dispatch_message( {"type": "change-ha-service", "service-name": "some-other-service", "unit-name": "some-other-service-0", "service-state": "standby", "operation-id": 1}) service = self.broker_service self.assertMessages( service.message_store.get_pending_messages(), [{"type": "operation-result", "result-text": u"This computer is not juju unit some-other-service-0. " u"Unable to modify high-availability services.", "status": FAILED, "operation-id": 1}]) def test_wb_no_health_check_directory(self): """ When unable to find a valid C{HEALTH_CHECK_DIR}, L{HAService} will succeed but log an informational message. """ self.ha_service.HEALTH_SCRIPTS_DIR = "I/don't/exist" def should_not_be_called(result): self.fail( "_run_health_checks failed on absent health check directory.") def check_success_result(result): self.assertEqual( result, "Skipping juju charm health checks. No scripts at " "%s/I/don't/exist." % self.scripts_dir) result = self.ha_service._run_health_checks(self.scripts_dir) result.addCallbacks(check_success_result, should_not_be_called) def test_wb_no_health_check_scripts(self): """ When C{HEALTH_CHECK_DIR} exists but, no scripts exist, L{HAService} will log an informational message, but succeed. """ # In setup we created a health check directory but placed no health # scripts in it. def should_not_be_called(result): self.fail( "_run_health_checks failed on empty health check directory.") def check_success_result(result): self.assertEqual( result, "Skipping juju charm health checks. No scripts at " "%s/%s." % (self.scripts_dir, self.ha_service.HEALTH_SCRIPTS_DIR)) result = self.ha_service._run_health_checks(self.scripts_dir) result.addCallbacks(check_success_result, should_not_be_called) def test_wb_failed_health_script(self): """ L{HAService} runs all health check scripts found in the C{HEALTH_CHECK_DIR}. If any script fails, L{HAService} will return a deferred L{fail}. """ def expected_failure(result): self.assertEqual( str(result.value), "Failed charm script: %s/%s/my-health-script-2 " "exited with return code 1." % (self.scripts_dir, self.ha_service.HEALTH_SCRIPTS_DIR)) def check_success_result(result): self.fail( "_run_health_checks succeded despite a failed health script.") for number in [1, 2, 3]: script_path = ( "%s/my-health-script-%d" % (self.health_check_d, number)) health_script = file(script_path, "w") if number == 2: health_script.write("#!/bin/bash\nexit 1") else: health_script.write("#!/bin/bash\nexit 0") health_script.close() os.chmod(script_path, 0755) result = self.ha_service._run_health_checks(self.scripts_dir) result.addCallbacks(check_success_result, expected_failure) return result def test_missing_cluster_standby_or_cluster_online_scripts(self): """ When no cluster status change scripts are delivered by the charm, L{HAService} will still return a L{succeeded}. C{HEALTH_CHECK_DIR}. If any script fails, L{HAService} will return a deferred L{fail}. """ def should_not_be_called(result): self.fail( "_change_cluster_participation failed on absent charm script.") def check_success_result(result): self.assertEqual( result, "This computer is always a participant in its high-availabilty" " cluster. 
No juju charm cluster settings changed.") self.ha_service.CLUSTER_ONLINE = "I/don't/exist" self.ha_service.CLUSTER_STANDBY = "I/don't/exist" result = self.ha_service._change_cluster_participation( None, self.scripts_dir, self.ha_service.STATE_ONLINE) result.addCallbacks(check_success_result, should_not_be_called) # Now test the cluster standby script result = self.ha_service._change_cluster_participation( None, self.scripts_dir, self.ha_service.STATE_STANDBY) result.addCallbacks(check_success_result, should_not_be_called) return result def test_failed_cluster_standby_or_cluster_online_scripts(self): def expected_failure(result, script_path): self.assertEqual( str(result.value), "Failed charm script: %s exited with return code 2." % (script_path)) def check_success_result(result): self.fail( "_change_cluster_participation ignored charm script failure.") # Rewrite both cluster scripts as failures for script_name in [ self.ha_service.CLUSTER_ONLINE, self.ha_service.CLUSTER_STANDBY]: cluster_online = file( "%s/%s" % (self.scripts_dir, script_name), "w") cluster_online.write("#!/bin/bash\nexit 2") cluster_online.close() result = self.ha_service._change_cluster_participation( None, self.scripts_dir, self.ha_service.STATE_ONLINE) result.addCallback(check_success_result) script_path = ( "%s/%s" % (self.scripts_dir, self.ha_service.CLUSTER_ONLINE)) result.addErrback(expected_failure, script_path) # Now test the cluster standby script result = self.ha_service._change_cluster_participation( None, self.scripts_dir, self.ha_service.STATE_STANDBY) result.addCallback(check_success_result) script_path = ( "%s/%s" % (self.scripts_dir, self.ha_service.CLUSTER_STANDBY)) result.addErrback(expected_failure, script_path) return result def test_run_success_cluster_standby(self): """ When it receives a C{change-ha-service} message with C{STATE_STANDBY} requested, the manager runs the C{CLUSTER_STANDBY} script and returns a successful operation-result to the server. """ message = ({"type": "change-ha-service", "service-name": "my-service", "unit-name": self.unit_name, "service-state": self.ha_service.STATE_STANDBY, "operation-id": 1}) deferred = Deferred() def validate_messages(value): cluster_script = "%s/%s" % ( self.scripts_dir, self.ha_service.CLUSTER_STANDBY) service = self.broker_service self.assertMessages( service.message_store.get_pending_messages(), [{"type": "operation-result", "result-text": u"%s succeeded." % cluster_script, "status": SUCCEEDED, "operation-id": 1}]) def handle_has_run(handle_result_deferred): handle_result_deferred.chainDeferred(deferred) return deferred.addCallback(validate_messages) ha_service_mock = self.mocker.patch(self.ha_service) ha_service_mock.handle_change_ha_service(ANY) self.mocker.passthrough(handle_has_run) self.mocker.replay() self.manager.add(self.ha_service) self.manager.dispatch_message(message) return deferred def test_run_success_cluster_online(self): """ When it receives a C{change-ha-service} message with C{STATE_ONLINE} requested, the manager runs the C{CLUSTER_ONLINE} script and returns a successful operation-result to the server.
""" message = ({"type": "change-ha-service", "service-name": "my-service", "unit-name": self.unit_name, "service-state": self.ha_service.STATE_ONLINE, "operation-id": 1}) deferred = Deferred() def validate_messages(value): cluster_script = "%s/%s" % ( self.scripts_dir, self.ha_service.CLUSTER_ONLINE) service = self.broker_service self.assertMessages( service.message_store.get_pending_messages(), [{"type": "operation-result", "result-text": u"%s succeeded." % cluster_script, "status": SUCCEEDED, "operation-id": 1}]) def handle_has_run(handle_result_deferred): handle_result_deferred.chainDeferred(deferred) return deferred.addCallback(validate_messages) ha_service_mock = self.mocker.patch(self.ha_service) ha_service_mock.handle_change_ha_service(ANY) self.mocker.passthrough(handle_has_run) self.mocker.replay() self.manager.add(self.ha_service) self.manager.dispatch_message(message) return deferred landscape-client-14.01/landscape/manager/aptsources.py0000644000175000017500000001224412301414317022646 0ustar andreasandreasimport glob import os import pwd import grp import shutil import tempfile from twisted.internet.defer import succeed from landscape.lib.twisted_util import spawn_process from landscape.manager.plugin import ManagerPlugin from landscape.package.reporter import find_reporter_command class ProcessError(Exception): """Exception raised when running a process fails.""" class AptSources(ManagerPlugin): """A plugin managing sources.list content.""" SOURCES_LIST = "/etc/apt/sources.list" SOURCES_LIST_D = "/etc/apt/sources.list.d" def register(self, registry): super(AptSources, self).register(registry) registry.register_message("apt-sources-replace", self._handle_repositories) def _run_process(self, command, args, uid=None, gid=None): """ Run the process in an asynchronous fashion, to be overriden in tests. """ return spawn_process(command, args, uid=uid, gid=gid) def _handle_process_error(self, result): """ Turn a failed process command (code != 0) to a C{ProcessError}. """ out, err, code = result if code: raise ProcessError("%s\n%s" % (out, err)) def _handle_process_failure(self, failure): """ Turn a signaled process command to a C{ProcessError}. """ if not failure.check(ProcessError): out, err, signal = failure.value raise ProcessError("%s\n%s" % (out, err)) else: return failure def _remove_and_continue(self, passthrough, path): """ Remove the temporary file created for the process, and forward the result. """ os.unlink(path) return passthrough def _handle_repositories(self, message): """ Handle a list of repositories to set on the machine. 
The format is the following: {"sources": [ {"name": "repository-name", "content": "deb http://archive.ubuntu.com/ubuntu/ maverick main\n\ "deb-src http://archive.ubuntu.com/ubuntu/ maverick main"} {"name": "repository-name-dev", "content": "deb http://archive.ubuntu.com/ubuntu/ maverick universe\n\ "deb-src http://archive.ubuntu.com/ubuntu/ maverick universe"}], "gpg-keys": ["-----BEGIN PGP PUBLIC KEY BLOCK-----\n\ XXXX -----END PGP PUBLIC KEY BLOCK-----", "-----BEGIN PGP PUBLIC KEY BLOCK-----\n\ YYY -----END PGP PUBLIC KEY BLOCK-----"]} """ deferred = succeed(None) for key in message["gpg-keys"]: fd, path = tempfile.mkstemp() os.close(fd) key_file = file(path, "w") key_file.write(key) key_file.close() deferred.addCallback( lambda ignore, path=path: self._run_process("/usr/bin/apt-key", ["add", path])) deferred.addCallback(self._handle_process_error) deferred.addBoth(self._remove_and_continue, path) deferred.addErrback(self._handle_process_failure) deferred.addCallback(self._handle_sources, message["sources"]) return self.call_with_operation_result(message, lambda: deferred) def _handle_sources(self, ignored, sources): """Handle sources repositories.""" fd, path = tempfile.mkstemp() os.close(fd) new_sources = file(path, "w") for line in file(self.SOURCES_LIST): stripped_line = line.strip() if not stripped_line or stripped_line.startswith("#"): new_sources.write(line) else: new_sources.write("#%s" % line) new_sources.close() original_stat = os.stat(self.SOURCES_LIST) shutil.move(path, self.SOURCES_LIST) os.chmod(self.SOURCES_LIST, original_stat.st_mode) os.chown(self.SOURCES_LIST, original_stat.st_uid, original_stat.st_gid) for filename in glob.glob(os.path.join(self.SOURCES_LIST_D, "*.list")): shutil.move(filename, "%s.save" % filename) for source in sources: filename = os.path.join(self.SOURCES_LIST_D, "landscape-%s.list" % source["name"]) sources_file = file(filename, "w") sources_file.write(source["content"]) sources_file.close() os.chmod(filename, 0644) return self._run_reporter().addCallback(lambda ignored: None) def _run_reporter(self): """Once the repositories are modified, trigger a reporter run.""" reporter = find_reporter_command() # Force an apt-update run, because the sources.list has changed args = ["--force-apt-update"] if self.registry.config.config is not None: args.append("--config=%s" % self.registry.config.config) if os.getuid() == 0: uid = pwd.getpwnam("landscape").pw_uid gid = grp.getgrnam("landscape").gr_gid else: uid = None gid = None return self._run_process(reporter, args, uid=uid, gid=gid) landscape-client-14.01/landscape/manager/packagemanager.py0000644000175000017500000000611412301414317023403 0ustar andreasandreasimport logging import os from twisted.internet.utils import getProcessOutput from twisted.internet.defer import succeed from landscape.package.store import PackageStore from landscape.package.changer import PackageChanger from landscape.package.releaseupgrader import ReleaseUpgrader from landscape.manager.plugin import ManagerPlugin class PackageManager(ManagerPlugin): run_interval = 1800 _package_store = None def register(self, registry): super(PackageManager, self).register(registry) self.config = registry.config if not self._package_store: filename = os.path.join(registry.config.data_path, "package/database") self._package_store = PackageStore(filename) registry.register_message("change-packages", self.handle_change_packages) registry.register_message("change-package-locks", self.handle_change_package_locks) 
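# Each of the handlers registered here queues the incoming message as # a task and spawns the matching helper process; see _handle() below.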
registry.register_message("release-upgrade", self.handle_release_upgrade) # When the package reporter notifies us that something has changed, # we want to run again to see if we can now fulfill tasks that were # skipped before. registry.reactor.call_on("package-data-changed", self.run) self.run() def _handle(self, cls, message): """Queue C{message} as a task, and spawn the proper handler.""" self._package_store.add_task(cls.queue_name, message) self.spawn_handler(cls) def handle_change_packages(self, message): return self._handle(PackageChanger, message) def handle_change_package_locks(self, message): return self._handle(PackageChanger, message) def handle_release_upgrade(self, message): return self._handle(ReleaseUpgrader, message) def run(self): result = self.registry.broker.get_accepted_message_types() result.addCallback(self._got_message_types) return result def _got_message_types(self, message_types): if "change-packages-result" in message_types: self.spawn_handler(PackageChanger) if "operation-result" in message_types: self.spawn_handler(ReleaseUpgrader) def spawn_handler(self, cls): args = ["--quiet"] if self.config.config: args.extend(["-c", self.config.config]) if self._package_store.get_next_task(cls.queue_name): # path is set to None so that getProcessOutput does not # chdir to "." see bug #211373 result = getProcessOutput(cls.find_command(), args=args, env=os.environ, errortoo=1, path=None) result.addCallback(self._got_output, cls) else: result = succeed(None) return result def _got_output(self, output, cls): if output: logging.warning("Package %s output:\n%s" % (cls.queue_name, output)) landscape-client-14.01/landscape/manager/hardwareinfo.py0000644000175000017500000000172212301414317023126 0ustar andreasandreasimport os from twisted.internet.utils import getProcessOutput from landscape.manager.plugin import ManagerPlugin class HardwareInfo(ManagerPlugin): """A plugin to retrieve hardware information.""" message_type = "hardware-info" run_interval = 60 * 60 * 24 run_immediately = True command = "/usr/bin/lshw" def register(self, registry): super(HardwareInfo, self).register(registry) self.call_on_accepted(self.message_type, self.send_message) def run(self): return self.registry.broker.call_if_accepted( self.message_type, self.send_message) def send_message(self): result = getProcessOutput( self.command, args=["-xml", "-quiet"], env=os.environ, path=None) return result.addCallback(self._got_output) def _got_output(self, output): message = {"type": self.message_type, "data": output} return self.registry.broker.send_message(message, self._session_id) landscape-client-14.01/landscape/manager/__init__.py0000644000175000017500000000020412301414317022206 0ustar andreasandreas""" The manager installs and removes packages, runs scripts and retrieves information that is only accessible to the root user. """ landscape-client-14.01/landscape/__init__.py0000644000175000017500000000271412301414317020604 0ustar andreasandreasDEBIAN_REVISION = "" UPSTREAM_VERSION = "13.10+bzr73" VERSION = "%s%s" % (UPSTREAM_VERSION, DEBIAN_REVISION) # The "server-api" field of outgoing messages will be set to this value, and # used by the server message system to lookup the correct MessageAPI adapter # for handling the messages sent by the client. Bump it when the schema of any # of the messages sent by the client changes in a backward-incompatible way. # # Changelog: # # 3.2: # * Add new "eucalyptus-info" and "eucalyptus-info-error" messages. 
# SERVER_API = "3.2" # XXX This is needed for backward compatibility in the server code importing # the API variable. We should eventually replace it in the server code. API = SERVER_API # The "client-api" field of outgoing messages will be set to this value, and # used by the server to know which schemas the message types accepted by the # client support. Bump it when the schema of an accepted message type changes # and update the changelog below as needed. # # Changelog: # # 3.3: # * Add "binaries" field to "change-packages" # * Add "policy" field to "change-packages" # * Add new "change-package-locks" client accepted message type. # # 3.4: # * Add "hold" field to "change-packages" # * Add "remove-hold" field to "change-packages" # # 3.5: # * Support per-exchange authentication tokens # # 3.6: # * Handle scopes in resynchronize requests # # 3.7: # * Server returns 402 Payment Required if the computer has no valid license. # CLIENT_API = "3.7" landscape-client-14.01/landscape/package/0000755000175000017500000000000012301414317020062 5ustar andreasandreaslandscape-client-14.01/landscape/package/reporter.py0000644000175000017500000006346112301414317022310 0ustar andreasandreasimport urlparse import logging import time import sys import os import glob import apt_pkg from twisted.internet.defer import Deferred, succeed from landscape.lib.sequenceranges import sequence_to_ranges from landscape.lib.twisted_util import gather_results, spawn_process from landscape.lib.fetch import fetch_async from landscape.lib.fs import touch_file from landscape.lib import bpickle from landscape.package.taskhandler import ( PackageTaskHandlerConfiguration, PackageTaskHandler, run_task_handler) from landscape.package.store import UnknownHashIDRequest, FakePackageStore HASH_ID_REQUEST_TIMEOUT = 7200 MAX_UNKNOWN_HASHES_PER_REQUEST = 500 class PackageReporterConfiguration(PackageTaskHandlerConfiguration): """Specialized configuration for the Landscape package-reporter.""" def make_parser(self): """ Specialize L{Configuration.make_parser}, adding reporter-specific options. """ parser = super(PackageReporterConfiguration, self).make_parser() parser.add_option("--force-apt-update", default=False, action="store_true", help="Force running apt-update.") return parser class PackageReporter(PackageTaskHandler): """Report information about the system packages. @cvar queue_name: Name of the task queue to pick tasks from. """ config_factory = PackageReporterConfiguration queue_name = "reporter" apt_update_filename = "/usr/lib/landscape/apt-update" sources_list_filename = "/etc/apt/sources.list" sources_list_directory = "/etc/apt/sources.list.d" _session_id = None _got_task = False def run(self): self._got_task = False result = Deferred() # Set us up to communicate properly result.addCallback(lambda x: self.get_session_id()) result.addCallback(lambda x: self.run_apt_update()) # If the appropriate hash=>id db is not there, fetch it result.addCallback(lambda x: self.fetch_hash_id_db()) # Attach the hash=>id database if available result.addCallback(lambda x: self.use_hash_id_db()) # Now, handle any queued tasks. result.addCallback(lambda x: self.handle_tasks()) # Then, remove any expired hash=>id translation requests. result.addCallback(lambda x: self.remove_expired_hash_id_requests()) # After that, check if we have any unknown hashes to request. result.addCallback(lambda x: self.request_unknown_hashes()) # Finally, verify if we have anything new to report to the server.
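# detect_changes() fires a "package-data-changed" broker event when it # finds differences; see its definition below.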
result.addCallback(lambda x: self.detect_changes()) result.callback(None) return result def send_message(self, message): return self._broker.send_message( message, self._session_id, True) def fetch_hash_id_db(self): """ Fetch the appropriate pre-canned database of hash=>id mappings from the server. If the database is already present, it won't be downloaded twice. The format of the database filename is <uuid>_<codename>_<arch>, and it will be downloaded from the HTTP directory set in config.package_hash_id_url, or config.url/hash-id-databases if the former is not set. Fetch failures are handled gracefully and logged as appropriate. """ def fetch_it(hash_id_db_filename): if hash_id_db_filename is None: # Couldn't determine which hash=>id database to fetch, # just ignore the failure and go on return if os.path.exists(hash_id_db_filename): # We don't download twice return base_url = self._get_hash_id_db_base_url() if not base_url: logging.warning("Can't determine the hash=>id database url") return # Cast to str as pycurl doesn't like unicode url = str(base_url + os.path.basename(hash_id_db_filename)) def fetch_ok(data): hash_id_db_fd = open(hash_id_db_filename, "w") hash_id_db_fd.write(data) hash_id_db_fd.close() logging.info("Downloaded hash=>id database from %s" % url) def fetch_error(failure): exception = failure.value logging.warning("Couldn't download hash=>id database: %s" % str(exception)) result = fetch_async(url, cainfo=self._config.get("ssl_public_key")) result.addCallback(fetch_ok) result.addErrback(fetch_error) return result result = self._determine_hash_id_db_filename() result.addCallback(fetch_it) return result def _get_hash_id_db_base_url(self): base_url = self._config.get("package_hash_id_url") if not base_url: if not self._config.get("url"): # We really have no idea where to download from return None # If config.url is http://host:123/path/to/message-system # then we'll use http://host:123/path/to/hash-id-databases base_url = urlparse.urljoin(self._config.url.rstrip("/"), "hash-id-databases") return base_url.rstrip("/") + "/" def _apt_sources_have_changed(self): """Return a boolean indicating if the APT sources were modified.""" from landscape.monitor.packagemonitor import PackageMonitor filenames = [] if os.path.exists(self.sources_list_filename): filenames.append(self.sources_list_filename) if os.path.exists(self.sources_list_directory): filenames.extend( [os.path.join(self.sources_list_directory, filename) for filename in os.listdir(self.sources_list_directory)]) for filename in filenames: seconds_since_last_change = ( time.time() - os.path.getmtime(filename)) if seconds_since_last_change < PackageMonitor.run_interval: return True return False def _apt_update_timeout_expired(self, interval): """Check if the apt-update timeout has passed.""" if os.path.exists(self.update_notifier_stamp): stamp = self.update_notifier_stamp elif os.path.exists(self._config.update_stamp_filename): stamp = self._config.update_stamp_filename else: return True last_update = os.stat(stamp).st_mtime return (last_update + interval) < time.time() def run_apt_update(self): """Run apt-update and log a warning in case of non-zero exit code.
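apt-update is only spawned when explicitly forced, when the APT sources appear to have changed recently, or when the configured update interval has expired; otherwise a successful empty result is returned right away.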
@return: a deferred returning (out, err, code) """ if (self._config.force_apt_update or self._apt_sources_have_changed() or self._apt_update_timeout_expired( self._config.apt_update_interval)): result = spawn_process(self.apt_update_filename) def callback((out, err, code)): accepted_apt_errors = ( "Problem renaming the file /var/cache/apt/srcpkgcache.bin", "Problem renaming the file /var/cache/apt/pkgcache.bin") touch_file(self._config.update_stamp_filename) logging.debug( "'%s' exited with status %d (out='%s', err='%s')" % ( self.apt_update_filename, code, out, err)) if code != 0: logging.warning("'%s' exited with status %d (%s)" % ( self.apt_update_filename, code, err)) # Errors caused by missing cache files are acceptable, as # they are not an issue for the lists update process. # These errors can happen if an 'apt-get clean' is run # while 'apt-get update' is running. for message in accepted_apt_errors: if message in err: out, err, code = "", "", 0 break elif not self._facade.get_channels(): code = 1 err = ("There are no APT sources configured in %s or %s." % (self.sources_list_filename, self.sources_list_directory)) deferred = self._broker.call_if_accepted( "package-reporter-result", self.send_result, code, err) deferred.addCallback(lambda ignore: (out, err, code)) return deferred return result.addCallback(callback) else: logging.debug("'%s' didn't run, update interval has not passed" % self.apt_update_filename) return succeed(("", "", 0)) def send_result(self, code, err): """ Report the package reporter result to the server in a message. """ message = { "type": "package-reporter-result", "code": code, "err": err} return self.send_message(message) def handle_task(self, task): message = task.data if message["type"] == "package-ids": self._got_task = True return self._handle_package_ids(message) if message["type"] == "resynchronize": self._got_task = True return self._handle_resynchronize() def _handle_package_ids(self, message): unknown_hashes = [] try: request = self._store.get_hash_id_request(message["request-id"]) except UnknownHashIDRequest: # We've lost this request somehow. It will be re-requested later. return succeed(None) hash_ids = {} for hash, id in zip(request.hashes, message["ids"]): if id is None: unknown_hashes.append(hash) else: hash_ids[hash] = id self._store.set_hash_ids(hash_ids) logging.info("Received %d package hash => id translations, %d hashes " "are unknown." % (len(hash_ids), len(unknown_hashes))) if unknown_hashes: result = self._handle_unknown_packages(unknown_hashes) else: result = succeed(None) # Remove the request if everything goes well. result.addCallback(lambda x: request.remove()) return result def _handle_resynchronize(self): self._store.clear_available() self._store.clear_available_upgrades() self._store.clear_installed() self._store.clear_locked() # Don't clear the hash_id_requests table because the messages # associated with the existing requests might still have to be # delivered, and if we clear the table and later create a new request, # that new request could get the same id as one of the deleted ones, # and when the pending message eventually gets delivered the reporter # would think that the message is associated to the newly created # request, as it has the same id as the deleted request the message # actually refers to. This would cause the ids in the message to be # possibly mapped to the wrong hashes.
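# For instance (illustrative): request 42 is deleted here, a later # request happens to reuse id 42, and a still-pending "package-ids" # message answering the old request 42 would then be applied to the new # request's hashes.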
# # This problem would happen for example when switching the client from # one Landscape server to another, because the uuid-changed event would # cause a resynchronize task to be created by the monitor. See #417122. return succeed(None) def _handle_unknown_packages(self, hashes): self._facade.ensure_channels_reloaded() hashes = set(hashes) added_hashes = [] packages = [] for package in self._facade.get_packages(): hash = self._facade.get_package_hash(package) if hash in hashes: added_hashes.append(hash) skeleton = self._facade.get_package_skeleton(package) packages.append({"type": skeleton.type, "name": skeleton.name, "version": skeleton.version, "section": skeleton.section, "summary": skeleton.summary, "description": skeleton.description, "size": skeleton.size, "installed-size": skeleton.installed_size, "relations": skeleton.relations}) if packages: logging.info("Queuing messages with data for %d packages to " "exchange urgently." % len(packages)) message = {"type": "add-packages", "packages": packages} result = self._send_message_with_hash_id_request(message, added_hashes) else: result = succeed(None) return result def remove_expired_hash_id_requests(self): now = time.time() timeout = now - HASH_ID_REQUEST_TIMEOUT def update_or_remove(is_pending, request): if is_pending: # Request is still in the queue. Update the timestamp. request.timestamp = now elif request.timestamp < timeout: # Request was delivered, and is older than the threshold. request.remove() results = [] for request in self._store.iter_hash_id_requests(): if request.message_id is None: # May happen in some rare cases, when a send_message() is # interrupted abruptly. If it just fails normally, the # request is removed and so we don't get here. request.remove() else: result = self._broker.is_message_pending(request.message_id) result.addCallback(update_or_remove, request) results.append(result) return gather_results(results) def request_unknown_hashes(self): """Detect available packages for which we have no hash=>id mappings. This method will verify if there are packages that APT knows about but for which we don't have an id yet (no hash => id translation), and deliver a message (unknown-package-hashes) to request them. Hashes previously requested won't be requested again, unless they have already expired and removed from the database. """ self._facade.ensure_channels_reloaded() unknown_hashes = set() for package in self._facade.get_packages(): hash = self._facade.get_package_hash(package) if self._store.get_hash_id(hash) is None: unknown_hashes.add(self._facade.get_package_hash(package)) # Discard unknown hashes in existent requests. for request in self._store.iter_hash_id_requests(): unknown_hashes -= set(request.hashes) if not unknown_hashes: result = succeed(None) else: unknown_hashes = sorted(unknown_hashes) unknown_hashes = unknown_hashes[:MAX_UNKNOWN_HASHES_PER_REQUEST] logging.info("Queuing request for package hash => id " "translation on %d hash(es)." 
% len(unknown_hashes)) message = {"type": "unknown-package-hashes", "hashes": unknown_hashes} result = self._send_message_with_hash_id_request(message, unknown_hashes) return result def _send_message_with_hash_id_request(self, message, unknown_hashes): """Create a hash_id_request and send message with "request-id".""" request = self._store.add_hash_id_request(unknown_hashes) message["request-id"] = request.id result = self.send_message(message) def set_message_id(message_id): request.message_id = message_id def send_message_failed(failure): request.remove() return failure return result.addCallbacks(set_message_id, send_message_failed) def detect_changes(self): """Detect all changes concerning packages. If some changes were detected with respect to our last run, then an event of type 'package-data-changed' will be fired in the broker reactor. """ def changes_detected(result): if result: # Something has changed, notify the broker. return self._broker.fire_event("package-data-changed") deferred = self.detect_packages_changes() return deferred.addCallback(changes_detected) def detect_packages_changes(self): """ Check if any information regarding packages has changed, and if so compute the changes and send a signal. """ if self._got_task or self._package_state_has_changed(): return self._compute_packages_changes() else: return succeed(None) def _package_state_has_changed(self): """ Detect changes in the universe of known packages. This uses the state of packages in /var/lib/dpkg/status and other files and simply checks whether they have changed using their "last changed" timestamp on the filesystem. @return: True if the status changed, False otherwise. """ stamp_file = self._config.detect_package_changes_stamp if not os.path.exists(stamp_file): return True status_file = apt_pkg.config.find_file("dir::state::status") lists_dir = apt_pkg.config.find_dir("dir::state::lists") files = [status_file, lists_dir] files.extend(glob.glob("%s/*Packages" % lists_dir)) last_checked = os.stat(stamp_file).st_mtime for f in files: last_changed = os.stat(f).st_mtime if last_changed >= last_checked: return True return False def _compute_packages_changes(self): """Analyse changes in the universe of known packages. This method will verify if there are packages that: - are now installed, and were not; - are now available, and were not; - are now locked, and were not; - were previously available but are not anymore; - were previously installed but are not anymore; - were previously locked but are not anymore; Additionally it will report package locks that: - are now set, and were not; - were previously set but are not anymore; In all cases, the server is notified of the new situation with a "packages" message. @return: A deferred resulting in C{True} if package changes were detected with respect to the previous run, or C{False} otherwise.
""" self._facade.ensure_channels_reloaded() old_installed = set(self._store.get_installed()) old_available = set(self._store.get_available()) old_upgrades = set(self._store.get_available_upgrades()) old_locked = set(self._store.get_locked()) current_installed = set() current_available = set() current_upgrades = set() current_locked = set() for package in self._facade.get_packages(): hash = self._facade.get_package_hash(package) id = self._store.get_hash_id(hash) if id is not None: if self._facade.is_package_installed(package): current_installed.add(id) if self._facade.is_package_available(package): current_available.add(id) else: current_available.add(id) # Are there any packages that this package is an upgrade for? if self._facade.is_package_upgrade(package): current_upgrades.add(id) for package in self._facade.get_locked_packages(): hash = self._facade.get_package_hash(package) id = self._store.get_hash_id(hash) if id is not None: current_locked.add(id) new_installed = current_installed - old_installed new_available = current_available - old_available new_upgrades = current_upgrades - old_upgrades new_locked = current_locked - old_locked not_installed = old_installed - current_installed not_available = old_available - current_available not_upgrades = old_upgrades - current_upgrades not_locked = old_locked - current_locked message = {} if new_installed: message["installed"] = \ list(sequence_to_ranges(sorted(new_installed))) if new_available: message["available"] = \ list(sequence_to_ranges(sorted(new_available))) if new_upgrades: message["available-upgrades"] = \ list(sequence_to_ranges(sorted(new_upgrades))) if new_locked: message["locked"] = \ list(sequence_to_ranges(sorted(new_locked))) if not_installed: message["not-installed"] = \ list(sequence_to_ranges(sorted(not_installed))) if not_available: message["not-available"] = \ list(sequence_to_ranges(sorted(not_available))) if not_upgrades: message["not-available-upgrades"] = \ list(sequence_to_ranges(sorted(not_upgrades))) if not_locked: message["not-locked"] = \ list(sequence_to_ranges(sorted(not_locked))) if not message: return succeed(False) message["type"] = "packages" result = self.send_message(message) logging.info("Queuing message with changes in known packages: " "%d installed, %d available, %d available upgrades, " "%d locked, %d not installed, %d not available, " "%d not available upgrades, %d not locked." % (len(new_installed), len(new_available), len(new_upgrades), len(new_locked), len(not_installed), len(not_available), len(not_upgrades), len(not_locked))) def update_currently_known(result): if new_installed: self._store.add_installed(new_installed) if not_installed: self._store.remove_installed(not_installed) if new_available: self._store.add_available(new_available) if new_locked: self._store.add_locked(new_locked) if not_available: self._store.remove_available(not_available) if new_upgrades: self._store.add_available_upgrades(new_upgrades) if not_upgrades: self._store.remove_available_upgrades(not_upgrades) if not_locked: self._store.remove_locked(not_locked) # Something has changed wrt the former run, let's update the # timestamp and return True. stamp_file = self._config.detect_package_changes_stamp touch_file(stamp_file) return True result.addCallback(update_currently_known) return result class FakeGlobalReporter(PackageReporter): """ A standard reporter, which additionally stores messages sent into its package store. 
""" package_store_class = FakePackageStore def send_message(self, message): self._store.save_message(message) return super(FakeGlobalReporter, self).send_message(message) class FakeReporter(PackageReporter): """ A fake reporter which only sends messages previously stored by a L{FakeGlobalReporter}. """ package_store_class = FakePackageStore global_store_filename = None def run(self): result = succeed(None) result.addCallback(lambda x: self.get_session_id()) # If the appropriate hash=>id db is not there, fetch it result.addCallback(lambda x: self.fetch_hash_id_db()) result.addCallback(lambda x: self._store.clear_tasks()) # Finally, verify if we have anything new to send to the server. result.addCallback(lambda x: self.send_pending_messages()) return result def send_pending_messages(self): """ As the last callback of L{PackageReporter}, sends messages stored. """ if self.global_store_filename is None: self.global_store_filename = os.environ["FAKE_PACKAGE_STORE"] if not os.path.exists(self.global_store_filename): return succeed(None) message_sent = set(self._store.get_message_ids()) global_store = FakePackageStore(self.global_store_filename) all_message_ids = set(global_store.get_message_ids()) not_sent = all_message_ids - message_sent deferred = succeed(None) got_type = set() if not_sent: messages = global_store.get_messages_by_ids(not_sent) sent = [] for message_id, message in messages: message = bpickle.loads(str(message)) if message["type"] not in got_type: got_type.add(message["type"]) sent.append(message_id) deferred.addCallback( lambda x, message=message: self.send_message(message)) self._store.save_message_ids(sent) return deferred def main(args): if "FAKE_GLOBAL_PACKAGE_STORE" in os.environ: return run_task_handler(FakeGlobalReporter, args) elif "FAKE_PACKAGE_STORE" in os.environ: return run_task_handler(FakeReporter, args) else: return run_task_handler(PackageReporter, args) def find_reporter_command(): dirname = os.path.dirname(os.path.abspath(sys.argv[0])) return os.path.join(dirname, "landscape-package-reporter") landscape-client-14.01/landscape/package/skeleton.py0000644000175000017500000001226312301414317022264 0ustar andreasandreasfrom landscape.lib.hashlib import sha1 import apt_pkg PACKAGE = 1 << 0 PROVIDES = 1 << 1 REQUIRES = 1 << 2 UPGRADES = 1 << 3 CONFLICTS = 1 << 4 DEB_PACKAGE = 1 << 16 | PACKAGE DEB_PROVIDES = 2 << 16 | PROVIDES DEB_NAME_PROVIDES = 3 << 16 | PROVIDES DEB_REQUIRES = 4 << 16 | REQUIRES DEB_OR_REQUIRES = 5 << 16 | REQUIRES DEB_UPGRADES = 6 << 16 | UPGRADES DEB_CONFLICTS = 7 << 16 | CONFLICTS class PackageTypeError(Exception): """Raised when an unsupported package type is passed to build_skeleton.""" class PackageSkeleton(object): section = None summary = None description = None size = None installed_size = None _hash = None def __init__(self, type, name, version): self.type = type self.name = name self.version = version self.relations = [] def add_relation(self, type, info): self.relations.append((type, info)) def get_hash(self): """Calculate the package hash. If C{set_hash} has been used, that hash will be returned and the hash won't be the calculated value. """ if self._hash is not None: return self._hash digest = sha1("[%d %s %s]" % (self.type, self.name, self.version)) self.relations.sort() for pair in self.relations: digest.update("[%d %s]" % pair) return digest.digest() def set_hash(self, package_hash): """Set the hash to an explicit value. This should be used when the hash is previously known and can't be calculated from the relations anymore. 
The only use case for this is package resurrection. We're planning on getting rid of package resurrection, and this code can be removed when that is done. """ self._hash = package_hash def relation_to_string(relation_tuple): """Convert an apt relation to a string representation. @param relation_tuple: A tuple, (name, version, relation). version and relation can be the empty string, if the relation is on a name only. Returns something like "name > 1.0" """ name, version, relation_type = relation_tuple relation_string = name if relation_type: relation_string += " %s %s" % (relation_type, version) return relation_string def parse_record_field(record, record_field, relation_type, or_relation_type=None): """Parse an apt C{Record} field and return skeleton relations @param record: An C{apt.package.Record} instance with package information. @param record_field: The name of the record field to parse. @param relation_type: The deb relation that can be passed to C{skeleton.add_relation()} @param or_relation_type: The deb relation that should be used if there is more than one value in a relation. """ relations = set() values = apt_pkg.parse_depends(record.get(record_field, "")) for value in values: value_strings = [relation_to_string(relation) for relation in value] value_relation_type = relation_type if len(value_strings) > 1: value_relation_type = or_relation_type relation_string = " | ".join(value_strings) relations.add((value_relation_type, relation_string)) return relations def build_skeleton_apt(version, with_info=False, with_unicode=False): """Build a package skeleton from an apt package. @param version: An instance of C{apt.package.Version} @param with_info: Whether to extract extra information about the package, like description, summary, size. @param with_unicode: Whether the C{name} and C{version} of the skeleton should be unicode strings. """ name, version_string = version.package.name, version.version if with_unicode: name, version_string = unicode(name), unicode(version_string) skeleton = PackageSkeleton(DEB_PACKAGE, name, version_string) relations = set() relations.update(parse_record_field( version.record, "Provides", DEB_PROVIDES)) relations.add(( DEB_NAME_PROVIDES, "%s = %s" % (version.package.name, version.version))) relations.update(parse_record_field( version.record, "Pre-Depends", DEB_REQUIRES, DEB_OR_REQUIRES)) relations.update(parse_record_field( version.record, "Depends", DEB_REQUIRES, DEB_OR_REQUIRES)) relations.add(( DEB_UPGRADES, "%s < %s" % (version.package.name, version.version))) relations.update(parse_record_field( version.record, "Conflicts", DEB_CONFLICTS)) relations.update(parse_record_field( version.record, "Breaks", DEB_CONFLICTS)) skeleton.relations = sorted(relations) if with_info: skeleton.section = version.section skeleton.summary = version.summary skeleton.description = version.description skeleton.size = version.size if version.installed_size > 0: skeleton.installed_size = version.installed_size if with_unicode: skeleton.section = skeleton.section.decode("utf-8") skeleton.summary = skeleton.summary.decode("utf-8") skeleton.description = skeleton.description.decode("utf-8") return skeleton landscape-client-14.01/landscape/package/facade.py0000644000175000017500000007637312301414317021657 0ustar andreasandreasimport time import logging import hashlib import os import subprocess import sys import tempfile from cStringIO import StringIO from operator import attrgetter # Importing apt throws a FutureWarning on hardy, that we don't want to # see. 
import warnings warnings.filterwarnings("ignore", module="apt", category=FutureWarning) del warnings import apt import apt_inst import apt_pkg from aptsources.sourceslist import SourcesList from apt.progress.text import AcquireProgress from apt.progress.base import InstallProgress from landscape.lib.fs import append_file, create_file, read_file from landscape.constants import UBUNTU_PATH from landscape.package.skeleton import build_skeleton_apt class TransactionError(Exception): """Raised when the transaction fails to run.""" class DependencyError(Exception): """Raised when a needed dependency wasn't explicitly marked.""" def __init__(self, packages): self.packages = packages def __str__(self): return ("Missing dependencies: %s" % ", ".join([str(package) for package in self.packages])) class ChannelError(Exception): """Raised when channels fail to load.""" class LandscapeAcquireProgress(AcquireProgress): def _winch(self, *dummy): """Override trying to get the column count of the buffer. We always send the output to a file, not to a terminal, so the default width (80 columns) is fine for us. Overriding this method means that we don't have to care about fcntl.ioctl API differences for different Python versions. """ class LandscapeInstallProgress(InstallProgress): dpkg_exited = None old_excepthook = None def wait_child(self): """Override to find out whether dpkg exited or not. The C{run()} method returns os.WEXITSTATUS(res) without checking os.WIFEXITED(res) first, so it can signal that everything is ok, even though something causes dpkg not to exit cleanly. Save whether dpkg exited cleanly into the C{dpkg_exited} attribute. If dpkg exited cleanly the exit code can be used to determine whether there were any errors. If dpkg didn't exit cleanly it should mean that something went wrong. """ res = super(LandscapeInstallProgress, self).wait_child() self.dpkg_exited = os.WIFEXITED(res) return res def fork(self): """Fork and override the excepthook in the child process.""" pid = super(LandscapeInstallProgress, self).fork() if pid == 0: # No need to clean up after ourselves, since the child # process will die after dpkg has been run. self.old_excepthook = sys.excepthook sys.excepthook = self._prevent_dpkg_apport_error return pid def _prevent_dpkg_apport_error(self, exc_type, exc_obj, exc_tb): """Prevent dpkg errors from generating Apport crash reports. When dpkg reports an error, a SystemError is raised and cleaned up in C code. However, it seems like the Apport except hook is called before the C code clears the error, generating crash reports even though nothing crashed. This exception hook doesn't call the Apport hook for SystemErrors, but it calls it for all other errors. """ if exc_type is SystemError: sys.__excepthook__(exc_type, exc_obj, exc_tb) return self.old_excepthook(exc_type, exc_obj, exc_tb) class AptFacade(object): """Wrapper for tasks using Apt. This object wraps Apt features, in a way that makes using and testing these features slightly more comfortable. @param root: The root dir of the Apt configuration files. @ivar refetch_package_index: Whether to refetch the package indexes when reloading the channels, or reuse the existing local database. 
""" max_dpkg_retries = 12 # number of dpkg retries before we give up dpkg_retry_sleep = 5 _dpkg_status = "/var/lib/dpkg/status" def __init__(self, root=None): self._root = root self._dpkg_args = [] if self._root is not None: self._ensure_dir_structure() self._dpkg_args.extend(["--root", self._root]) # don't use memonly=True here because of a python-apt bug on Natty when # sources.list contains invalid lines (LP: #886208) self._cache = apt.cache.Cache(rootdir=root) self._channels_loaded = False self._pkg2hash = {} self._hash2pkg = {} self._version_installs = [] self._global_upgrade = False self._version_removals = [] self._version_hold_creations = [] self._version_hold_removals = [] self.refetch_package_index = False def _ensure_dir_structure(self): self._ensure_sub_dir("etc/apt") self._ensure_sub_dir("etc/apt/sources.list.d") self._ensure_sub_dir("var/cache/apt/archives/partial") self._ensure_sub_dir("var/lib/apt/lists/partial") dpkg_dir = self._ensure_sub_dir("var/lib/dpkg") self._ensure_sub_dir("var/lib/dpkg/info") self._ensure_sub_dir("var/lib/dpkg/updates") self._ensure_sub_dir("var/lib/dpkg/triggers") create_file(os.path.join(dpkg_dir, "available"), "") self._dpkg_status = os.path.join(dpkg_dir, "status") if not os.path.exists(self._dpkg_status): create_file(self._dpkg_status, "") def _ensure_sub_dir(self, sub_dir): """Ensure that a dir in the Apt root exists.""" full_path = os.path.join(self._root, sub_dir) if not os.path.exists(full_path): os.makedirs(full_path) return full_path def get_packages(self): """Get all the packages available in the channels.""" return self._hash2pkg.itervalues() def get_locked_packages(self): """Get all packages in the channels that are locked. For Apt, it means all packages that are held. """ return [ version for version in self.get_packages() if (self.is_package_installed(version) and self._is_package_held(version.package))] def get_package_holds(self): """Return the name of all the packages that are on hold.""" return [version.package.name for version in self.get_locked_packages()] def _set_dpkg_selections(self, selection): """Set the dpkg selection. It basically does "echo $selection | dpkg --set-selections". """ process = subprocess.Popen( ["dpkg", "--set-selections"] + self._dpkg_args, stdin=subprocess.PIPE) process.communicate(selection) def set_package_hold(self, version): """Add a dpkg hold for a package. @param version: The version of the package to hold. """ self._set_dpkg_selections(version.package.name + " hold") def remove_package_hold(self, version): """Removes a dpkg hold for a package. @param version: The version of the package to unhold. """ if (not self.is_package_installed(version) or not self._is_package_held(version.package)): return self._set_dpkg_selections(version.package.name + " install") def reload_channels(self, force_reload_binaries=False): """Reload the channels and update the cache. @param force_reload_binaries: Whether to always reload information about the binaries packages that are in the facade's internal repo. """ self._cache.open(None) internal_sources_list = self._get_internal_sources_list() if (self.refetch_package_index or (force_reload_binaries and os.path.exists(internal_sources_list))): # Try to update only the internal repos, if the python-apt # version is new enough to accept a sources_list parameter. 
new_apt_args = {} if force_reload_binaries and not self.refetch_package_index: new_apt_args["sources_list"] = internal_sources_list try: try: self._cache.update(**new_apt_args) except TypeError: self._cache.update() except apt.cache.FetchFailedException: raise ChannelError( "Apt failed to reload channels (%r)" % ( self.get_channels())) self._cache.open(None) self._pkg2hash.clear() self._hash2pkg.clear() for package in self._cache: if not self._is_main_architecture(package): continue for version in package.versions: hash = self.get_package_skeleton( version, with_info=False).get_hash() # Use a tuple including the package, since the Version # objects of two different packages can have the same # hash. self._pkg2hash[(package, version)] = hash self._hash2pkg[hash] = version self._channels_loaded = True def ensure_channels_reloaded(self): """Reload the channels if they haven't been reloaded yet.""" if self._channels_loaded: return self.reload_channels() def _get_internal_sources_list(self): """Return the path to the source.list file for the facade channels.""" sources_dir = apt_pkg.config.find_dir("Dir::Etc::sourceparts") return os.path.join(sources_dir, "_landscape-internal-facade.list") def add_channel_apt_deb(self, url, codename, components=None): """Add a deb URL which points to a repository. @param url: The base URL of the repository. @param codename: The dist in the repository. @param components: The components to be included. """ sources_file_path = self._get_internal_sources_list() sources_line = "deb %s %s" % (url, codename) if components: sources_line += " %s" % " ".join(components) if os.path.exists(sources_file_path): current_content = read_file(sources_file_path).split("\n") if sources_line in current_content: return sources_line += "\n" append_file(sources_file_path, sources_line) def add_channel_deb_dir(self, path): """Add a directory with packages as a channel. @param path: The path to the directory containing the packages. A Packages file is created in the directory with information about the deb files. """ self._create_packages_file(path) self.add_channel_apt_deb("file://%s" % path, "./", None) def clear_channels(self): """Clear the channels that have been added through the facade. Channels that weren't added through the facade (i.e. /etc/apt/sources.list and /etc/apt/sources.list.d) won't be removed. """ sources_file_path = self._get_internal_sources_list() if os.path.exists(sources_file_path): os.remove(sources_file_path) def _create_packages_file(self, deb_dir): """Create a Packages file in a directory with debs.""" packages_contents = "\n".join( self.get_package_stanza(os.path.join(deb_dir, filename)) for filename in sorted(os.listdir(deb_dir))) create_file(os.path.join(deb_dir, "Packages"), packages_contents) def get_channels(self): """Return a list of channels configured. A channel is a deb line in sources.list or sources.list.d. It's represented by a dict with baseurl, distribution, components, and type keys. """ sources_list = SourcesList() return [{"baseurl": entry.uri, "distribution": entry.dist, "components": " ".join(entry.comps), "type": entry.type} for entry in sources_list if not entry.disabled] def reset_channels(self): """Remove all the configured channels.""" sources_list = SourcesList() for entry in sources_list: entry.set_enabled(False) sources_list.save() def get_package_stanza(self, deb_path): """Return a stanza for the package to be included in a Packages file. @param deb_path: The path to the deb package. 
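@return: A control stanza string with Filename, Size, MD5sum, SHA1 and SHA256 fields filled in, suitable for inclusion in a Packages index.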
""" deb_file = open(deb_path) deb = apt_inst.DebFile(deb_file) control = deb.control.extractdata("control") deb_file.close() filename = os.path.basename(deb_path) size = os.path.getsize(deb_path) contents = read_file(deb_path) md5 = hashlib.md5(contents).hexdigest() sha1 = hashlib.sha1(contents).hexdigest() sha256 = hashlib.sha256(contents).hexdigest() # Use rewrite_section to ensure that the field order is correct. return apt_pkg.rewrite_section( apt_pkg.TagSection(control), apt_pkg.REWRITE_PACKAGE_ORDER, [("Filename", filename), ("Size", str(size)), ("MD5sum", md5), ("SHA1", sha1), ("SHA256", sha256)]) def get_arch(self): """Return the architecture APT is configured to use.""" return apt_pkg.config.get("APT::Architecture") def set_arch(self, architecture): """Set the architecture that APT should use. Setting multiple architectures isn't supported. """ if architecture is None: architecture = "" # From oneiric and onwards Architectures is used to set which # architectures can be installed, in case multiple architectures # are supported. We force it to be single architecture, until we # have a plan for supporting multiple architectures. apt_pkg.config.clear("APT::Architectures") apt_pkg.config.set("APT::Architectures::", architecture) result = apt_pkg.config.set("APT::Architecture", architecture) # Reload the cache, otherwise architecture change isn't reflected in # package list self._cache.open(None) return result def get_package_skeleton(self, pkg, with_info=True): """Return a skeleton for the provided package. The skeleton represents the basic structure of the package. @param pkg: Package to build skeleton from. @param with_info: If True, the skeleton will include information useful for sending data to the server. Such information isn't necessary if the skeleton will be used to build a hash. @return: a L{PackageSkeleton} object. """ return build_skeleton_apt(pkg, with_info=with_info, with_unicode=True) def get_package_hash(self, version): """Return a hash from the given package. @param version: an L{apt.package.Version} object. """ return self._pkg2hash.get((version.package, version)) def get_package_hashes(self): """Get the hashes of all the packages available in the channels.""" return self._pkg2hash.values() def get_package_by_hash(self, hash): """Get the package having the provided hash. @param hash: The hash the package should have. @return: The L{apt.package.Package} that has the given hash. """ return self._hash2pkg.get(hash) def is_package_installed(self, version): """Is the package version installed?""" return version == version.package.installed def is_package_available(self, version): """Is the package available for installation?""" return version.downloadable def is_package_upgrade(self, version): """Is the package an upgrade for another installed package?""" if not version.package.is_upgradable or not version.package.installed: return False return version > version.package.installed def _is_main_architecture(self, package): """Is the package for the facade's main architecture?""" # package.name includes the architecture, if it's for a foreign # architectures. package.shortname never includes the # architecture. package.shortname doesn't exist on releases that # don't support multi-arch, though. 
if not hasattr(package, "shortname"): return True return package.name == package.shortname def _is_package_held(self, package): """Is the package marked as held?""" return package._pkg.selected_state == apt_pkg.SELSTATE_HOLD def get_packages_by_name(self, name): """Get all available packages matching the provided name. @param name: The name the returned packages should have. """ return [ version for version in self.get_packages() if version.package.name == name] def _get_broken_packages(self): """Return the packages that are in a broken state.""" return set( version.package for version in self.get_packages() if version.package.is_inst_broken) def _get_changed_versions(self, package): """Return the versions that will be changed for the package. Apt gives us that a package is going to be changed and have variables set on the package to indicate what will change. We need to convert that into a list of versions that will be either installed or removed, which is what the server expects to get. """ if package.marked_install: return [package.candidate] if package.marked_upgrade or package.marked_downgrade: return [package.installed, package.candidate] if package.marked_delete: return [package.installed] return None def _check_changes(self, requested_changes): """Check that the changes Apt will do have all been requested. @raises DependencyError: If some change hasn't been explicitly requested. @return: C{True} if all the changes that Apt will perform have been requested. """ # Build tuples of (package, version) so that we can do # comparison checks. Same versions of different packages compare # as being the same, so we need to include the package as well. all_changes = [ (version.package, version) for version in requested_changes] versions_to_be_changed = set() for package in self._cache.get_changes(): if not self._is_main_architecture(package): continue versions = self._get_changed_versions(package) versions_to_be_changed.update( (package, version) for version in versions) dependencies = versions_to_be_changed.difference(all_changes) if dependencies: raise DependencyError( [version for package, version in dependencies]) return len(versions_to_be_changed) > 0 def _get_unmet_relation_info(self, dep_relation): """Return a string representation of a specific dependency relation.""" info = dep_relation.target_pkg.name if dep_relation.target_ver: info += " (%s %s)" % ( dep_relation.comp_type, dep_relation.target_ver) reason = " but is not installable" if dep_relation.target_pkg.name in self._cache: dep_package = self._cache[dep_relation.target_pkg.name] if dep_package.installed or dep_package.marked_install: version = dep_package.candidate.version if dep_package not in self._cache.get_changes(): version = dep_package.installed.version reason = " but %s is to be installed" % version info += reason return info def _is_dependency_satisfied(self, dependency, dep_type): """Return whether a dependency is satisfied. For positive dependencies (Pre-Depends, Depends) it means that one of its targets is going to be installed. For negative dependencies (Conflicts, Breaks), it means that none of its targets are going to be installed. 
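For example, a Conflicts dependency on a package is satisfied only if no version of that package is installed or marked for installation.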
""" is_positive = dep_type not in ["Breaks", "Conflicts"] depcache = self._cache._depcache for or_dep in dependency: for target in or_dep.all_targets(): package = target.parent_pkg if ((package.current_state == apt_pkg.CURSTATE_INSTALLED or depcache.marked_install(package)) and not depcache.marked_delete(package)): return is_positive return not is_positive def _get_unmet_dependency_info(self): """Get information about unmet dependencies in the cache state. Go through all the broken packages and say which dependencies haven't been satisfied. @return: A string with dependency information like what you get from apt-get. """ broken_packages = self._get_broken_packages() if not broken_packages: return "" all_info = ["The following packages have unmet dependencies:"] for package in sorted(broken_packages, key=attrgetter("name")): found_dependency_error = False for dep_type in ["PreDepends", "Depends", "Conflicts", "Breaks"]: dependencies = package.candidate._cand.depends_list.get( dep_type, []) for dependency in dependencies: if self._is_dependency_satisfied(dependency, dep_type): continue relation_infos = [] for dep_relation in dependency: relation_infos.append( self._get_unmet_relation_info(dep_relation)) info = " %s: %s: " % (package.name, dep_type) or_divider = " or\n" + " " * len(info) all_info.append(info + or_divider.join(relation_infos)) found_dependency_error = True if not found_dependency_error: all_info.append( " %s: %s" % (package.name, "Unknown dependency error")) return "\n".join(all_info) def _set_frontend_noninteractive(self): """ Set the environment to avoid attempts by apt to interact with a user. """ os.environ["DEBIAN_FRONTEND"] = "noninteractive" os.environ["APT_LISTCHANGES_FRONTEND"] = "none" os.environ["APT_LISTBUGS_FRONTEND"] = "none" def _default_path_when_missing(self): """ If no PATH is set in the environment, use the Ubuntu default PATH. When the client is launched from the landscape-client-settings-ui the PATH variable is incorrectly set, this method rectifies that. """ # dpkg will fail if no path is set. if "PATH" not in os.environ: os.environ["PATH"] = UBUNTU_PATH def _setup_dpkg_for_changes(self): """ Setup environment and apt options for successful package operations. """ self._set_frontend_noninteractive() self._default_path_when_missing() apt_pkg.config.clear("DPkg::options") apt_pkg.config.set("DPkg::options::", "--force-confold") def _perform_hold_changes(self): """ Perform pending hold operations on packages. """ hold_changes = (len(self._version_hold_creations) > 0 or len(self._version_hold_removals) > 0) if not hold_changes: return None not_installed = [version for version in self._version_hold_creations if not self.is_package_installed(version)] if not_installed: raise TransactionError( "Cannot perform the changes, since the following " + "packages are not installed: %s" % ", ".join( [version.package.name for version in sorted(not_installed)])) for version in self._version_hold_creations: self.set_package_hold(version) for version in self._version_hold_removals: self.remove_package_hold(version) return "Package holds successfully changed." def _commit_package_changes(self): """ Commit cached APT operations and give feedback on the results as a string. """ fetch_output = StringIO() # Redirect stdout and stderr to a file. We need to work with the # file descriptors, rather than sys.stdout/stderr, since dpkg is # run in a subprocess. 
fd, install_output_path = tempfile.mkstemp() old_stdout = os.dup(1) old_stderr = os.dup(2) os.dup2(fd, 1) os.dup2(fd, 2) install_progress = LandscapeInstallProgress() try: # Since others (charms) might be installing packages on this system # We need to retry a bit in case dpkg is locked in progress dpkg_tries = 0 while dpkg_tries <= self.max_dpkg_retries: error = None if dpkg_tries > 0: # Yeah, sleeping isn't kosher according to Twisted, but # this code is run in the package-changer, which doesn't # have any concurrency going on. time.sleep(self.dpkg_retry_sleep) logging.warning( "dpkg process might be in use. " "Retrying package changes. %d retries remaining." % (self.max_dpkg_retries - dpkg_tries)) dpkg_tries += 1 try: self._cache.commit( fetch_progress=LandscapeAcquireProgress(fetch_output), install_progress=install_progress) if not install_progress.dpkg_exited: raise SystemError("dpkg didn't exit cleanly.") except (apt.cache.LockFailedException, SystemError), error: result_text = (fetch_output.getvalue() + read_file(install_output_path)) error = TransactionError(error.args[0] + "\n\nPackage operation log:\n" + result_text) else: result_text = (fetch_output.getvalue() + read_file(install_output_path)) break if error is not None: raise error finally: # Restore stdout and stderr. os.dup2(old_stdout, 1) os.dup2(old_stderr, 2) os.remove(install_output_path) return result_text def _preprocess_installs(self, fixer): for version in self._version_installs: # Set the candidate version, so that the version we want to # install actually is the one getting installed. version.package.candidate = version # Set auto_fix=False to avoid removing the package we asked to # install when we need to resolve dependencies. version.package.mark_install(auto_fix=False) fixer.clear(version.package._pkg) fixer.protect(version.package._pkg) def _preprocess_removes(self, fixer): held_package_names = set() package_installs = set( version.package for version in self._version_installs) package_upgrades = set( version.package for version in self._version_removals if version.package in package_installs) for version in self._version_removals: if self._is_package_held(version.package): held_package_names.add(version.package.name) if version.package in package_upgrades: # The server requests the old version to be removed for # upgrades, since Smart worked that way. For Apt we have # to take care not to mark upgraded packages for removal. continue version.package.mark_delete(auto_fix=False) # Configure the resolver in the same way # mark_delete(auto_fix=True) would have done. fixer.clear(version.package._pkg) fixer.protect(version.package._pkg) fixer.remove(version.package._pkg) fixer.install_protect() if held_package_names: raise TransactionError( "Can't perform the changes, since the following packages" + " are held: %s" % ", ".join(sorted(held_package_names))) def _preprocess_global_upgrade(self): if self._global_upgrade: self._cache.upgrade(dist_upgrade=True) def _resolve_broken_packages(self, fixer, already_broken_packages): """ Attempt to automatically resolve problems with broken packages. 
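        The resolver is invoked only if the set of broken packages has
        changed since the marks were applied; breakage that already
        existed beforehand is left alone, so unrelated problems on the
        system don't make every requested change fail.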
""" now_broken_packages = self._get_broken_packages() if now_broken_packages != already_broken_packages: try: fixer.resolve(True) except SystemError, error: raise TransactionError(error.args[0] + "\n" + self._get_unmet_dependency_info()) def _preprocess_package_changes(self): version_changes = self._version_installs[:] version_changes.extend(self._version_removals) if (not version_changes and not self._global_upgrade): return [] already_broken_packages = self._get_broken_packages() fixer = apt_pkg.ProblemResolver(self._cache._depcache) self._preprocess_installs(fixer) self._preprocess_global_upgrade() self._preprocess_removes(fixer) self._resolve_broken_packages(fixer, already_broken_packages) return version_changes def _perform_package_changes(self): """ Perform pending install/remove/upgrade operations. """ version_changes = self._preprocess_package_changes() if not self._check_changes(version_changes): return None return self._commit_package_changes() def perform_changes(self): """ Perform the pending package operations. """ self._setup_dpkg_for_changes() hold_result_text = self._perform_hold_changes() package_result_text = self._perform_package_changes() results = [] if package_result_text is not None: results.append(package_result_text) if hold_result_text is not None: results.append(hold_result_text) if len(results) > 0: return " ".join(results) def reset_marks(self): """Clear the pending package operations.""" del self._version_installs[:] del self._version_removals[:] del self._version_hold_removals[:] del self._version_hold_creations[:] self._global_upgrade = False self._cache.clear() def mark_install(self, version): """Mark the package for installation.""" self._version_installs.append(version) def mark_global_upgrade(self): """Upgrade all installed packages.""" self._global_upgrade = True def mark_remove(self, version): """Mark the package for removal.""" self._version_removals.append(version) def mark_hold(self, version): """Mark the package to be held.""" self._version_hold_creations.append(version) def mark_remove_hold(self, version): """Mark the package to have its hold removed.""" self._version_hold_removals.append(version) landscape-client-14.01/landscape/package/store.py0000644000175000017500000003600212301414317021571 0ustar andreasandreas"""Provide access to the persistent data used by L{PackageTaskHandler}s.""" import time try: import sqlite3 except ImportError: from pysqlite2 import dbapi2 as sqlite3 from landscape.lib import bpickle from landscape.lib.store import with_cursor class UnknownHashIDRequest(Exception): """Raised for unknown hash id requests.""" class InvalidHashIdDb(Exception): """Raised when trying to add an invalid hash=>id lookaside database.""" class HashIdStore(object): """C{HashIdStore} stores package hash=>id mappings in a file. The file is a SQLite database that contains a single table called "hash". The table schema is defined in L{ensure_hash_id_schema}. @param filename: The file where the mappings are persisted to. """ _db = None def __init__(self, filename): self._filename = filename def _ensure_schema(self): ensure_hash_id_schema(self._db) @with_cursor def set_hash_ids(self, cursor, hash_ids): """Set the ids of a set of hashes. @param hash_ids: a C{dict} of hash=>id mappings. 
""" for hash, id in hash_ids.iteritems(): cursor.execute("REPLACE INTO hash VALUES (?, ?)", (id, buffer(hash))) @with_cursor def get_hash_id(self, cursor, hash): """Return the id associated to C{hash}, or C{None} if not available.""" cursor.execute("SELECT id FROM hash WHERE hash=?", (buffer(hash),)) value = cursor.fetchone() if value: return value[0] return None @with_cursor def get_hash_ids(self, cursor): """Return a C{dict} holding all the available hash=>id mappings.""" cursor.execute("SELECT hash, id FROM hash") return dict([(str(row[0]), row[1]) for row in cursor.fetchall()]) @with_cursor def get_id_hash(self, cursor, id): """Return the hash associated to C{id}, or C{None} if not available.""" assert isinstance(id, (int, long)) cursor.execute("SELECT hash FROM hash WHERE id=?", (id,)) value = cursor.fetchone() if value: return str(value[0]) return None @with_cursor def clear_hash_ids(self, cursor): """Delete all hash=>id mappings.""" cursor.execute("DELETE FROM hash") @with_cursor def check_sanity(self, cursor): """Check database integrity. @raise: L{InvalidHashIdDb} if the filenme passed to the constructor is not a SQLite database or does not have a table called "hash" with a compatible schema. """ try: cursor.execute("SELECT id FROM hash WHERE hash=?", ("",)) except sqlite3.DatabaseError: raise InvalidHashIdDb(self._filename) class PackageStore(HashIdStore): """Persist data about system packages and L{PackageTaskHandler}'s tasks. This class extends L{HashIdStore} by adding tables to the SQLite database backend for storing information about the status of the system packages and about the tasks to be performed by L{PackageTaskHandler}s. The additional tables and schemas are defined in L{ensure_package_schema}. @param filename: The file where data is persisted to. """ def __init__(self, filename): super(PackageStore, self).__init__(filename) self._hash_id_stores = [] def _ensure_schema(self): super(PackageStore, self)._ensure_schema() ensure_package_schema(self._db) def add_hash_id_db(self, filename): """ Attach an additional "lookaside" hash=>id database. This method can be called more than once to attach several hash=>id databases, which will be queried *before* the main database, in the same the order they were added. If C{filename} is not a SQLite database or does not have a table called "hash" with a compatible schema, L{InvalidHashIdDb} is raised. @param filename: a secondary SQLite databases to look for pre-canned hash=>id mappings. """ hash_id_store = HashIdStore(filename) try: hash_id_store.check_sanity() except InvalidHashIdDb, e: # propagate the error raise e self._hash_id_stores.append(hash_id_store) def has_hash_id_db(self): """Return C{True} if one or more lookaside databases are attached.""" return len(self._hash_id_stores) > 0 def get_hash_id(self, hash): """Return the id associated to C{hash}, or C{None} if not available. This method composes the L{HashIdStore.get_hash_id} methods of all the attached lookaside databases, falling back to the main one, as described in L{add_hash_id_db}. """ assert isinstance(hash, basestring) # Check if we can find the hash=>id mapping in the lookaside stores for store in self._hash_id_stores: id = store.get_hash_id(hash) if id: return id # Fall back to the locally-populated db return HashIdStore.get_hash_id(self, hash) def get_id_hash(self, id): """Return the hash associated to C{id}, or C{None} if not available. 
This method composes the L{HashIdStore.get_id_hash} methods of all the attached lookaside databases, falling back to the main one in case the hash associated to C{id} is not found in any of them. """ for store in self._hash_id_stores: hash = store.get_id_hash(id) if hash is not None: return hash return HashIdStore.get_id_hash(self, id) @with_cursor def add_available(self, cursor, ids): for id in ids: cursor.execute("REPLACE INTO available VALUES (?)", (id,)) @with_cursor def remove_available(self, cursor, ids): id_list = ",".join(str(int(id)) for id in ids) cursor.execute("DELETE FROM available WHERE id IN (%s)" % id_list) @with_cursor def clear_available(self, cursor): cursor.execute("DELETE FROM available") @with_cursor def get_available(self, cursor): cursor.execute("SELECT id FROM available") return [row[0] for row in cursor.fetchall()] @with_cursor def add_available_upgrades(self, cursor, ids): for id in ids: cursor.execute("REPLACE INTO available_upgrade VALUES (?)", (id,)) @with_cursor def remove_available_upgrades(self, cursor, ids): id_list = ",".join(str(int(id)) for id in ids) cursor.execute("DELETE FROM available_upgrade WHERE id IN (%s)" % id_list) @with_cursor def clear_available_upgrades(self, cursor): cursor.execute("DELETE FROM available_upgrade") @with_cursor def get_available_upgrades(self, cursor): cursor.execute("SELECT id FROM available_upgrade") return [row[0] for row in cursor.fetchall()] @with_cursor def add_installed(self, cursor, ids): for id in ids: cursor.execute("REPLACE INTO installed VALUES (?)", (id,)) @with_cursor def remove_installed(self, cursor, ids): id_list = ",".join(str(int(id)) for id in ids) cursor.execute("DELETE FROM installed WHERE id IN (%s)" % id_list) @with_cursor def clear_installed(self, cursor): cursor.execute("DELETE FROM installed") @with_cursor def get_installed(self, cursor): cursor.execute("SELECT id FROM installed") return [row[0] for row in cursor.fetchall()] @with_cursor def get_locked(self, cursor): """Get the package ids of all locked packages.""" cursor.execute("SELECT id FROM locked") return [row[0] for row in cursor.fetchall()] @with_cursor def add_locked(self, cursor, ids): """Add the given package ids to the list of locked packages.""" for id in ids: cursor.execute("REPLACE INTO locked VALUES (?)", (id,)) @with_cursor def remove_locked(self, cursor, ids): id_list = ",".join(str(int(id)) for id in ids) cursor.execute("DELETE FROM locked WHERE id IN (%s)" % id_list) @with_cursor def clear_locked(self, cursor): """Remove all the package ids in the locked table.""" cursor.execute("DELETE FROM locked") @with_cursor def add_hash_id_request(self, cursor, hashes): hashes = list(hashes) cursor.execute("INSERT INTO hash_id_request (hashes, timestamp)" " VALUES (?,?)", (buffer(bpickle.dumps(hashes)), time.time())) return HashIDRequest(self._db, cursor.lastrowid) @with_cursor def get_hash_id_request(self, cursor, request_id): cursor.execute("SELECT 1 FROM hash_id_request WHERE id=?", (request_id,)) if not cursor.fetchone(): raise UnknownHashIDRequest(request_id) return HashIDRequest(self._db, request_id) @with_cursor def iter_hash_id_requests(self, cursor): cursor.execute("SELECT id FROM hash_id_request") return [HashIDRequest(self._db, row[0]) for row in cursor.fetchall()] @with_cursor def clear_hash_id_requests(self, cursor): cursor.execute("DELETE FROM hash_id_request") @with_cursor def add_task(self, cursor, queue, data): data = bpickle.dumps(data) cursor.execute("INSERT INTO task (queue, timestamp, data) " "VALUES (?,?,?)", 
(queue, time.time(), buffer(data))) return PackageTask(self._db, cursor.lastrowid) @with_cursor def get_next_task(self, cursor, queue): cursor.execute("SELECT id FROM task WHERE queue=? ORDER BY timestamp", (queue,)) row = cursor.fetchone() if row: return PackageTask(self._db, row[0]) return None @with_cursor def clear_tasks(self, cursor, except_tasks=()): cursor.execute("DELETE FROM task WHERE id NOT IN (%s)" % ",".join([str(task.id) for task in except_tasks])) class FakePackageStore(PackageStore): """ A L{PackageStore} with an additional message table to store sent messages. """ def _ensure_schema(self): super(FakePackageStore, self)._ensure_schema() ensure_fake_package_schema(self._db) @with_cursor def save_message(self, cursor, message): cursor.execute("INSERT INTO message (data) VALUES (?)", (buffer(bpickle.dumps(message)),)) @with_cursor def get_message_ids(self, cursor): return [row[0] for row in cursor.execute("SELECT id FROM message").fetchall()] @with_cursor def save_message_ids(self, cursor, message_ids): cursor.executemany( "INSERT INTO message (id) VALUES (?)", [(message_id,) for message_id in message_ids]) @with_cursor def get_messages_by_ids(self, cursor, message_ids): params = ", ".join(["?"] * len(message_ids)) result = cursor.execute( "SELECT id, data FROM message WHERE id IN (%s) " "ORDER BY id" % params, tuple(message_ids)).fetchall() return [(row[0], row[1]) for row in result] class HashIDRequest(object): def __init__(self, db, id): self._db = db self.id = id @property @with_cursor def hashes(self, cursor): cursor.execute("SELECT hashes FROM hash_id_request WHERE id=?", (self.id,)) return bpickle.loads(str(cursor.fetchone()[0])) @with_cursor def _get_timestamp(self, cursor): cursor.execute("SELECT timestamp FROM hash_id_request WHERE id=?", (self.id,)) return cursor.fetchone()[0] @with_cursor def _set_timestamp(self, cursor, value): cursor.execute("UPDATE hash_id_request SET timestamp=? WHERE id=?", (value, self.id)) timestamp = property(_get_timestamp, _set_timestamp) @with_cursor def _get_message_id(self, cursor): cursor.execute("SELECT message_id FROM hash_id_request WHERE id=?", (self.id,)) return cursor.fetchone()[0] @with_cursor def _set_message_id(self, cursor, value): cursor.execute("UPDATE hash_id_request SET message_id=? WHERE id=?", (value, self.id)) message_id = property(_get_message_id, _set_message_id) @with_cursor def remove(self, cursor): cursor.execute("DELETE FROM hash_id_request WHERE id=?", (self.id,)) class PackageTask(object): def __init__(self, db, id): self._db = db self.id = id cursor = db.cursor() try: cursor.execute("SELECT queue, timestamp, data FROM task " "WHERE id=?", (id,)) row = cursor.fetchone() finally: cursor.close() self.queue = row[0] self.timestamp = row[1] self.data = bpickle.loads(str(row[2])) @with_cursor def remove(self, cursor): cursor.execute("DELETE FROM task WHERE id=?", (self.id,)) def ensure_hash_id_schema(db): """Create all tables needed by a L{HashIdStore}. @param db: A connection to a SQLite database. """ cursor = db.cursor() try: cursor.execute("CREATE TABLE hash" " (id INTEGER PRIMARY KEY, hash BLOB UNIQUE)") except (sqlite3.OperationalError, sqlite3.DatabaseError): cursor.close() db.rollback() else: cursor.close() db.commit() def ensure_package_schema(db): """Create all tables needed by a L{PackageStore}. @param db: A connection to a SQLite database. """ # FIXME This needs a "patch" table with a "version" column which will # help with upgrades. 
It should also be used to decide when to # create the schema from the ground up, rather than that using # try block. cursor = db.cursor() try: cursor.execute("CREATE TABLE locked" " (id INTEGER PRIMARY KEY)") cursor.execute("CREATE TABLE available" " (id INTEGER PRIMARY KEY)") cursor.execute("CREATE TABLE available_upgrade" " (id INTEGER PRIMARY KEY)") cursor.execute("CREATE TABLE installed" " (id INTEGER PRIMARY KEY)") cursor.execute("CREATE TABLE hash_id_request" " (id INTEGER PRIMARY KEY, timestamp TIMESTAMP," " message_id INTEGER, hashes BLOB)") cursor.execute("CREATE TABLE task" " (id INTEGER PRIMARY KEY, queue TEXT," " timestamp TIMESTAMP, data BLOB)") except sqlite3.OperationalError: cursor.close() db.rollback() else: cursor.close() db.commit() def ensure_fake_package_schema(db): cursor = db.cursor() try: cursor.execute("CREATE TABLE message" " (id INTEGER PRIMARY KEY, data BLOB)") except (sqlite3.OperationalError, sqlite3.DatabaseError): cursor.close() db.rollback() else: cursor.close() db.commit() landscape-client-14.01/landscape/package/releaseupgrader.py0000644000175000017500000003303012301414317023605 0ustar andreasandreasimport os import sys import grp import pwd import shutil import logging import tarfile import cStringIO import ConfigParser from twisted.internet.defer import succeed from landscape.lib.fetch import url_to_filename, fetch_to_files from landscape.lib.lsb_release import parse_lsb_release, LSB_RELEASE_FILENAME from landscape.lib.gpg import gpg_verify from landscape.lib.fs import read_file from landscape.package.taskhandler import ( PackageTaskHandlerConfiguration, PackageTaskHandler, run_task_handler) from landscape.lib.twisted_util import spawn_process from landscape.manager.manager import SUCCEEDED, FAILED from landscape.package.reporter import find_reporter_command class ReleaseUpgraderConfiguration(PackageTaskHandlerConfiguration): """Specialized configuration for the Landscape release-upgrader.""" @property def upgrade_tool_directory(self): """ The directory where the upgrade-tool files get stored and extracted. """ return os.path.join(self.package_directory, "upgrade-tool") class ReleaseUpgrader(PackageTaskHandler): """Perform release upgrades. @cvar config_factory: The configuration class to use to build configuration objects to be passed to our constructor. @cvar queue_name: The queue we pick tasks from. @cvar lsb_release_filename: The path to the LSB data on the file system. @cvar landscape_ppa_url: The URL of the Landscape PPA, if it is present in the computer's sources.list it won't be commented out. @cvar logs_directory: Path to the directory holding the upgrade-tool logs. @cvar logs_limit: When reporting upgrade-tool logs to the server, only the last C{logs_limit} lines will be sent. 
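    Note that C{logs_limit} is applied as a byte count when each log
    file is read back (see C{make_operation_result_text}), so it is an
    approximation of a line limit rather than an exact one.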
""" config_factory = ReleaseUpgraderConfiguration queue_name = "release-upgrader" lsb_release_filename = LSB_RELEASE_FILENAME landscape_ppa_url = "http://ppa.launchpad.net/landscape/trunk/ubuntu/" logs_directory = "/var/log/dist-upgrade" logs_limit = 100000 def make_operation_result_message(self, operation_id, status, text, code): """Convenience to create messages of type C{"operation-result"}.""" return {"type": "operation-result", "operation-id": operation_id, "status": status, "result-text": text, "result-code": code} def handle_task(self, task): """Call the proper handler for the given C{task}.""" message = task.data if message["type"] == "release-upgrade": return self.handle_release_upgrade(message) def handle_release_upgrade(self, message): """Fetch the upgrade-tool, verify it and run it. @param message: A message of type C{"release-upgrade"}. """ target_code_name = message["code-name"] operation_id = message["operation-id"] lsb_release_info = parse_lsb_release(self.lsb_release_filename) current_code_name = lsb_release_info["code-name"] if target_code_name == current_code_name: message = self.make_operation_result_message( operation_id, FAILED, "The system is already running %s." % target_code_name, 1) logging.info("Queuing message with release upgrade failure to " "exchange urgently.") return self._broker.send_message(message, self._session_id, True) tarball_url = message["upgrade-tool-tarball-url"] signature_url = message["upgrade-tool-signature-url"] allow_third_party = message.get("allow-third-party", False) debug = message.get("debug", False) mode = None if current_code_name == "dapper": # On Dapper the upgrade tool must be passed "--mode server" # when run on a server system. As there is no simple and # reliable way to detect if a system is a desktop one, and as # the desktop edition is no longer supported, we default to server # mode. mode = "server" directory = self._config.upgrade_tool_directory tarball_filename = url_to_filename(tarball_url, directory=directory) signature_filename = url_to_filename(signature_url, directory=directory) result = self.fetch(tarball_url, signature_url) result.addCallback(lambda x: self.verify(tarball_filename, signature_filename)) result.addCallback(lambda x: self.extract(tarball_filename)) result.addCallback(lambda x: self.tweak(current_code_name)) result.addCallback(lambda x: self.upgrade( target_code_name, operation_id, allow_third_party=allow_third_party, debug=debug, mode=mode)) result.addCallback(lambda x: self.finish()) result.addErrback(self.abort, operation_id) return result def fetch(self, tarball_url, signature_url): """Fetch the upgrade-tool files. @param tarball_url: The upgrade-tool tarball URL. @param signature_url: The upgrade-tool signature URL. """ if not os.path.exists(self._config.upgrade_tool_directory): os.mkdir(self._config.upgrade_tool_directory) result = fetch_to_files([tarball_url, signature_url], self._config.upgrade_tool_directory, logger=logging.warning) def log_success(ignored): logging.info("Successfully fetched upgrade-tool files") def log_failure(failure): logging.warning("Couldn't fetch all upgrade-tool files") return failure result.addCallback(log_success) result.addErrback(log_failure) return result def verify(self, tarball_filename, signature_filename): """Verify the upgrade-tool tarball against its signature. @param tarball_filename: The filename of the upgrade-tool tarball. @param signature_filename: The filename of the tarball signature. 
""" result = gpg_verify(tarball_filename, signature_filename) def log_success(ignored): logging.info("Successfully verified upgrade-tool tarball") def log_failure(failure): logging.warning("Invalid signature for upgrade-tool tarball: %s" % str(failure.value)) return failure result.addCallback(log_success) result.addErrback(log_failure) return result def extract(self, tarball_filename): """Extract the upgrade-tool tarball. @param tarball_filename: The filename of the upgrade-tool tarball. """ tf = tarfile.open(tarball_filename, "r:gz") for member in tf.getmembers(): tf.extract(member, path=self._config.upgrade_tool_directory) return succeed(None) def tweak(self, current_code_name): """Tweak the files of the extracted tarballs to workaround known bugs. @param current_code_name: The code-name of the current release. """ upgrade_tool_directory = self._config.upgrade_tool_directory if current_code_name == "dapper": config_filename = os.path.join(upgrade_tool_directory, "DistUpgrade.cfg.dapper") config = ConfigParser.ConfigParser() config.read(config_filename) # Fix a bug in the DistUpgrade.cfg.dapper file contained in # the upgrade tool tarball if not config.has_section("NonInteractive"): config.add_section("NonInteractive") config.set("NonInteractive", "ForceOverwrite", "no") # Workaround for Bug #174148, which prevents dbus from restarting # after a dapper->hardy upgrade if not config.has_section("Distro"): config.add_section("Distro") if not config.has_option("Distro", "PostInstallScripts"): config.set("Distro", "PostInstallScripts", "./dbus.sh") else: scripts = config.get("Distro", "PostInstallScripts") scripts += ", ./dbus.sh" config.set("Distro", "PostInstallScripts", scripts) # Write config changes to disk fd = open(config_filename, "w") config.write(fd) fd.close() # Generate the post-install script that starts DBus dbus_sh_filename = os.path.join(upgrade_tool_directory, "dbus.sh") fd = open(dbus_sh_filename, "w") fd.write("#!/bin/sh\n" "/etc/init.d/dbus start\n" "sleep 10\n") fd.close() os.chmod(dbus_sh_filename, 0755) # On some releases the upgrade-tool doesn't support the allow third # party environment variable, so this trick is needed to make it # possible to upgrade against testing client packages from the # Landscape PPA mirrors_filename = os.path.join(upgrade_tool_directory, "mirrors.cfg") fd = open(mirrors_filename, "a") fd.write(self.landscape_ppa_url + "\n") fd.close() return succeed(None) def make_operation_result_text(self, out, err): """Return the operation result text to be sent to the server. @param out: The standard output of the upgrade-tool process. @param err: The standard error of the upgrade-tool process. @return: A text aggregating the process output, error and log files. """ buf = cStringIO.StringIO() for label, content in [("output", out), ("error", err)]: if content: buf.write("=== Standard %s ===\n\n%s\n\n" % (label, content)) for basename in sorted(os.listdir(self.logs_directory)): if not basename.endswith(".log"): continue filename = os.path.join(self.logs_directory, basename) content = read_file(filename, - self.logs_limit) buf.write("=== %s ===\n\n%s\n\n" % (basename, content)) return buf.getvalue() def upgrade(self, code_name, operation_id, allow_third_party=False, debug=False, mode=None): """Run the upgrade-tool command and send a report of the results. @param code_name: The code-name of the release to upgrade to. @param operation_id: The activity id for this task. @param allow_third_party: Whether to enable non-official APT repo. 
@param debug: Whether to turn on debug level logging. @param mode: Optionally, the mode to run the upgrade-tool as. It can be "server" or "desktop", and it's relevant only for dapper. """ upgrade_tool_directory = self._config.upgrade_tool_directory upgrade_tool_filename = os.path.join(upgrade_tool_directory, code_name) args = ["--frontend", "DistUpgradeViewNonInteractive"] if mode: args.extend(["--mode", mode]) env = os.environ.copy() if allow_third_party: env["RELEASE_UPRADER_ALLOW_THIRD_PARTY"] = "True" if debug: env["DEBUG_UPDATE_MANAGER"] = "True" result = spawn_process(upgrade_tool_filename, args=args, env=env, path=upgrade_tool_directory, wait_pipes=False) def send_operation_result((out, err, code)): if code == 0: status = SUCCEEDED else: status = FAILED text = self.make_operation_result_text(out, err) message = self.make_operation_result_message(operation_id, status, text, code) logging.info("Queuing message with release upgrade results to " "exchange urgently.") return self._broker.send_message(message, self._session_id, True) result.addCallback(send_operation_result) return result def finish(self): """Clean-up the upgrade-tool files and report about package changes.""" shutil.rmtree(self._config.upgrade_tool_directory) if os.getuid() == 0: uid = pwd.getpwnam("landscape").pw_uid gid = grp.getgrnam("landscape").gr_gid else: uid = None gid = None reporter = find_reporter_command() # Force an apt-update run, because the sources.list has changed args = ["--force-apt-update"] if self._config.config is not None: args.append("--config=%s" % self._config.config) return spawn_process(reporter, args=args, uid=uid, gid=gid, path=os.getcwd(), env=os.environ) def abort(self, failure, operation_id): """Abort the task reporting details about the failure.""" message = self.make_operation_result_message( operation_id, FAILED, "%s" % str(failure.value), 1) logging.info("Queuing message with release upgrade failure to " "exchange urgently.") return self._broker.send_message(message, self._session_id, True) @staticmethod def find_command(): return find_release_upgrader_command() def find_release_upgrader_command(): """Return the path to the landscape-release-upgrader script.""" dirname = os.path.dirname(os.path.abspath(sys.argv[0])) return os.path.join(dirname, "landscape-release-upgrader") def main(args): if os.getpgrp() != os.getpid(): os.setsid() return run_task_handler(ReleaseUpgrader, args) landscape-client-14.01/landscape/package/changer.py0000644000175000017500000003700712301414317022052 0ustar andreasandreasimport logging import base64 import time import sys import os import pwd import grp from twisted.internet.defer import maybeDeferred, succeed from twisted.internet import reactor from landscape.constants import ( SUCCESS_RESULT, ERROR_RESULT, DEPENDENCY_ERROR_RESULT, POLICY_STRICT, POLICY_ALLOW_INSTALLS, POLICY_ALLOW_ALL_CHANGES, UNKNOWN_PACKAGE_DATA_TIMEOUT) from landscape.lib.fs import create_file from landscape.lib.log import log_failure from landscape.package.reporter import find_reporter_command from landscape.package.taskhandler import ( PackageTaskHandler, PackageTaskHandlerConfiguration, PackageTaskError, run_task_handler) from landscape.manager.manager import FAILED from landscape.manager.shutdownmanager import ShutdownProcessProtocol from landscape.monitor.rebootrequired import REBOOT_REQUIRED_FILENAME class UnknownPackageData(Exception): """Raised when an ID or a hash isn't known.""" class PackageChangerConfiguration(PackageTaskHandlerConfiguration): """Specialized configuration 
for the Landscape package-changer.""" @property def binaries_path(self): """The path to the directory we store server-generated packages in.""" return os.path.join(self.package_directory, "binaries") class ChangePackagesResult(object): """Value object to hold the results of change packages operation. @ivar code: The result code of the requested changes. @ivar text: The output from Apt. @ivar installs: Possible additional packages that need to be installed in order to fulfill the request. @ivar removals: Possible additional packages that need to be removed in order to fulfill the request. """ def __init__(self): self.code = None self.text = None self.installs = [] self.removals = [] class PackageChanger(PackageTaskHandler): """Install, remove and upgrade packages.""" config_factory = PackageChangerConfiguration queue_name = "changer" def __init__(self, store, facade, remote, config, process_factory=reactor, landscape_reactor=None, reboot_required_filename=REBOOT_REQUIRED_FILENAME): super(PackageChanger, self).__init__(store, facade, remote, config) self._process_factory = process_factory if landscape_reactor is None: # For testing purposes. from landscape.reactor import LandscapeReactor self._landscape_reactor = LandscapeReactor() else: self._landscape_reactor = landscape_reactor self.reboot_required_filename = reboot_required_filename def run(self): """ Handle our tasks and spawn the reporter if package data has changed. """ if not self.update_stamp_exists(): logging.warning("The package-reporter hasn't run yet, exiting.") return succeed(None) result = self.use_hash_id_db() result.addCallback(lambda x: self.get_session_id()) result.addCallback(lambda x: self.handle_tasks()) result.addCallback(lambda x: self.run_package_reporter()) return result def run_package_reporter(self): """ Run the L{PackageReporter} if there were successfully completed tasks. """ if self.handled_tasks_count == 0: # Nothing was done return if os.getuid() == 0: os.setgid(grp.getgrnam("landscape").gr_gid) os.setuid(pwd.getpwnam("landscape").pw_uid) command = find_reporter_command() if self._config.config is not None: command += " -c %s" % self._config.config os.system(command) def handle_task(self, task): """ @param task: A L{PackageTask} carrying a message of type C{"change-packages"}. """ message = task.data if message["type"] == "change-packages": result = maybeDeferred(self.handle_change_packages, message) return result.addErrback(self.unknown_package_data_error, task) if message["type"] == "change-package-locks": return self.handle_change_package_locks(message) def unknown_package_data_error(self, failure, task): """Handle L{UnknownPackageData} data errors. If the task is older than L{UNKNOWN_PACKAGE_DATA_TIMEOUT} seconds, a message is sent to the server to notify the failure of the associated activity and the task will be removed from the queue. Otherwise a L{PackageTaskError} is raised and the task will be picked up again at the next run. """ failure.trap(UnknownPackageData) logging.warning("Package data not yet synchronized with server (%r)" % failure.value.args[0]) if task.timestamp < time.time() - UNKNOWN_PACKAGE_DATA_TIMEOUT: message = {"type": "change-packages-result", "operation-id": task.data["operation-id"], "result-code": ERROR_RESULT, "result-text": "Package data has changed. " "Please retry the operation."} return self._broker.send_message(message, self._session_id) else: raise PackageTaskError() def update_stamp_exists(self): """ Return a boolean indicating if the update-stamp stamp file exists. 
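        The stamp is touched by the package-reporter after a successful
        run, so its absence (and that of the update-notifier stamp)
        means package data hasn't been synchronized with the server
        yet, in which case no changes should be attempted.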
""" return (os.path.exists(self._config.update_stamp_filename) or os.path.exists(self.update_notifier_stamp)) def _clear_binaries(self): """Remove any binaries and its associated channel.""" binaries_path = self._config.binaries_path for existing_deb_path in os.listdir(binaries_path): # Clean up the binaries we wrote in former runs os.remove(os.path.join(binaries_path, existing_deb_path)) self._facade.clear_channels() def init_channels(self, binaries=()): """Initialize the Apt channels as needed. @param binaries: A possibly empty list of 3-tuples of the form (hash, id, deb), holding the hash, the id and the content of additional Debian packages that should be loaded in the channels. """ binaries_path = self._config.binaries_path # Clean up the binaries we wrote in former runs self._clear_binaries() if binaries: hash_ids = {} for hash, id, deb in binaries: create_file(os.path.join(binaries_path, "%d.deb" % id), base64.decodestring(deb)) hash_ids[hash] = id self._store.set_hash_ids(hash_ids) self._facade.add_channel_deb_dir(binaries_path) self._facade.reload_channels(force_reload_binaries=True) self._facade.ensure_channels_reloaded() def mark_packages(self, upgrade=False, install=(), remove=(), hold=(), remove_hold=(), reset=True): """Mark packages for upgrade, installation or removal. @param upgrade: If C{True} mark all installed packages for upgrade. @param install: A list of package ids to be marked for installation. @param remove: A list of package ids to be marked for removal. @param hold: A list of package ids to be marked for holding. @param remove_hold: A list of package ids to be marked to have a hold removed. @param reset: If C{True} all existing marks will be reset. """ if reset: self._facade.reset_marks() if upgrade: self._facade.mark_global_upgrade() for mark_function, mark_ids in [ (self._facade.mark_install, install), (self._facade.mark_remove, remove), (self._facade.mark_hold, hold), (self._facade.mark_remove_hold, remove_hold)]: for mark_id in mark_ids: hash = self._store.get_id_hash(mark_id) if hash is None: raise UnknownPackageData(mark_id) package = self._facade.get_package_by_hash(hash) if package is None: raise UnknownPackageData(hash) mark_function(package) def change_packages(self, policy): """Perform the requested changes. @param policy: A value indicating what to do in case additional changes beside the ones explicitly requested are needed in order to fulfill the request (see L{complement_changes}). @return: A L{ChangePackagesResult} holding the details about the outcome of the requested changes. """ # Delay importing these so that we don't import Apt unless # we really need to. from landscape.package.facade import DependencyError, TransactionError result = ChangePackagesResult() count = 0 while result.code is None: count += 1 try: result.text = self._facade.perform_changes() except TransactionError, exception: result.code = ERROR_RESULT result.text = exception.args[0] except DependencyError, exception: for package in exception.packages: hash = self._facade.get_package_hash(package) id = self._store.get_hash_id(hash) if id is None: # Will have to wait until the server lets us know about # this id. raise UnknownPackageData(hash) if self._facade.is_package_installed(package): # Package currently installed. Must remove it. result.removals.append(id) else: # Package currently available. Must install it. 
result.installs.append(id) if count == 1 and self.may_complement_changes(result, policy): # Mark all missing packages and try one more iteration self.mark_packages(install=result.installs, remove=result.removals, reset=False) else: result.code = DEPENDENCY_ERROR_RESULT else: result.code = SUCCESS_RESULT return result def may_complement_changes(self, result, policy): """Decide whether or not we should complement the given changes. @param result: A L{PackagesResultObject} holding the details about the missing dependencies needed to complement the given changes. @param policy: It can be one of the following values: - L{POLICY_STRICT}, no additional packages will be marked. - L{POLICY_ALLOW_INSTALLS}, if only additional installs are missing they will be marked for installation. @return: A boolean indicating whether the given policy allows to complement the changes and retry. """ if policy == POLICY_ALLOW_ALL_CHANGES: return True if policy == POLICY_ALLOW_INSTALLS: # Note that package upgrades are one removal and one install, so # are not allowed here. if result.installs and not result.removals: return True return False def handle_change_packages(self, message): """Handle a C{change-packages} message.""" self.init_channels(message.get("binaries", ())) self.mark_packages(upgrade=message.get("upgrade-all", False), install=message.get("install", ()), remove=message.get("remove", ()), hold=message.get("hold", ()), remove_hold=message.get("remove-hold", ())) result = self.change_packages(message.get("policy", POLICY_STRICT)) self._clear_binaries() needs_reboot = (message.get("reboot-if-necessary") and os.path.exists(self.reboot_required_filename)) stop_exchanger = needs_reboot deferred = self._send_response(None, message, result, stop_exchanger=stop_exchanger) if needs_reboot: # Reboot the system after a short delay after the response has been # sent to the broker. This is to allow the broker time to save the # message to its on-disk queue before starting the reboot, which # will stop the landscape-client process. # It would be nice if the Deferred returned from # broker.send_message guaranteed the message was saved to disk # before firing, but that's not the case, so we add an additional # delay. deferred.addCallback(self._reboot_later) return deferred def _reboot_later(self, result): self._landscape_reactor.call_later(5, self._run_reboot) def _run_reboot(self): """ Create a C{ShutdownProcessProtocol} and return its result deferred. """ protocol = ShutdownProcessProtocol() minutes = "now" protocol.set_timeout(self._landscape_reactor) protocol.result.addCallback(self._log_reboot, minutes) protocol.result.addErrback(log_failure, "Reboot failed.") args = ["/sbin/shutdown", "-r", minutes, "Landscape is rebooting the system"] self._process_factory.spawnProcess( protocol, "/sbin/shutdown", args=args) return protocol.result def _log_reboot(self, result, minutes): """Log the reboot.""" logging.warning( "Landscape is rebooting the system in %s minutes" % minutes) def _send_response(self, reboot_result, message, package_change_result, stop_exchanger=False): """ Create a response and dispatch to the broker. 
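        A minimal example of the message that gets queued (field values
        are illustrative):

            {"type": "change-packages-result",
             "operation-id": 123,
             "result-code": 1,
             "result-text": "..."}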
""" response = {"type": "change-packages-result", "operation-id": message.get("operation-id")} response["result-code"] = package_change_result.code if package_change_result.text: response["result-text"] = package_change_result.text if package_change_result.installs: response["must-install"] = sorted(package_change_result.installs) if package_change_result.removals: response["must-remove"] = sorted(package_change_result.removals) logging.info("Queuing response with change package results to " "exchange urgently.") deferred = self._broker.send_message(response, self._session_id, True) if stop_exchanger: logging.info("stopping exchanger due to imminent reboot.") deferred.addCallback(lambda _: self._broker.stop_exchanger()) return deferred def handle_change_package_locks(self, message): """Handle a C{change-package-locks} message. Package locks aren't supported anymore. """ response = { "type": "operation-result", "operation-id": message.get("operation-id"), "status": FAILED, "result-text": "This client doesn't support package locks.", "result-code": 1} return self._broker.send_message(response, self._session_id, True) @staticmethod def find_command(): return find_changer_command() def find_changer_command(): dirname = os.path.dirname(os.path.abspath(sys.argv[0])) return os.path.join(dirname, "landscape-package-changer") def main(args): if os.getpgrp() != os.getpid(): os.setsid() return run_task_handler(PackageChanger, args) landscape-client-14.01/landscape/package/tests/0000755000175000017500000000000012301414317021224 5ustar andreasandreaslandscape-client-14.01/landscape/package/tests/helpers.py0000644000175000017500000007003412301414317023244 0ustar andreasandreasimport base64 import os import textwrap import time import apt_inst import apt_pkg from landscape.lib.fs import append_file, create_file from landscape.package.facade import AptFacade class AptFacadeHelper(object): """Helper that sets up an AptFacade with a tempdir as its root.""" def set_up(self, test_case): test_case.apt_root = test_case.makeDir() self.dpkg_status = os.path.join( test_case.apt_root, "var", "lib", "dpkg", "status") test_case.Facade = AptFacade test_case.facade = AptFacade(root=test_case.apt_root) test_case.facade.refetch_package_index = True test_case._add_system_package = self._add_system_package test_case._install_deb_file = self._install_deb_file test_case._add_package_to_deb_dir = self._add_package_to_deb_dir test_case._touch_packages_file = self._touch_packages_file test_case._hash_packages_by_name = self._hash_packages_by_name def _add_package(self, packages_file, name, architecture="all", version="1.0", control_fields=None): if control_fields is None: control_fields = {} package_stanza = textwrap.dedent(""" Package: %(name)s Priority: optional Section: misc Installed-Size: 1234 Maintainer: Someone Architecture: %(architecture)s Source: source Version: %(version)s Description: description """ % {"name": name, "version": version, "architecture": architecture}) package_stanza = apt_pkg.rewrite_section( apt_pkg.TagSection(package_stanza), apt_pkg.REWRITE_PACKAGE_ORDER, control_fields.items()) append_file(packages_file, "\n" + package_stanza + "\n") def _add_system_package(self, name, architecture="all", version="1.0", control_fields=None): """Add a package to the dpkg status file.""" system_control_fields = {"Status": "install ok installed"} if control_fields is not None: system_control_fields.update(control_fields) self._add_package( self.dpkg_status, name, architecture=architecture, version=version, 
            control_fields=system_control_fields)

    def _install_deb_file(self, path):
        """Fake that the given deb file is installed in the system."""
        deb_file = open(path)
        deb = apt_inst.DebFile(deb_file)
        control = deb.control.extractdata("control")
        deb_file.close()
        lines = control.splitlines()
        lines.insert(1, "Status: install ok installed")
        status = "\n".join(lines)
        append_file(self.dpkg_status, status + "\n\n")

    def _add_package_to_deb_dir(self, path, name, architecture="all",
                                version="1.0", control_fields=None):
        """Add fake package information to a directory.

        There will only be basic information about the package
        available, so that get_packages() has something to return.
        There won't be an actual package in the dir.
        """
        if control_fields is None:
            control_fields = {}
        self._add_package(
            os.path.join(path, "Packages"), name,
            architecture=architecture, version=version,
            control_fields=control_fields)

    def _touch_packages_file(self, deb_dir):
        """Make sure the Packages file gets a newer mtime value.

        If we rely on simply writing to the file to update the mtime,
        we might end up with the same mtime as before, since the
        resolution is seconds, which causes apt to not reload the file.
        """
        packages_path = os.path.join(deb_dir, "Packages")
        mtime = int(time.time() + 1)
        os.utime(packages_path, (mtime, mtime))

    def _hash_packages_by_name(self, facade, store, package_name):
        """
        Ensure the named L{Package} is correctly recorded in the store,
        so that we can really test the functions of the facade that
        depend on it.
        """
        hash_ids = {}
        for version in facade.get_packages_by_name(package_name):
            skeleton = facade.get_package_skeleton(
                version, with_info=False)
            hash = skeleton.get_hash()
            facade._pkg2hash[(version.package, version)] = hash
            hash_ids[hash] = version.package.id
        store.set_hash_ids(hash_ids)


class SimpleRepositoryHelper(object):
    """Helper for adding a simple repository to the facade.

    This helper requires that C{test_case.facade} has already been
    set up.
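    A sketch of typical use, assuming a test case built on the
    C{helpers} mechanism used throughout the landscape test suite:

        helpers = [AptFacadeHelper, SimpleRepositoryHelper]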
""" def set_up(self, test_case): test_case.repository_dir = test_case.makeDir() create_simple_repository(test_case.repository_dir) test_case.facade.add_channel_deb_dir(test_case.repository_dir) PKGNAME1 = "name1_version1-release1_all.deb" PKGNAME2 = "name2_version2-release2_all.deb" PKGNAME3 = "name3_version3-release3_all.deb" PKGNAME4 = "name3_version3-release4_all.deb" PKGNAME_MINIMAL = "minimal_1.0_all.deb" PKGNAME_SIMPLE_RELATIONS = "simple-relations_1.0_all.deb" PKGNAME_VERSION_RELATIONS = "version-relations_1.0_all.deb" PKGNAME_MULTIPLE_RELATIONS = "multiple-relations_1.0_all.deb" PKGNAME_OR_RELATIONS = "or-relations_1.0_all.deb" PKGDEB1 = ("ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTE2NjExNDQ5MyAgMCAgICAgMCAgICAgMT" "AwNjQ0ICA0ICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuZ3ogIDExNjYxMTQ0OTMg" "IDAgICAgIDAgICAgIDEwMDY0NCAgNDUyICAgICAgIGAKH4sIAAAAAAAAA+3UQW+bMB" "QHcM58Ch+7QwCbEJpomzat0rTDpmiRenfNC7EGNnuGSOmnnwMlyVK1O6VT1feTkJ+e" "/wRh40RxcHGJl2dZP3rnY1/zJJsmIs9Fvs/5UQQsC15A51qJjAVobftc7l/zr1QU10" "XmutpdeP9n0+mT+8+5+Hv/fSOdBiyh/b84MYM1n2fz7G4t0+u5SvMkhbTgs3wu+CwB" "xjqHsdtIhLiwKjayBh6rjTQlVLaMbuBOSxOV92FAXuX5V9a0aKv/eP5zkZyf/1TQ+X" "8RS6l+yRIWrD/Y4S2g09Ys2HYo+AShAun81ApU2099Rds1PFyitqjb3YLZZj8hq/Az" "qo1ufa5D/4uyqnwIJjfQgCncgjUICL87jdA/jF19OGmND3wXHvLn4UfJn6BsXY/hsT" "7Jj63jLauuLMG1/gb3UB3iY+MY/mLNutJqn1ZjeYgfOsf8Eu1WF9C/6lANq/rN+I+s" "qqCYrPS9XxlxHX6X2rT+AvQLuv8Gt5b90FDDDpC9L4fOJ/PQiQy0H/3COIW6GXZh1d" "W1xB0P2Umb078wIYQQQgghhBBCCCGEEEIIIYS8UX8AYydx2gAoAABkYXRhLnRhci5n" "eiAgICAgMTE2NjExNDQ5MyAgMCAgICAgMCAgICAgMTAwNjQ0ICAzOTQgICAgICAgYA" "ofiwgAAAAAAAAD09NnoDkwAAJzU1MwDQToNJhtaGBqYmBkbm5kDlIHpI0YFEwZ6ABK" "i0sSixQUGIry80vwqSMkP0SBnn5pcZH+YIp/EwYDIMd4NP7pGP/FGYlFqfqDJ/4NzY" "xNRuOf3vGfkp+sPzji38jEwHA0/gci/vMSc1MN9Qc6/o2B7NH4H7j4T85IzEtPzclP" "13NJTcpMzNNLr6Iw/s1MTHDGv5GxOSz+zUxNjYDxbw7kMSgYjMY/zYF8NwdHVm2jKx" "Mzepwz6J7y5jpkIOH6sDKssF1rmUqYzBX2piZj9zyFad5RHv8dLoXsqua2spF3v+PQ" "ffXIlN8aYepsu3x2u0202VX+QFC10st6vvMfDdacgtdzKtpe5G5tuFYx5elcpXm27O" "d8LH7Oj3mqP7VgD8P6dTmJ33dsPnpuBnPO3SvLDNlu6ay9It6yZon0BIZRMApGwSgY" "BaNgFIyCUTAKRsEoGAWjYBSMglEwCkbBKBgFo2AUjIJRMApGAUkAADhX8vgAKAAA ") PKGDEB2 = ("ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTE2NjExNDUyMiAgMCAgICAgMCAgICAgMT" "AwNjQ0ICA0ICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuZ3ogIDExNjYxMTQ1MjIg" "IDAgICAgIDAgICAgIDEwMDY0NCAgNDUyICAgICAgIGAKH4sIAAAAAAAAA+3UTY/TMB" "AG4JzzK3yEQ/Phxk1aAQKxEuIAqrYSd+NMU4vEDuOkUvfX4yabthQBpy5aMY9UZTR+" "06ieuFEc3Fzi5UIMV+/6OtRpIrKE5/l8zn0/z9MsYCJ4Ar3rJDIWoLXdn3J/W3+mor" "gphesbd+P5L7Lst/NPU/7z/H2DLwKW0PxvrixSlYkiAVGIxZJnaSHFdilUDplabnnG" "WO8wdjuJEJdWxUY2wGO1k6aC2lbRHXzV0kTVQxiQZ3n+lTUd2vofnv+cJ9fnf57S+X" "8Sa6m+yQpWbDjY4RdAp61Zsf1Y8BlCDdL5pQ2oblj6gLZvebhGbVF3hxWz7XFB1uE7" "VDvd+VyP/htlXfsQzO6gBVO6FWsREL73GmF4GHvx+qI1PfBleMpfh39J3oOyTTOFp/" "oiP7XOt2z6qgLX+RvcY3WKT41z+L0121qrY1pN5Sl+6pzza7R7XcLwU8dq3NWPxr9k" "dQ3lbKMf/M7wIvwkten8B9Bv6PEd3Fv2WUMDB0D2qho7b81jJzLQvfEb4xTqdpzCpm" "8aiQcesos2p39hQgghhBBCCCGEEEIIIYQQQgj5T/0AyM2cyQAoAABkYXRhLnRhci5n" "eiAgICAgMTE2NjExNDUyMiAgMCAgICAgMCAgICAgMTAwNjQ0ICAzOTMgICAgICAgYA" "ofiwgAAAAAAAAD09NnoDkwAAJzU1MwDQToNJhtaGBqYmBkbm5sbAgUNzc3NGZQMGWg" "AygtLkksUlBgKMrPL8GnjpD8EAV6+qXFRfqDLP6BHCOT0finX/wXZyQWpeoPnvg3ND" "MyG41/esd/Sn6y/uCIfyNj89Hyf0DiPy8xN9VIf6Dj39jY3HQ0/gcu/pMzEvPSU3Py" "0/VcUpMyE/P00qsojH8zExOc8Q/M7Yj4Bxb8BobmBsDkomAwGv80B/LdHBzX6hpdmZ" "jR45xB99RGrkMGEq4Pbf0L3UWDL4XIRIk6Hjx7Urzj6SSxS/YTzKbu28sqe/64oPmF" "JGPj3lqR1cLMdz12u04rLHp/gM2y0mv3HOc/GqxvCl7PqWh7kbux6VrFk69zlefZsu" "v5WPycH/NUv7VgF8N6vfeBcgXp3NlnBFNDw5eZsd1as/aK+JzyvZ0TGEbBKBgFo2AU" 
"jIJRMApGwSgYBaNgFIyCUTAKRsEoGAWjYBSMglEwCkbBKBgFJAEAu4OlKQAoAAAK") PKGDEB3 = ("ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTE2OTE0ODIwMyAgMCAgICAgMCAgICAgMT" "AwNjQ0ICA0ICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuZ3ogIDExNjkxNDgyMDMg" "IDAgICAgIDAgICAgIDEwMDY0NCAgNDUxICAgICAgIGAKH4sIAAAAAAAAA+3UwY7TMB" "AG4JzzFD7CoUkax7iqYAViJcQBVFGJu3GmqbWJHcZJpe7T4yabtnS1cOqiFfNJVUbj" "P43qiZuk0dVlgRRiuAaX16GeZ0JwWRSF4KEvZc4jJqJn0PtOIWMROtf9Kfe39RcqSZ" "tS+L7xV57/m6J4cv7zef77/EODi4hlNP+r4yIrc1mUUs43C1VmhcxLEAKkFouCbzRj" "vcfUbxVCWjqdWtUAT/VW2QpqVyW38MMom1T3cURe5PnXznbo6n94/mWeXZ5/ntP5fx" "Yrpe9UBUs2HOz4O6A3zi7Zbiz4DKEG5cPSGnQ3LH1C17c8XqFxaLr9krn2sKDq+APq" "relCrsfwjaquQwhmt9CCLf2StQgIP3uDMDyMvXp31poe+Do+5i/Dj5LfQLummcJTfZ" "afWqdb1n1Vge/CDf6hOsanxin80dlNbfQhrafyGD92TvkVup0pYfipYzXu6mcbXrK6" "hnK2NvdhZ/JF/EUZ24UPYNjQwzu4c+yrgQb2gOxtNXbe24dOYqG7CRvjNZp2nMK6bx" "qFex6zszanf2FCCCGEEEIIIYQQQgghhBBCCPlP/QK+dA1dACgAAApkYXRhLnRhci5n" "eiAgICAgMTE2OTE0ODIwMyAgMCAgICAgMCAgICAgMTAwNjQ0ICAzOTkgICAgICAgYA" "ofiwgAAAAAAAAD09NnoDkwAAJzU1MwDQToNJhtaGBqamxuYmJiagQUNzc3MmJQMGWg" "AygtLkksUlBgKMrPL8GnjpD8EAV6+qXFRfqDKf4NGQyAHOPR+Kdj/BdnJBal6g+e+D" "c0MzYZjX96x39KfrL+4Ih/IxMDw9H4H4j4z0vMTTXWH8j4B9b/hsYmBqaj8T9w8Z+c" "kZiXnpqTn67nkpqUmZinl15FYfybmZjgjH8jY3NE/JuYAePfHKieQcFgNP5pDuS7OT" "jUTq53ZWJGj3MG3VPeXIcMJFwfVoYVtmstW+Imc4W9qcnYPU9hmneUx3+HSyG7qrmt" "bOTd7zh0Xz0y5bdGmDrbLp/dbhNtdpU/EFSt9LKe7/xHgzWn4PWcirYXuVsbrlVMeT" "pXaZ4t+zkfi5/zY57qTy3Yw7B+XU7g+8L07rmG7Fe2bVxmyHZLZ+0V8Sl2Xj8mMIyC" "UTAKRsEoGAWjYBSMglEwCkbBKBgFo2AUjIJRMApGwSgYBaNgFIyCUTAKSAIAY/FOKA" "AoAAAK") PKGDEB4 = ("ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTI3NjUxMTU3OC41MCAgICAgMCAgICAgNj" "Q0ICAgICA0\nICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuZ3ogIDEyNzY1MTE1Nz" "guNTAgICAgIDAgICAgIDY0\nNCAgICAgMjk1ICAgICAgIGAKH4sIAFoFFkwC/+3TwU" "6EMBAGYM48RV9goS0dqnszMSbeTEy8F6iE\nCJS04MGnt2GzBzHqiVWT/7u0yVCm8G" "eyPNkdjzTRukbbdd0LoTgpLqmQCRdCckoYJRewhNn4eBXv\n3Pzdcz/Vtx/3T2R57c" "bZu37n/EulvsxfqnKTvyyFTBhH/rt7MPWLae2RjWawIn2yPnRuPLLX00Zk\n4uBtb0" "2Ixfsx/qu+t83hsXuLRwRPb22ofTfN65kbFsww9ZYtU+tNY9l0ennK7pxnsw1zN7bn" "YsjS\nD72LT72Lc2eVJrDb/A8NhWUIvzj/nMR2/kkKzP8lNERFJZWOGWiqiF89ayVt" "qbWhSlfimrEsD26w\nGEEAAAAAAAAAAAAAAAAAAIC/6x1piYqhACgAAApkYXRhLnRh" "ci5neiAgICAgMTI3NjUxMTU3OC41\nMCAgICAgMCAgICAgNjQ0ICAgICAxNDUgICAg" "ICAgYAofiwgAWgUWTAL/7dFBCsMgEEDRWfcUniCZ\nsU57kJ5ASJdFSOz9K9kULLQr" "C4H/NiPqQvnTLMNpc3XfZ9PPfW2W1JOae9s3i5okuPzBc6t5bU9Z\nS6nf7v067z93" "ENO8lcd9fP/LZ/d3f4td/6h+lqD0H+7W6ocl13wSAAAAAAAAAAAAAAAAAAfzAqr5\n" "GFYAKAAACg==\n") PKGDEB_MINIMAL = ( "ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTMxNzg5MDQ3OSAgMCAgICAgMCAgICAgMTAwNj" "Q0ICA0 ICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuZ3ogIDEzMTc4OTA0NzkgIDAgICA" "gIDAgICAgIDEw MDY0NCAgMjU4ICAgICAgIGAKH4sIAAAAAAACA+3Rz0rEMBAG8Jz7FPME" "3aT/FoqIC54EwZP3mB1s 1jQp0yz6+HaVBRFcTxWE7wfJQDKZHL5yo1anF9u2PVWzbfXXe" "qaM6Zq66pqurZQ2uqorRa36A8c5 WyFST4ck8ULfb/f/VLlxKWZJYeX8u6b5Mf+qbr7lb7" "rliDTyX92DdS/2mXsaffSjDcUjy+xT7MmU utiJG3xml4+ytNgQinvrY14WS093aYh0dVj" "2G36z4xS4dGm8Lm55duKn/DFmd55M0+dX9OrzQDHR nieOe47O80xJKOWBhYSDPb2cy0IB" "AAAAAAAAAAAAAAAAAAAAAMBF70s1/foAKAAAZGF0YS50YXIu Z3ogICAgIDEzMTc4OTA0N" "zkgIDAgICAgIDAgICAgIDEwMDY0NCAgMTA3ICAgICAgIGAKH4sIAAAA AAACA+3KsQ3CQB" "AEwCvlK4D/N4frMSGBkQz0jwmQiHCEo5lkpd09HOPv6mrMfGcbs37nR7R2Pg01" "ew5r32rvNUrGDp73x7SUEpfrbZl//LZ2AAAAAAAAAAAA2NELx33R7wAoAAAK") PKGDEB_SIMPLE_RELATIONS = ( "ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTMxODUxNjMyMiAgMCAgICAgMCAgICAgMTAwNj" "Q0ICA0 ICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuZ3ogIDEzMTg1MTYzMjIgIDAgICA" "gIDAgICAgIDEw MDY0NCAgMzQ0ICAgICAgIGAKH4sIAAAAAAACA+3R3UrDMBQH8F7nKc4L" "rGu2tYMi4tQrQRkI3mdp 
tNnSpKTZ0Lc37TYVQb2aIvx/0Ob09DQfPek4Obksmud5P/J5n" "n0cjxLOi1mez6ecT5KMZ5NJkVCe /IJtF4QnSlZr5+03dT+9/6fSsXQ2eGdO3P9iNvuy/3" "mWf+o/L6Y8oQz9P7mlkBvxpErqdNMaNfLK iKCd7diD8l0MSuJpxu6VDMNDozvJll47r8N" "LSa7t08KwhZe1DrFq6+NkwphYpEbXqlW26kpqvaqG mLO33DFx5eyj0TLElDyEnF16JTYx" "s+pHHidzO12pYaYh4uxWaBvipXxJN662dLaO9wv1LPqDpNI1 53GtTnrd7re+iJu3uhGG2" "v2hKdQiUC26w+Hp/fAU3Tna7f8BCa+OC1ekbfzwQ3HKEgAAAAAAAAAA AAAAAAAAAACAv/" "EKgcHt1gAoAABkYXRhLnRhci5neiAgICAgMTMxODUxNjMyMiAgMCAgICAgMCAg ICAgMTA" "wNjQ0ICAxMDcgICAgICAgYAofiwgAAAAAAAID7cqxDcJQEETBK8UVwH2b+64HQgIjGegf " "CJCIIMLRTPKC3d0+/i6f5qpX21z52bdorR+m7Fl9imw5jhVDxQbu19txHYY4nS/r8uX3aw" "cAAAAA AAAAAIANPQALnD6FACgAAAo=") PKGDEB_VERSION_RELATIONS = ( "ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTMxODUxNjQ5OCAgMCAgICAgMCAgICAgMTAwNj" "Q0ICA0 ICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuZ3ogIDEzMTg1MTY0OTggIDAgICA" "gIDAgICAgIDEw MDY0NCAgMzUwICAgICAgIGAKH4sIAAAAAAACA+3RQUvDMBQH8Jz7KXLU" "w7pmazcoczj1JAgDwXuW xTVbmpQkG/rtTds5RFBPGwj/H7R5fS9N07x0SM4ui6ZF0Y5sW" "mRfx0+EsUleFNNxznKSsWw0HhNa kAvY+8AdpWS1tc78Mu+v+j+VDoU1wVl95v5P8vzH/h" "eMfes/m7T9z9D/s1tyseMbWdKDdF5ZM3BS 8xADn7z0mZKyNEuepQjdQ628SJZOWafCe0l" "t06a5ThZOVCrEWXsXV+Nax0ly8CAbada+pI2T6y5m 9Gp2Q0dpdp2ciqfKsXBvzatWIsSS" "OIbta7O+euck38XSqh1jfj7v80tnD2otu491EUueuDIhXtKV 9NFWhs628X4r33jdaJkKW" "8/jLrxwqun/bhH/z6iaa9r0B0NDxQOtuKeng2n31C6qzObz1HyaEAAA AAAAAAAAAAAAAA" "AAAACAy/sAwTtOtwAoAABkYXRhLnRhci5neiAgICAgMTMxODUxNjQ5OCAgMCAg ICAgMCA" "gICAgMTAwNjQ0ICAxMDcgICAgICAgYAofiwgAAAAAAAID7cqxEcIwEETRK0UVgCT7UD0Q " "EpgZA/0DATNEEOHoveQHu7t9/F19GpmvtpH1s2/R2mGeemYfc9RW+9SjZGzgfr0d11LidL" "6sy5ff rx0AAAAAAAAAAAA29AD/ixlwACgAAAo=") PKGDEB_MULTIPLE_RELATIONS = ( "ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTMxODU4MDA3OSAgMCAgICAgMCAgICAgMTAwNj" "Q0ICA0 ICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuZ3ogIDEzMTg1ODAwNzkgIDAgICA" "gIDAgICAgIDEw MDY0NCAgMzgzICAgICAgIGAKH4sIAAAAAAACA+3RXUvDMBQG4F7nV5xL" "BVeb2nZQ5vDrShAGgvcx izaaNiXNRMEfb9atcwq6qwnC+8CW05N3bXcSH0d7lwTjPF+uf" "Jwn2+sg4rzI8nERYjxKeJJmaUR5 9AcWnReOKLp/sq75Jbdr/5+Kj6VtvLNmz+dfZNmP51" "+cZN/OnxdhmxKc/97NhHwWj6qkemG8bo0a OWWE17bp2J1yXShK4nHCbpX0/UWtO8lmTlu" "n/VtJtl22hWHnTlbah9TChdsJY0JIja5Uq5p5V1Lr 1LyvOR1MTimNk8Ojz2bKNsFNagit" "Gif0vq4yOphOv+yl7NI2D0ZLH34v1+XyOZN1bOil7MIp8RxS 98uVb92pb6Thne2Lnqv+h" "fuKHw1Vym6Ebnz4KFfSta0amjyF7zP1KuowuVjaehr+RyedblezOg/T anQtDLWrOZOvhK" "dKdJt504swC9XRg3WkhKxomH/MIgAAAAAAAAAAAAAAAAAAAACAHT4AFDs6bAAo AAAKZGF" "0YS50YXIuZ3ogICAgIDEzMTg1ODAwNzkgIDAgICAgIDAgICAgIDEwMDY0NCAgMTA3ICAg " "ICAgIGAKH4sIAAAAAAACA+3KsRHCMBBE0StFFYBkfFY9EBKYGWP3DwTMEEGEo/eSH+wejv" "F39aln vtp61s++RWvTeBpy6tmjtjqMLUrGDrb7el5Kicv1tsxffr92AAAAAAAAAAAA2NE" "Db6L1AQAoAAAK") PKGDEB_OR_RELATIONS = ( "ITxhcmNoPgpkZWJpYW4tYmluYXJ5ICAgMTMxNzg4ODg2OSAgMCAgICAgMCAgICAgMTAwNj" "Q0ICA0 ICAgICAgICAgYAoyLjAKY29udHJvbC50YXIuZ3ogIDEzMTc4ODg4NjkgIDAgICA" "gIDAgICAgIDEw MDY0NCAgMzc1ICAgICAgIGAKH4sIAAAAAAACA+3R30vDMBAH8D73r7g3" "FbS23brBUFHwSRAFwfeY HmtmlpQ08wf4x3vbWB2Cig8ThO8H2qbp3fXCZcfJzuViXFXLZ" "zGu8u3nRlIUo+GgHBXVUPaLvCwH CVXJH1h0UQWi5GHmg/sm7qfv/1R2rL2Lwdsdz380HH" "45f5n6p/kXo0GeUI7579yt0o9qyhPy4Siw VdF416X3HDpZTKjI8vSOdVy9zE2n09tgfDD" "xVTLa5bay6UXQjYkStQhSSFkrQXx0yS27uptQG7he rQvaPzmlMssP6O1jt0z7yD6sj9qE" "XCvjolwcJnTlG0cnM7mf84uat5Yz7ednUqbTwbTrXi+kW2fm ylK7PiHFRkVqVCcnpf6kW" "UrixtlX2uqZlKupXwcm47Rd1FwfUidLJh8b3qqyqr2qpJWTfzyxtC55 bi/2qfTcsJPvVi" "+WWW4qSdw3J301WZoAAAAAAAAAAAAAAAAAAAAAAPzOO2wqjioAKAAACmRhdGEu dGFyLmd" "6ICAgICAxMzE3ODg4ODY5ICAwICAgICAwICAgICAxMDA2NDQgIDEwNyAgICAgICBgCh+L " "CAAAAAAAAgPtyrsRwjAURNFXiioAfZBcjwkJzIyB/oGAGSIc4eic5Aa7h2P8XX6Zen+3TD" "1/9yNK" 
"GadWR2ltRC651hGpxw4et/u8phTny3Vdfvy2dgAAAAAAAAAAANjRE6Lr2rEAKAAACg==") HASH1 = base64.decodestring("/ezv4AefpJJ8DuYFSq4RiEHJYP4=") HASH2 = base64.decodestring("glP4DwWOfMULm0AkRXYsH/exehc=") HASH3 = base64.decodestring("NJM05mj86veaSInYxxqL1wahods=") HASH4 = 'c\xc1\xe6\xe1U\xde\xb6:\x03\xcb\xb9\xdc\xee\x91\xb7"\xc9\xb1\xe4\x8f' HASH5 = '|\x93K\xe0gx\xba\xe4\x85\x84\xd9\xf4%\x8bB\xbdR\x97\xdb\xfc' HASH6 = '\xedt!=,\\\rk\xa7\xe3$\xfb\x06\x9c\x88\x92)\xc2\xfb\xd6' HASH7 = 'D\xb1\xb6\xf5\xaa\xa8i\x84\x07#x\x97\x01\xf7`.\x9b\xde\xfb ' HASH_MINIMAL = "6\xce\x8f\x1bM\x82MWZ\x1a\xffjAc(\xdb(\xa1\x0eG" HASH_SIMPLE_RELATIONS = ( "'#\xab&k\xe6\xf5E\xcfB\x9b\xceO7\xe6\xec\xa9\xddY\xaa") HASH_VERSION_RELATIONS = ( '\x84\xc9\xb4\xb3\r\x95\x16\x03\x95\x98\xc0\x14u\x06\xf7eA\xe65\xd1') HASH_MULTIPLE_RELATIONS = ( '\xec\xcdi\xdc\xde-\r\xc3\xd3\xc9s\x84\xe4\xc3\xd6\xc4\x12T\xa6\x0e') HASH_OR_RELATIONS = ( '\xa1q\xf4*\x1c\xd4L\xa1\xca\xf1\xfa?\xc3\xc7\x9f\x88\xd53B\xc9') RELEASES = {"hardy": """Origin: Ubuntu Label: Ubuntu Codename: hardy Version: 8.04 Date: Tue, 31 Mar 2009 13:30:02 +0000 Architectures: amd64 i386 Components: main restricted MD5Sum: 356312bc1c0ab2b8dbe5c67f09879497 827 main/binary-i386/Packages ad2d9b94381264ce25cda7cfa1b2da03 555 main/binary-i386/Packages.gz 2f6ee66ed2d2b4115fabc8eed428e42e 78 main/binary-i386/Release f0fd5c1bb18584cf07f9bf4a9f2e6d92 605 main/binary-amd64/Packages 98860034ca03a73a9face10af8238a81 407 main/binary-amd64/Packages.gz 7e40db962fe49b6db232bf559cf6f79d 79 main/binary-amd64/Release 99e2e7213a7fdd8dd6860623bbf700e6 538 restricted/binary-i386/Packages 7771307958f2800bafb5cd96292308bd 384 restricted/binary-i386/Packages.gz 8686ad9c5d83484dc66a1eca2bd8030f 84 restricted/binary-i386/Release 99e2e7213a7fdd8dd6860623bbf700e6 538 restricted/binary-amd64/Packages 7771307958f2800bafb5cd96292308bd 384 restricted/binary-amd64/Packages.gz 6e24798a6089cd3a21226182784995e9 85 restricted/binary-amd64/Release SHA1: 1f39494284f8da4a1cdd788a3d91a048c5edf7f5 827 main/binary-i386/Packages e79a66d7543f24f77a9ffe1409431ae717781375 555 main/binary-i386/Packages.gz 5fe86036c60d6210b662df3acc238e2936f03581 78 main/binary-i386/Release 37ba69be70f4a79506038c0124293187bc879014 605 main/binary-amd64/Packages 65dca66c72b18d59cdcf671775104e86cbe2123a 407 main/binary-amd64/Packages.gz c9810732c61aa7de2887b5194c6a09d0b6118664 79 main/binary-amd64/Release 4cdb64c700f798f719f5c81ae42e44582be094c5 538 restricted/binary-i386/Packages 190f980fd80d58284129ee050f9eb70b9590fedb 384 restricted/binary-i386/Packages.gz b1d1a4d57f5c8d70184c9661a087b8a92406c76d 84 restricted/binary-i386/Release 4cdb64c700f798f719f5c81ae42e44582be094c5 538 restricted/binary-amd64/Packages 190f980fd80d58284129ee050f9eb70b9590fedb 384 restricted/binary-amd64/Packages.gz 4bd64fb2ef44037254729ab514d3403a65db7123 85 restricted/binary-amd64/Release """, "hardy-updates": """Origin: Ubuntu Label: Ubuntu Codename: hardy-updates Version: 8.04 Date: Tue, 31 Mar 2009 13:32:17 +0000 Architectures: i386 amd64 Components: main restricted MD5Sum: a23ba734dc4fe7c1ec8dc960cc670b8e 1227 main/binary-i386/Packages 2d6d271964be8000808abfa2b0e999b7 713 main/binary-i386/Packages.gz 2f6ee66ed2d2b4115fabc8eed428e42e 78 main/binary-i386/Release a23ba734dc4fe7c1ec8dc960cc670b8e 1227 main/binary-amd64/Packages 2d6d271964be8000808abfa2b0e999b7 713 main/binary-amd64/Packages.gz 7e40db962fe49b6db232bf559cf6f79d 79 main/binary-amd64/Release d41d8cd98f00b204e9800998ecf8427e 0 restricted/binary-i386/Packages 7029066c27ac6f5ef18d660d5741979a 20 
restricted/binary-i386/Packages.gz 8686ad9c5d83484dc66a1eca2bd8030f 84 restricted/binary-i386/Release d41d8cd98f00b204e9800998ecf8427e 0 restricted/binary-amd64/Packages 7029066c27ac6f5ef18d660d5741979a 20 restricted/binary-amd64/Packages.gz 6e24798a6089cd3a21226182784995e9 85 restricted/binary-amd64/Release SHA1: 9867c9f7ebbb5741fc589d0d4395ea8f74f3b5e4 1227 main/binary-i386/Packages 2a7061fa162a607a63453c0360678052a38f0259 713 main/binary-i386/Packages.gz 5fe86036c60d6210b662df3acc238e2936f03581 78 main/binary-i386/Release 9867c9f7ebbb5741fc589d0d4395ea8f74f3b5e4 1227 main/binary-amd64/Packages 2a7061fa162a607a63453c0360678052a38f0259 713 main/binary-amd64/Packages.gz c9810732c61aa7de2887b5194c6a09d0b6118664 79 main/binary-amd64/Release da39a3ee5e6b4b0d3255bfef95601890afd80709 0 restricted/binary-i386/Packages 46c6643f07aa7f6bfe7118de926b86defc5087c4 20 restricted/binary-i386/Packages.gz b1d1a4d57f5c8d70184c9661a087b8a92406c76d 84 restricted/binary-i386/Release da39a3ee5e6b4b0d3255bfef95601890afd80709 0 restricted/binary-amd64/Packages 46c6643f07aa7f6bfe7118de926b86defc5087c4 20 restricted/binary-amd64/Packages.gz 4bd64fb2ef44037254729ab514d3403a65db7123 85 restricted/binary-amd64/Release """} PACKAGES = {"hardy": {"restricted": {"amd64": """Package: kairos Version: 0.0.8 Architecture: all Maintainer: Free Ekanayaka Installed-Size: 192 Pre-Depends: libaugeas0, python-augeas, augeas-tools, jackd, rotter, monit, darkice, soma, python-remix, nfs-kernel-server, icecast2 Priority: extra Section: admin Filename: pool/restricted/k/kairos/kairos_0.0.8_all.deb Size: 60768 SHA1: 1e5cc71cbd33d2b26a8feb19a48e815f271cd335 MD5sum: 5fd717ed3d15db25ffaa9d05fec62e42 Description: kairos customisation package This package configures and customises an kairos machine. """, "i386": """Package: kairos Version: 0.0.8 Architecture: all Maintainer: Free Ekanayaka Installed-Size: 192 Pre-Depends: libaugeas0, python-augeas, augeas-tools, jackd, rotter, monit, darkice, soma, python-remix, nfs-kernel-server, icecast2 Priority: extra Section: admin Filename: pool/restricted/k/kairos/kairos_0.0.8_all.deb Size: 60768 SHA1: 1e5cc71cbd33d2b26a8feb19a48e815f271cd335 MD5sum: 5fd717ed3d15db25ffaa9d05fec62e42 Description: kairos customisation package This package configures and customises an kairos machine. """}, "main": {"amd64": """Package: libclthreads2 Source: clthreads Version: 2.4.0-1 Architecture: amd64 Maintainer: Debian Multimedia Maintainers Installed-Size: 80 Depends: libc6 (>= 2.3.2), libgcc1 (>= 1:4.1.1), libstdc++6 (>= 4.1.1) Priority: extra Section: libs Filename: pool/main/c/clthreads/libclthreads2_2.4.0-1_amd64.deb Size: 12938 SHA1: dc6cb78896642dd436851888b8bd4454ab8f421b MD5sum: 19960adb88e178fb7eb4997b47eee05b Description: POSIX threads C++ access library C++ wrapper library around the POSIX threads API. This package includes the shared library object. 
""", "i386": """Package: syslinux Version: 2:3.73+dfsg-2 Architecture: i386 Maintainer: Daniel Baumann Installed-Size: 140 Depends: libc6 (>= 2.7-1), syslinux-common (= 2:3.73+dfsg-2), dosfstools, mtools Homepage: http://syslinux.zytor.com/ Priority: optional Section: utils Filename: pool/main/s/syslinux/syslinux_3.73+dfsg-2_i386.deb Size: 70384 SHA1: 6edf6a7e81a5e9759270872e45c782394dfa85e5 MD5sum: ae8baa9f6c6a172a3b127af1e6675046 Description: utilities for the syslinux bootloaders SYSLINUX is a suite of lightweight bootloaders, currently supporting DOS FAT filesystems (SYSLINUX), Linux ext2/ext3 filesystems (EXTLINUX), PXE network booting (PXELINUX), or bootable "El Torito" ISO 9660 CD-ROMs (ISOLINUX). It also includes a tool, MEMDISK, which loads legacy operating systems (such as DOS) from these media. """}}, "hardy-updates": {"restricted": {"amd64": """""", "i386": """"""}, "main": {"amd64": """Package: rebuildd Version: 0.3.5 Architecture: all Maintainer: Julien Danjou Installed-Size: 312 Depends: python (>= 2.5), python-support (>= 0.7.1), lsb-base, python-sqlobject, python-apt Recommends: pbuilder, python-gdchart2, python-webpy Suggests: cowdancer Priority: extra Section: devel Filename: pool/main/r/rebuildd/rebuildd_0.3.5_all.deb Size: 24652 SHA1: 5446cd5c8a29212b403214884cae96f14824a573 MD5sum: 92e81240c2caf286ad103e44dcdc44e1 Description: build daemon aiming at rebuilding Debian packages This software allows you to manage a set of jobs. Each job is a package rebuilding task. Rebuilding is done by pbuilder (or cowbuilder if you want), or anything else, since everything is customizable via configuration file. It can also send build logs by email, event each log can be sent to a different email address. . rebuildd is multi-threaded, so you can run multiple build jobs in parallel. It is also administrable via a telnet interface. A Web interface is also embedded so you can see your jobs queue and watch log file in real-time in your browser. . rebuildd is designed to be run on multiple hosts even with different architecture set, and to parallelize the rebuild tasks. """, "i386": """Package: rebuildd Version: 0.3.5 Architecture: all Maintainer: Julien Danjou Installed-Size: 312 Depends: python (>= 2.5), python-support (>= 0.7.1), lsb-base, python-sqlobject, python-apt Recommends: pbuilder, python-gdchart2, python-webpy Suggests: cowdancer Priority: extra Section: devel Filename: pool/main/r/rebuildd/rebuildd_0.3.5_all.deb Size: 24652 SHA1: 5446cd5c8a29212b403214884cae96f14824a573 MD5sum: 92e81240c2caf286ad103e44dcdc44e1 Description: build daemon aiming at rebuilding Debian packages This software allows you to manage a set of jobs. Each job is a package rebuilding task. Rebuilding is done by pbuilder (or cowbuilder if you want), or anything else, since everything is customizable via configuration file. It can also send build logs by email, event each log can be sent to a different email address. . rebuildd is multi-threaded, so you can run multiple build jobs in parallel. It is also administrable via a telnet interface. A Web interface is also embedded so you can see your jobs queue and watch log file in real-time in your browser. . rebuildd is designed to be run on multiple hosts even with different architecture set, and to parallelize the rebuild tasks. 
"""}}} def create_deb(target_dir, pkg_name, pkg_data): """Create a Debian package in the specified C{target_dir}.""" path = os.path.join(target_dir, pkg_name) data = base64.decodestring(pkg_data) create_file(path, data) def create_simple_repository(target_dir): """Create a simple deb-dir repository with in C{target_dir}.""" create_deb(target_dir, PKGNAME1, PKGDEB1) create_deb(target_dir, PKGNAME2, PKGDEB2) create_deb(target_dir, PKGNAME3, PKGDEB3) def create_full_repository(target_dir): """ Create a full APT repository with a dists/ tree rooted at C{target_dir}. """ class Repository(object): codename = "hardy" variant = "hardy-updates" components = ["main", "restricted"] archs = ["amd64", "i386"] hashes = [HASH4, HASH5, HASH6, HASH7] def __init__(self, root): self.root = root self.url = "file://%s" % self.root repository = Repository(target_dir) dists_directory = os.path.join(repository.root, "dists") os.mkdir(dists_directory) for dist in [repository.codename, repository.variant]: dist_directory = os.path.join(dists_directory, dist) os.mkdir(dist_directory) fd = open(os.path.join(dist_directory, "Release"), "w") fd.write(RELEASES[dist]) fd.close() for component in repository.components: component_directory = os.path.join(dist_directory, component) os.mkdir(component_directory) for arch in repository.archs: arch_directory = os.path.join(component_directory, "binary-%s" % arch) os.mkdir(arch_directory) fd = open(os.path.join(arch_directory, "Packages"), "w") fd.write(PACKAGES[dist][component][arch]) fd.close() fd = open(os.path.join(arch_directory, "Release"), "w") fd.write("""Version: 8.04 Component: %s Origin: Ubuntu Label: Ubuntu Architecture: %s """ % (component, arch)) fd.close() return repository landscape-client-14.01/landscape/package/tests/test_reporter.py0000644000175000017500000020640212301414317024503 0ustar andreasandreasimport sys import os import time import apt_pkg from twisted.internet.defer import Deferred, succeed, inlineCallbacks from twisted.internet import reactor from landscape.lib.fs import create_file, touch_file from landscape.lib.fetch import fetch_async, FetchError from landscape.lib import bpickle from landscape.package.store import ( PackageStore, UnknownHashIDRequest, FakePackageStore) from landscape.package.reporter import ( PackageReporter, HASH_ID_REQUEST_TIMEOUT, main, find_reporter_command, PackageReporterConfiguration, FakeGlobalReporter, FakeReporter) from landscape.package import reporter from landscape.package.facade import AptFacade from landscape.package.tests.helpers import ( AptFacadeHelper, SimpleRepositoryHelper, HASH1, HASH2, HASH3, PKGNAME1) from landscape.tests.helpers import ( LandscapeTest, BrokerServiceHelper, EnvironSaverHelper) from landscape.tests.mocker import ANY SAMPLE_LSB_RELEASE = "DISTRIB_CODENAME=codename\n" class PackageReporterConfigurationTest(LandscapeTest): def test_force_apt_update_option(self): """ The L{PackageReporterConfiguration} supports a '--force-apt-update' command line option. 
""" config = PackageReporterConfiguration() config.default_config_filenames = (self.makeFile(""), ) self.assertFalse(config.force_apt_update) config.load(["--force-apt-update"]) self.assertTrue(config.force_apt_update) class PackageReporterAptTest(LandscapeTest): helpers = [AptFacadeHelper, SimpleRepositoryHelper, BrokerServiceHelper] Facade = AptFacade def setUp(self): super(PackageReporterAptTest, self).setUp() self.store = PackageStore(self.makeFile()) self.config = PackageReporterConfiguration() self.reporter = PackageReporter( self.store, self.facade, self.remote, self.config) self.reporter.get_session_id() # Assume update-notifier-common stamp file is not present by # default. self.reporter.update_notifier_stamp = "/Not/Existing" self.config.data_path = self.makeDir() os.mkdir(self.config.package_directory) self.check_stamp_file = self.config.detect_package_changes_stamp def _clear_repository(self): """Remove all packages from self.repository.""" create_file(self.repository_dir + "/Packages", "") def set_pkg1_upgradable(self): """Make it so that package "name1" is considered to be upgradable. Return the hash of the package that upgrades "name1". """ self._add_package_to_deb_dir( self.repository_dir, "name1", version="version2") self.facade.reload_channels() name1_upgrade = sorted(self.facade.get_packages_by_name("name1"))[1] return self.facade.get_package_hash(name1_upgrade) def set_pkg1_installed(self): """Make it so that package "name1" is considered installed.""" self._install_deb_file(os.path.join(self.repository_dir, PKGNAME1)) def _make_fake_apt_update(self, out="output", err="error", code=0): """Create a fake apt-update executable""" self.reporter.apt_update_filename = self.makeFile( "#!/bin/sh\n" "echo -n '%s'\n" "echo -n '%s' >&2\n" "exit %d" % (out, err, code)) os.chmod(self.reporter.apt_update_filename, 0755) def test_set_package_ids_with_all_known(self): self.store.add_hash_id_request(["hash1", "hash2"]) request2 = self.store.add_hash_id_request(["hash3", "hash4"]) self.store.add_hash_id_request(["hash5", "hash6"]) self.store.add_task("reporter", {"type": "package-ids", "ids": [123, 456], "request-id": request2.id}) def got_result(result): self.assertEqual(self.store.get_hash_id("hash1"), None) self.assertEqual(self.store.get_hash_id("hash2"), None) self.assertEqual(self.store.get_hash_id("hash3"), 123) self.assertEqual(self.store.get_hash_id("hash4"), 456) self.assertEqual(self.store.get_hash_id("hash5"), None) self.assertEqual(self.store.get_hash_id("hash6"), None) deferred = self.reporter.handle_tasks() return deferred.addCallback(got_result) def test_set_package_ids_with_unknown_request_id(self): self.store.add_task("reporter", {"type": "package-ids", "ids": [123, 456], "request-id": 123}) # Nothing bad should happen. return self.reporter.handle_tasks() def test_set_package_ids_with_unknown_hashes(self): message_store = self.broker_service.message_store message_store.set_accepted_types(["add-packages"]) request1 = self.store.add_hash_id_request(["foo", HASH1, "bar"]) self.store.add_task("reporter", {"type": "package-ids", "ids": [123, None, 456], "request-id": request1.id}) def got_result(result): message = message_store.get_pending_messages()[0] # The server will answer the "add-packages" message with a # "package-ids" message, so we must keep track of the hashes # for packages sent. 
request2 = self.store.get_hash_id_request(message["request-id"]) self.assertEqual(request2.hashes, [HASH1]) # Keeping track of the message id for the message with the # package data allows us to tell if we should consider our # request as lost for some reason, and thus re-request it. message_id = request2.message_id self.assertEqual(type(message_id), int) self.assertTrue(message_store.is_pending(message_id)) self.assertMessages( message_store.get_pending_messages(), [{"packages": [{"description": u"Description1", "installed-size": 28672, "name": u"name1", "relations": [(131074, u"providesname1"), (196610, u"name1 = version1-release1"), (262148, u"prerequirename1 = prerequireversion1"), (262148, u"requirename1 = requireversion1"), (393224, u"name1 < version1-release1"), (458768, u"conflictsname1 = conflictsversion1")], "section": u"Group1", "size": 1038, "summary": u"Summary1", "type": 65537, "version": u"version1-release1"}], "request-id": request2.id, "type": "add-packages"}]) deferred = self.reporter.handle_tasks() return deferred.addCallback(got_result) def test_set_package_ids_with_unknown_hashes_and_size_none(self): message_store = self.broker_service.message_store message_store.set_accepted_types(["add-packages"]) request1 = self.store.add_hash_id_request(["foo", HASH1, "bar"]) self.store.add_task("reporter", {"type": "package-ids", "ids": [123, None, 456], "request-id": request1.id}) def got_result(result): message = message_store.get_pending_messages()[0] request2 = self.store.get_hash_id_request(message["request-id"]) self.assertMessages( message_store.get_pending_messages(), [{"packages": [{"description": u"Description1", "installed-size": None, "name": u"name1", "relations": [], "section": u"Group1", "size": None, "summary": u"Summary1", "type": 65537, "version": u"version1-release1"}], "request-id": request2.id, "type": "add-packages"}]) class FakePackage(object): type = 65537 name = u"name1" version = u"version1-release1" section = u"Group1" summary = u"Summary1" description = u"Description1" size = None installed_size = None relations = [] mock_facade = self.mocker.patch(self.Facade) mock_facade.get_package_skeleton(ANY) self.mocker.result(FakePackage()) self.mocker.replay() deferred = self.reporter.handle_tasks() return deferred.addCallback(got_result) def test_set_package_ids_with_unknown_hashes_and_failed_send_msg(self): message_store = self.broker_service.message_store message_store.set_accepted_types(["add-packages"]) class Boom(Exception): pass deferred = Deferred() deferred.errback(Boom()) remote_mock = self.mocker.patch(self.reporter._broker) remote_mock.send_message(ANY, ANY, True) self.mocker.result(deferred) self.mocker.replay() request_id = self.store.add_hash_id_request(["foo", HASH1, "bar"]).id self.store.add_task("reporter", {"type": "package-ids", "ids": [123, None, 456], "request-id": request_id}) def got_result(result): self.assertMessages(message_store.get_pending_messages(), []) self.assertEqual( [request.id for request in self.store.iter_hash_id_requests()], [request_id]) result = self.reporter.handle_tasks() self.assertFailure(result, Boom) return result.addCallback(got_result) def test_set_package_ids_removes_request_id_when_done(self): request = self.store.add_hash_id_request(["hash1"]) self.store.add_task("reporter", {"type": "package-ids", "ids": [123], "request-id": request.id}) def got_result(result): self.assertRaises(UnknownHashIDRequest, self.store.get_hash_id_request, request.id) deferred = self.reporter.handle_tasks() return 
deferred.addCallback(got_result) def test_fetch_hash_id_db(self): # Assume package_hash_id_url is set self.config.data_path = self.makeDir() self.config.package_hash_id_url = "http://fake.url/path/" os.makedirs(os.path.join(self.config.data_path, "package", "hash-id")) hash_id_db_filename = os.path.join(self.config.data_path, "package", "hash-id", "uuid_codename_arch") # Fake uuid, codename and arch message_store = self.broker_service.message_store message_store.set_server_uuid("uuid") self.reporter.lsb_release_filename = self.makeFile(SAMPLE_LSB_RELEASE) self.facade.set_arch("arch") # Let's say fetch_async is successful hash_id_db_url = self.config.package_hash_id_url + "uuid_codename_arch" fetch_async_mock = self.mocker.replace("landscape.lib." "fetch.fetch_async") fetch_async_mock(hash_id_db_url, cainfo=None) fetch_async_result = Deferred() fetch_async_result.callback("hash-ids") self.mocker.result(fetch_async_result) # The download should be properly logged logging_mock = self.mocker.replace("logging.info") logging_mock("Downloaded hash=>id database from %s" % hash_id_db_url) self.mocker.result(None) # We don't have our hash=>id database yet self.assertFalse(os.path.exists(hash_id_db_filename)) # Now go! self.mocker.replay() result = self.reporter.fetch_hash_id_db() # Check the database def callback(ignored): self.assertTrue(os.path.exists(hash_id_db_filename)) self.assertEqual(open(hash_id_db_filename).read(), "hash-ids") result.addCallback(callback) return result def test_fetch_hash_id_db_does_not_download_twice(self): # Let's say that the hash=>id database is already there self.config.package_hash_id_url = "http://fake.url/path/" self.config.data_path = self.makeDir() os.makedirs(os.path.join(self.config.data_path, "package", "hash-id")) hash_id_db_filename = os.path.join(self.config.data_path, "package", "hash-id", "uuid_codename_arch") open(hash_id_db_filename, "w").write("test") # Fake uuid, codename and arch message_store = self.broker_service.message_store message_store.set_server_uuid("uuid") self.reporter.lsb_release_filename = self.makeFile(SAMPLE_LSB_RELEASE) self.facade.set_arch("arch") # Intercept any call to fetch_async fetch_async_mock = self.mocker.replace("landscape.lib." "fetch.fetch_async") fetch_async_mock(ANY) # Go! self.mocker.replay() result = self.reporter.fetch_hash_id_db() def callback(ignored): # Check that fetch_async hasn't been called self.assertRaises(AssertionError, self.mocker.verify) fetch_async(None) # The hash=>id database is still there self.assertEqual(open(hash_id_db_filename).read(), "test") result.addCallback(callback) return result def test_fetch_hash_id_db_undetermined_server_uuid(self): """ If the server-uuid can't be determined for some reason, no download should be attempted and the failure should be properly logged. 
""" message_store = self.broker_service.message_store message_store.set_server_uuid(None) logging_mock = self.mocker.replace("logging.warning") logging_mock("Couldn't determine which hash=>id database to use: " "server UUID not available") self.mocker.result(None) self.mocker.replay() result = self.reporter.fetch_hash_id_db() return result def test_fetch_hash_id_db_undetermined_codename(self): # Fake uuid message_store = self.broker_service.message_store message_store.set_server_uuid("uuid") # Undetermined codename self.reporter.lsb_release_filename = self.makeFile("Foo=bar") # The failure should be properly logged logging_mock = self.mocker.replace("logging.warning") logging_mock("Couldn't determine which hash=>id database to use: " "missing code-name key in %s" % self.reporter.lsb_release_filename) self.mocker.result(None) # Go! self.mocker.replay() result = self.reporter.fetch_hash_id_db() return result def test_fetch_hash_id_db_undetermined_arch(self): # Fake uuid and codename message_store = self.broker_service.message_store message_store.set_server_uuid("uuid") self.reporter.lsb_release_filename = self.makeFile(SAMPLE_LSB_RELEASE) # Undetermined arch self.facade.set_arch("") # The failure should be properly logged logging_mock = self.mocker.replace("logging.warning") logging_mock("Couldn't determine which hash=>id database to use: " "unknown dpkg architecture") self.mocker.result(None) # Go! self.mocker.replay() result = self.reporter.fetch_hash_id_db() return result def test_fetch_hash_id_db_with_default_url(self): # Let's say package_hash_id_url is not set but url is self.config.data_path = self.makeDir() self.config.package_hash_id_url = None self.config.url = "http://fake.url/path/message-system/" os.makedirs(os.path.join(self.config.data_path, "package", "hash-id")) hash_id_db_filename = os.path.join(self.config.data_path, "package", "hash-id", "uuid_codename_arch") # Fake uuid, codename and arch message_store = self.broker_service.message_store message_store.set_server_uuid("uuid") self.reporter.lsb_release_filename = self.makeFile(SAMPLE_LSB_RELEASE) self.facade.set_arch("arch") # Check fetch_async is called with the default url hash_id_db_url = "http://fake.url/path/hash-id-databases/" \ "uuid_codename_arch" fetch_async_mock = self.mocker.replace("landscape.lib." "fetch.fetch_async") fetch_async_mock(hash_id_db_url, cainfo=None) fetch_async_result = Deferred() fetch_async_result.callback("hash-ids") self.mocker.result(fetch_async_result) # Now go! self.mocker.replay() result = self.reporter.fetch_hash_id_db() # Check the database def callback(ignored): self.assertTrue(os.path.exists(hash_id_db_filename)) self.assertEqual(open(hash_id_db_filename).read(), "hash-ids") result.addCallback(callback) return result def test_fetch_hash_id_db_with_download_error(self): # Assume package_hash_id_url is set self.config.data_path = self.makeDir() self.config.package_hash_id_url = "http://fake.url/path/" # Fake uuid, codename and arch message_store = self.broker_service.message_store message_store.set_server_uuid("uuid") self.reporter.lsb_release_filename = self.makeFile(SAMPLE_LSB_RELEASE) self.facade.set_arch("arch") # Let's say fetch_async fails hash_id_db_url = self.config.package_hash_id_url + "uuid_codename_arch" fetch_async_mock = self.mocker.replace("landscape.lib." 
"fetch.fetch_async") fetch_async_mock(hash_id_db_url, cainfo=None) fetch_async_result = Deferred() fetch_async_result.errback(FetchError("fetch error")) self.mocker.result(fetch_async_result) # The failure should be properly logged logging_mock = self.mocker.replace("logging.warning") logging_mock("Couldn't download hash=>id database: fetch error") self.mocker.result(None) # Now go! self.mocker.replay() result = self.reporter.fetch_hash_id_db() # We shouldn't have any hash=>id database def callback(ignored): hash_id_db_filename = os.path.join( self.config.data_path, "package", "hash-id", "uuid_codename_arch") self.assertEqual(os.path.exists(hash_id_db_filename), False) result.addCallback(callback) return result def test_fetch_hash_id_db_with_undetermined_url(self): # We don't know where to fetch the hash=>id database from self.config.url = None self.config.package_hash_id_url = None # Fake uuid, codename and arch message_store = self.broker_service.message_store message_store.set_server_uuid("uuid") self.reporter.lsb_release_filename = self.makeFile(SAMPLE_LSB_RELEASE) self.facade.set_arch("arch") # The failure should be properly logged logging_mock = self.mocker.replace("logging.warning") logging_mock("Can't determine the hash=>id database url") self.mocker.result(None) # Let's go self.mocker.replay() result = self.reporter.fetch_hash_id_db() # We shouldn't have any hash=>id database def callback(ignored): hash_id_db_filename = os.path.join( self.config.data_path, "package", "hash-id", "uuid_codename_arch") self.assertEqual(os.path.exists(hash_id_db_filename), False) result.addCallback(callback) return result def test_fetch_hash_id_db_with_custom_certificate(self): """ The L{PackageReporter.fetch_hash_id_db} method takes into account the possible custom SSL certificate specified in the client configuration. """ self.config.url = "http://fake.url/path/message-system/" self.config.ssl_public_key = "/some/key" # Fake uuid, codename and arch message_store = self.broker_service.message_store message_store.set_server_uuid("uuid") self.reporter.lsb_release_filename = self.makeFile(SAMPLE_LSB_RELEASE) self.facade.set_arch("arch") # Check fetch_async is called with the default url hash_id_db_url = "http://fake.url/path/hash-id-databases/" \ "uuid_codename_arch" fetch_async_mock = self.mocker.replace("landscape.lib." "fetch.fetch_async") fetch_async_mock(hash_id_db_url, cainfo=self.config.ssl_public_key) fetch_async_result = Deferred() fetch_async_result.callback("hash-ids") self.mocker.result(fetch_async_result) # Now go! self.mocker.replay() result = self.reporter.fetch_hash_id_db() return result def test_wb_apt_sources_have_changed(self): """ The L{PackageReporter._apt_sources_have_changed} method returns a bool indicating if the APT sources list file has changed recently. 
""" self.reporter.sources_list_filename = "/I/Dont/Exist" self.reporter.sources_list_directory = "/I/Dont/Exist/At/All" self.assertFalse(self.reporter._apt_sources_have_changed()) self.reporter.sources_list_filename = self.makeFile("foo") self.assertTrue(self.reporter._apt_sources_have_changed()) os.utime(self.reporter.sources_list_filename, (-1, time.time() - 1799)) self.assertTrue(self.reporter._apt_sources_have_changed()) os.utime(self.reporter.sources_list_filename, (-1, time.time() - 1800)) self.assertFalse(self.reporter._apt_sources_have_changed()) def test_wb_apt_sources_have_changed_with_directory(self): """ The L{PackageReporter._apt_sources_have_changed} checks also for possible additional sources files under /etc/apt/sources.d. """ self.reporter.sources_list_filename = "/I/Dont/Exist/At/All" self.reporter.sources_list_directory = self.makeDir() self.makeFile(dirname=self.reporter.sources_list_directory, content="deb http://foo ./") self.assertTrue(self.reporter._apt_sources_have_changed()) def test_remove_expired_hash_id_request(self): request = self.store.add_hash_id_request(["hash1"]) request.message_id = 9999 request.timestamp -= HASH_ID_REQUEST_TIMEOUT def got_result(result): self.assertRaises(UnknownHashIDRequest, self.store.get_hash_id_request, request.id) result = self.reporter.remove_expired_hash_id_requests() return result.addCallback(got_result) def test_remove_expired_hash_id_request_wont_remove_before_timeout(self): request1 = self.store.add_hash_id_request(["hash1"]) request1.message_id = 9999 request1.timestamp -= HASH_ID_REQUEST_TIMEOUT / 2 initial_timestamp = request1.timestamp def got_result(result): request2 = self.store.get_hash_id_request(request1.id) self.assertTrue(request2) # Shouldn't update timestamp when already delivered. 
self.assertEqual(request2.timestamp, initial_timestamp) result = self.reporter.remove_expired_hash_id_requests() return result.addCallback(got_result) def test_remove_expired_hash_id_request_updates_timestamps(self): request = self.store.add_hash_id_request(["hash1"]) message_store = self.broker_service.message_store message_id = message_store.add({"type": "add-packages", "packages": [], "request-id": request.id}) request.message_id = message_id initial_timestamp = request.timestamp def got_result(result): self.assertTrue(request.timestamp > initial_timestamp) result = self.reporter.remove_expired_hash_id_requests() return result.addCallback(got_result) def test_remove_expired_hash_id_request_removes_when_no_message_id(self): request = self.store.add_hash_id_request(["hash1"]) def got_result(result): self.assertRaises(UnknownHashIDRequest, self.store.get_hash_id_request, request.id) result = self.reporter.remove_expired_hash_id_requests() return result.addCallback(got_result) def test_request_unknown_hashes(self): self.store.set_hash_ids({HASH2: 123}) message_store = self.broker_service.message_store message_store.set_accepted_types(["unknown-package-hashes"]) def got_result(result): self.assertMessages(message_store.get_pending_messages(), [{"hashes": EqualsHashes(HASH1, HASH3), "request-id": 1, "type": "unknown-package-hashes"}]) message = message_store.get_pending_messages()[0] request = self.store.get_hash_id_request(1) self.assertEqual(request.hashes, message["hashes"]) self.assertTrue(message_store.is_pending(request.message_id)) result = self.reporter.request_unknown_hashes() return result.addCallback(got_result) def test_request_unknown_hashes_limits_number_of_packages(self): message_store = self.broker_service.message_store message_store.set_accepted_types(["unknown-package-hashes"]) self.addCleanup(setattr, reporter, "MAX_UNKNOWN_HASHES_PER_REQUEST", reporter.MAX_UNKNOWN_HASHES_PER_REQUEST) reporter.MAX_UNKNOWN_HASHES_PER_REQUEST = 2 def got_result1(result): # The first message sent should send any 2 of the 3 hashes. self.assertEqual(len(message_store.get_pending_messages()), 1) message = message_store.get_pending_messages()[-1] self.assertEqual(len(message["hashes"]), 2) result2 = self.reporter.request_unknown_hashes() result2.addCallback(got_result2, message["hashes"]) return result2 def got_result2(result, hashes): # The second message sent should send the missing hash. self.assertEqual(len(message_store.get_pending_messages()), 2) message = message_store.get_pending_messages()[-1] self.assertEqual(len(message["hashes"]), 1) self.assertNotIn(message["hashes"][0], hashes) result1 = self.reporter.request_unknown_hashes() result1.addCallback(got_result1) return result1 def test_request_unknown_hashes_with_previously_requested(self): """ In this test we'll pretend that a couple of hashes were previously requested, and there's one new hash to be requested. 
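        Concretely, HASH1 and HASH3 are already covered by a pending
        hash-id request below, so only HASH2 should be sent in the new
        "unknown-package-hashes" message.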
""" message_store = self.broker_service.message_store message_store.set_accepted_types(["unknown-package-hashes"]) self.store.add_hash_id_request([HASH1, HASH3]) def got_result(result): self.assertMessages(message_store.get_pending_messages(), [{"hashes": [HASH2], "request-id": 2, "type": "unknown-package-hashes"}]) message = message_store.get_pending_messages()[0] request = self.store.get_hash_id_request(2) self.assertEqual(request.hashes, message["hashes"]) self.assertTrue(message_store.is_pending(request.message_id)) result = self.reporter.request_unknown_hashes() return result.addCallback(got_result) def test_request_unknown_hashes_with_all_previously_requested(self): message_store = self.broker_service.message_store message_store.set_accepted_types(["unknown-package-hashes"]) self.store.add_hash_id_request([HASH1, HASH2, HASH3]) def got_result(result): self.assertMessages(message_store.get_pending_messages(), []) result = self.reporter.request_unknown_hashes() return result.addCallback(got_result) def test_request_unknown_hashes_with_failing_send_message(self): """ When broker.send_message() fails, the hash_id_request shouldn't even be stored, because we have no message_id. """ message_store = self.broker_service.message_store message_store.set_accepted_types(["unknown-package-hashes"]) class Boom(Exception): pass deferred = Deferred() deferred.errback(Boom()) remote_mock = self.mocker.patch(self.reporter._broker) remote_mock.send_message(ANY, ANY, True) self.mocker.result(deferred) self.mocker.replay() def got_result(result): self.assertMessages(message_store.get_pending_messages(), []) self.assertEqual(list(self.store.iter_hash_id_requests()), []) result = self.reporter.request_unknown_hashes() self.assertFailure(result, Boom) return result.addCallback(got_result) def test_detect_packages_creates_stamp_file(self): """ When the detect_packages method computes package changes, it creates a stamp file after sending the message. 
""" message_store = self.broker_service.message_store message_store.set_accepted_types(["packages"]) self.store.set_hash_ids({HASH1: 1, HASH2: 2, HASH3: 3}) self.assertFalse(os.path.exists(self.check_stamp_file)) def got_result(result): self.assertMessages(message_store.get_pending_messages(), [{"type": "packages", "available": [(1, 3)]}]) self.assertTrue(os.path.exists(self.check_stamp_file)) result = self.reporter.detect_packages_changes() return result.addCallback(got_result) def test_detect_packages_changes_with_available(self): message_store = self.broker_service.message_store message_store.set_accepted_types(["packages"]) self.store.set_hash_ids({HASH1: 1, HASH2: 2, HASH3: 3}) def got_result(result): self.assertMessages(message_store.get_pending_messages(), [{"type": "packages", "available": [(1, 3)]}]) self.assertEqual(sorted(self.store.get_available()), [1, 2, 3]) result = self.reporter.detect_packages_changes() return result.addCallback(got_result) def test_detect_packages_changes_with_available_and_unknown_hash(self): message_store = self.broker_service.message_store message_store.set_accepted_types(["packages"]) self.store.set_hash_ids({HASH1: 1, HASH3: 3}) def got_result(result): self.assertMessages(message_store.get_pending_messages(), [{"type": "packages", "available": [1, 3]}]) self.assertEqual(sorted(self.store.get_available()), [1, 3]) result = self.reporter.detect_packages_changes() return result.addCallback(got_result) def test_detect_packages_changes_with_available_and_previously_known(self): message_store = self.broker_service.message_store message_store.set_accepted_types(["packages"]) self.store.set_hash_ids({HASH1: 1, HASH2: 2, HASH3: 3}) self.store.add_available([1, 3]) def got_result(result): self.assertMessages(message_store.get_pending_messages(), [{"type": "packages", "available": [2]}]) self.assertEqual(sorted(self.store.get_available()), [1, 2, 3]) result = self.reporter.detect_packages_changes() return result.addCallback(got_result) def test_detect_packages_changes_with_not_available(self): message_store = self.broker_service.message_store message_store.set_accepted_types(["packages"]) self._clear_repository() self.store.set_hash_ids({HASH1: 1, HASH2: 2, HASH3: 3}) self.store.add_available([1, 2, 3]) def got_result(result): self.assertMessages(message_store.get_pending_messages(), [{"type": "packages", "not-available": [(1, 3)]}]) self.assertEqual(self.store.get_available(), []) result = self.reporter.detect_packages_changes() return result.addCallback(got_result) def test_detect_packages_changes_with_installed(self): message_store = self.broker_service.message_store message_store.set_accepted_types(["packages"]) self.store.set_hash_ids({HASH1: 1, HASH2: 2, HASH3: 3}) self.store.add_available([1, 2, 3]) self.set_pkg1_installed() def got_result(result): self.assertMessages(message_store.get_pending_messages(), [{"type": "packages", "installed": [1]}]) self.assertEqual(self.store.get_installed(), [1]) result = self.reporter.detect_packages_changes() return result.addCallback(got_result) def test_detect_packages_changes_with_installed_already_known(self): message_store = self.broker_service.message_store message_store.set_accepted_types(["packages"]) self.store.set_hash_ids({HASH1: 1, HASH2: 2, HASH3: 3}) self.store.add_available([1, 2, 3]) self.store.add_installed([1]) self.set_pkg1_installed() def got_result(result): self.assertFalse(result) self.assertMessages(message_store.get_pending_messages(), []) result = self.reporter.detect_packages_changes() return 
result.addCallback(got_result) def test_detect_packages_changes_with_not_installed(self): message_store = self.broker_service.message_store message_store.set_accepted_types(["packages"]) self.store.set_hash_ids({HASH1: 1, HASH2: 2, HASH3: 3}) self.store.add_available([1, 2, 3]) self.store.add_installed([1]) def got_result(result): self.assertTrue(result) self.assertMessages(message_store.get_pending_messages(), [{"type": "packages", "not-installed": [1]}]) self.assertEqual(self.store.get_installed(), []) result = self.reporter.detect_packages_changes() return result.addCallback(got_result) def test_detect_packages_changes_with_upgrade_but_not_installed(self): message_store = self.broker_service.message_store message_store.set_accepted_types(["packages"]) upgrade_hash = self.set_pkg1_upgradable() self.store.set_hash_ids({HASH1: 1, upgrade_hash: 2, HASH3: 3}) self.store.add_available([1, 2, 3]) def got_result(result): self.assertMessages(message_store.get_pending_messages(), []) result = self.reporter.detect_packages_changes() return result.addCallback(got_result) def test_detect_packages_changes_with_upgrade(self): message_store = self.broker_service.message_store message_store.set_accepted_types(["packages"]) upgrade_hash = self.set_pkg1_upgradable() self.set_pkg1_installed() self.facade.reload_channels() self.store.set_hash_ids( {HASH1: 1, upgrade_hash: 2, HASH3: 3}) self.store.add_available([1, 2, 3]) self.store.add_installed([1]) def got_result(result): self.assertMessages(message_store.get_pending_messages(), [{"type": "packages", "available-upgrades": [2]}]) self.assertEqual(self.store.get_available_upgrades(), [2]) result = self.reporter.detect_packages_changes() return result.addCallback(got_result) def test_detect_packages_changes_with_not_upgrade(self): message_store = self.broker_service.message_store message_store.set_accepted_types(["packages"]) self.store.set_hash_ids({HASH1: 1, HASH2: 2, HASH3: 3}) self.store.add_available([1, 2, 3]) self.store.add_available_upgrades([2]) def got_result(result): self.assertMessages(message_store.get_pending_messages(), [{"type": "packages", "not-available-upgrades": [2]}]) self.assertEqual(self.store.get_available_upgrades(), []) result = self.reporter.detect_packages_changes() return result.addCallback(got_result) @inlineCallbacks def test_detect_packages_after_tasks(self): """ When the L{PackageReporter} got a task to handle, it forces itself to detect package changes, not checking the local state of package. """ message_store = self.broker_service.message_store message_store.set_accepted_types(["packages"]) self.store.set_hash_ids({HASH1: 1, HASH2: 2, HASH3: 3}) touch_file(self.check_stamp_file) self.store.add_task("reporter", {"type": "package-ids", "ids": [123, 456], "request-id": 123}) yield self.reporter.handle_tasks() yield self.reporter.detect_packages_changes() # We check that detect changes run by looking at messages self.assertMessages(message_store.get_pending_messages(), [{"type": "packages", "available": [(1, 3)]}]) def test_detect_packages_changes_with_not_locked_and_ranges(self): """ Ranges are used when reporting changes to 3 or more not locked packages having consecutive ids. 
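        For instance, the not-locked ids [1, 2, 3] below collapse to the
        single range (1, 3) in the "not-locked" field of the message.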
""" message_store = self.broker_service.message_store message_store.set_accepted_types(["packages"]) self.store.add_locked([1, 2, 3]) self.store.set_hash_ids({HASH1: 1, HASH2: 2, HASH3: 3}) self.store.add_available([1, 2, 3]) def got_result(result): self.assertMessages(message_store.get_pending_messages(), [{"type": "packages", "not-locked": [(1, 3)]}]) self.assertEqual(sorted(self.store.get_locked()), []) result = self.reporter.detect_packages_changes() return result.addCallback(got_result) def test_detect_changes_considers_packages_changes(self): """ The L{PackageReporter.detect_changes} method package changes. """ reporter_mock = self.mocker.patch(self.reporter) reporter_mock.detect_packages_changes() self.mocker.result(succeed(True)) self.mocker.replay() return self.reporter.detect_changes() def test_detect_changes_fires_package_data_changed(self): """ The L{PackageReporter.detect_changes} method fires an event of type 'package-data-changed' if we detected something has changed with respect to our previous run. """ reporter_mock = self.mocker.patch(self.reporter) reporter_mock.detect_packages_changes() self.mocker.result(succeed(True)) callback = self.mocker.mock() callback() self.mocker.replay() self.broker_service.reactor.call_on("package-data-changed", callback) return self.reporter.detect_changes() def test_run(self): reporter_mock = self.mocker.patch(self.reporter) self.mocker.order() results = [Deferred() for i in range(7)] reporter_mock.run_apt_update() self.mocker.result(results[0]) reporter_mock.fetch_hash_id_db() self.mocker.result(results[1]) reporter_mock.use_hash_id_db() self.mocker.result(results[2]) reporter_mock.handle_tasks() self.mocker.result(results[3]) reporter_mock.remove_expired_hash_id_requests() self.mocker.result(results[4]) reporter_mock.request_unknown_hashes() self.mocker.result(results[5]) reporter_mock.detect_changes() self.mocker.result(results[6]) self.mocker.replay() self.reporter.run() # It must raise an error because deferreds weren't yet fired. self.assertRaises(AssertionError, self.mocker.verify) # Call them in reversed order. It must not make a difference because # Twisted is ensuring that things run in the proper order. for deferred in reversed(results): deferred.callback(None) def test_main(self): run_task_handler = self.mocker.replace("landscape.package.taskhandler" ".run_task_handler", passthrough=False) run_task_handler(PackageReporter, ["ARGS"]) self.mocker.result("RESULT") self.mocker.replay() self.assertEqual(main(["ARGS"]), "RESULT") def test_find_reporter_command(self): dirname = self.makeDir() filename = self.makeFile("", dirname=dirname, basename="landscape-package-reporter") saved_argv = sys.argv try: sys.argv = [os.path.join(dirname, "landscape-monitor")] command = find_reporter_command() self.assertEqual(command, filename) finally: sys.argv = saved_argv def test_resynchronize(self): """ When a resynchronize task arrives, the reporter should clear out all the data in the package store, except the hash ids and the hash ids requests. This is done in the reporter so that we know it happens when no other reporter is possibly running at the same time. 
""" self._add_system_package("foo") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") foo_hash = self.facade.get_package_hash(foo) self.facade.set_package_hold(foo) self.facade.reload_channels() message_store = self.broker_service.message_store message_store.set_accepted_types(["package-locks"]) self.store.set_hash_ids({foo_hash: 3, HASH2: 4}) self.store.add_available([1]) self.store.add_available_upgrades([2]) self.store.add_installed([2]) self.store.add_locked([3]) request1 = self.store.add_hash_id_request(["hash3"]) request2 = self.store.add_hash_id_request(["hash4"]) # Set the message id to avoid the requests being deleted by the # L{PackageReporter.remove_expired_hash_id_requests} method. request1.message_id = 1 request2.message_id = 2 # Let's make sure the data is there. self.assertEqual(self.store.get_available_upgrades(), [2]) self.assertEqual(self.store.get_available(), [1]) self.assertEqual(self.store.get_installed(), [2]) self.assertEqual(self.store.get_hash_id_request(request1.id).id, request1.id) self.store.add_task("reporter", {"type": "resynchronize"}) deferred = self.reporter.run() def check_result(result): # The hashes should not go away. hash1 = self.store.get_hash_id(foo_hash) hash2 = self.store.get_hash_id(HASH2) self.assertEqual([hash1, hash2], [3, 4]) # But the other data should. self.assertEqual(self.store.get_available_upgrades(), []) # After running the resychronize task, detect_packages_changes is # called, and the existing known hashes are made available. self.assertEqual(self.store.get_available(), [4]) self.assertEqual(self.store.get_installed(), [3]) self.assertEqual(self.store.get_locked(), [3]) # The two original hash id requests should be still there, and # a new hash id request should also be detected for HASH3. requests_count = 0 new_request_found = False for request in self.store.iter_hash_id_requests(): requests_count += 1 if request.id == request1.id: self.assertEqual(request.hashes, ["hash3"]) elif request.id == request2.id: self.assertEqual(request.hashes, ["hash4"]) elif not new_request_found: self.assertEqual(request.hashes, [HASH3, HASH1]) else: self.fail("Unexpected hash-id request!") self.assertEqual(requests_count, 3) deferred.addCallback(check_result) return deferred def test_run_apt_update(self): """ The L{PackageReporter.run_apt_update} method should run apt-update. """ self.reporter.sources_list_filename = "/I/Dont/Exist" self.reporter.sources_list_directory = "/I/Dont/Exist" self._make_fake_apt_update() debug_mock = self.mocker.replace("logging.debug") debug_mock("'%s' exited with status 0 (out='output', err='error')" % self.reporter.apt_update_filename) warning_mock = self.mocker.replace("logging.warning") self.expect(warning_mock(ANY)).count(0) self.mocker.replay() deferred = Deferred() def do_test(): result = self.reporter.run_apt_update() def callback((out, err, code)): self.assertEqual("output", out) self.assertEqual("error", err) self.assertEqual(0, code) result.addCallback(callback) result.chainDeferred(deferred) reactor.callWhenRunning(do_test) return deferred def test_run_apt_update_with_force_apt_update(self): """ L{PackageReporter.run_apt_update} forces an apt-update run if the '--force-apt-update' command line option was passed. 
""" self.makeFile("", path=self.config.update_stamp_filename) self.config.load(["--force-apt-update"]) self._make_fake_apt_update() deferred = Deferred() def do_test(): result = self.reporter.run_apt_update() def callback((out, err, code)): self.assertEqual("output", out) result.addCallback(callback) result.chainDeferred(deferred) reactor.callWhenRunning(do_test) return deferred def test_run_apt_update_with_force_apt_update_if_sources_changed(self): """ L{PackageReporter.run_apt_update} forces an apt-update run if the APT sources.list file has changed. """ self.assertEqual(self.reporter.sources_list_filename, "/etc/apt/sources.list") self.reporter.sources_list_filename = self.makeFile("deb ftp://url ./") self._make_fake_apt_update() deferred = Deferred() def do_test(): result = self.reporter.run_apt_update() def callback((out, err, code)): self.assertEqual("output", out) result.addCallback(callback) result.chainDeferred(deferred) reactor.callWhenRunning(do_test) return deferred def test_run_apt_update_warns_about_failures(self): """ The L{PackageReporter.run_apt_update} method should log a warning in case apt-update terminates with a non-zero exit code. """ self._make_fake_apt_update(code=2) logging_mock = self.mocker.replace("logging.warning") logging_mock("'%s' exited with status 2" " (error)" % self.reporter.apt_update_filename) self.mocker.replay() deferred = Deferred() def do_test(): result = self.reporter.run_apt_update() def callback((out, err, code)): self.assertEqual("output", out) self.assertEqual("error", err) self.assertEqual(2, code) result.addCallback(callback) result.chainDeferred(deferred) reactor.callWhenRunning(do_test) return deferred def test_run_apt_update_report_apt_failure(self): """ If L{PackageReporter.run_apt_update} fails, a message is sent to the server reporting the error, to be able to fix the problem centrally. """ message_store = self.broker_service.message_store message_store.set_accepted_types(["package-reporter-result"]) self._make_fake_apt_update(code=2) deferred = Deferred() def do_test(): result = self.reporter.run_apt_update() def callback(ignore): self.assertMessages( message_store.get_pending_messages(), [{"type": "package-reporter-result", "code": 2, "err": u"error"}]) result.addCallback(callback) result.chainDeferred(deferred) reactor.callWhenRunning(do_test) return deferred def test_run_apt_update_report_no_sources(self): """ L{PackageReporter.run_apt_update} reports a failure if apt succeeds but there are no APT sources defined. APT doesn't fail if there are no sources, but we fake a failure in order to re-use the PackageReporterAlert on the server. """ self.facade.reset_channels() message_store = self.broker_service.message_store message_store.set_accepted_types(["package-reporter-result"]) self._make_fake_apt_update() deferred = Deferred() def do_test(): result = self.reporter.run_apt_update() def callback(ignore): error = "There are no APT sources configured in %s or %s." % ( self.reporter.sources_list_filename, self.reporter.sources_list_directory) self.assertMessages( message_store.get_pending_messages(), [{"type": "package-reporter-result", "code": 1, "err": error}]) result.addCallback(callback) result.chainDeferred(deferred) reactor.callWhenRunning(do_test) return deferred def test_run_apt_update_report_apt_failure_no_sources(self): """ If L{PackageReporter.run_apt_update} fails and there are no APT sources configured, the APT error takes precedence. 
""" self.facade.reset_channels() message_store = self.broker_service.message_store message_store.set_accepted_types(["package-reporter-result"]) self._make_fake_apt_update(code=2) deferred = Deferred() def do_test(): result = self.reporter.run_apt_update() def callback(ignore): self.assertMessages( message_store.get_pending_messages(), [{"type": "package-reporter-result", "code": 2, "err": u"error"}]) result.addCallback(callback) result.chainDeferred(deferred) reactor.callWhenRunning(do_test) return deferred def test_run_apt_update_report_success(self): """ L{PackageReporter.run_apt_update} also reports success to be able to know the proper state of the client. """ message_store = self.broker_service.message_store message_store.set_accepted_types(["package-reporter-result"]) self._make_fake_apt_update(err="message") deferred = Deferred() def do_test(): result = self.reporter.run_apt_update() def callback(ignore): self.assertMessages( message_store.get_pending_messages(), [{"type": "package-reporter-result", "code": 0, "err": u"message"}]) result.addCallback(callback) result.chainDeferred(deferred) reactor.callWhenRunning(do_test) return deferred def test_run_apt_update_no_run_in_interval(self): """ The L{PackageReporter.run_apt_update} logs a debug message if apt-update doesn't run because interval has not passed. """ self.reporter._apt_sources_have_changed = lambda: False self.makeFile("", path=self.config.update_stamp_filename) logging_mock = self.mocker.replace("logging.debug") logging_mock("'%s' didn't run, update interval has not passed" % self.reporter.apt_update_filename) self.mocker.replay() deferred = Deferred() def do_test(): result = self.reporter.run_apt_update() def callback((out, err, code)): self.assertEqual("", out) self.assertEqual("", err) self.assertEqual(0, code) result.addCallback(callback) result.chainDeferred(deferred) reactor.callWhenRunning(do_test) return deferred def test_run_apt_update_no_run_update_notifier_stamp_in_interval(self): """ The L{PackageReporter.run_apt_update} doesn't runs apt-update if the interval is passed but the stamp file from update-notifier-common reports that 'apt-get update' has been run in the interval. """ self.reporter._apt_sources_have_changed = lambda: False # The interval for the apt-update stamp file is expired. self.makeFile("", path=self.config.update_stamp_filename) expired_time = time.time() - self.config.apt_update_interval - 1 os.utime( self.config.update_stamp_filename, (expired_time, expired_time)) # The interval for the update-notifier-common stamp file is not # expired. self.reporter.update_notifier_stamp = self.makeFile("") logging_mock = self.mocker.replace("logging.debug") logging_mock("'%s' didn't run, update interval has not passed" % self.reporter.apt_update_filename) self.mocker.replay() deferred = Deferred() def do_test(): result = self.reporter.run_apt_update() def callback((out, err, code)): self.assertEqual("", out) self.assertEqual("", err) self.assertEqual(0, code) result.addCallback(callback) result.chainDeferred(deferred) reactor.callWhenRunning(do_test) return deferred def test_run_apt_update_runs_interval_expired(self): """ L{PackageReporter.run_apt_update} runs if both apt-update and update-notifier-common stamp files are present and the time interval has passed. """ self.reporter._apt_sources_have_changed = lambda: False expired_time = time.time() - self.config.apt_update_interval - 1 # The interval for both stamp files is expired. 
self.makeFile("", path=self.config.update_stamp_filename) os.utime( self.config.update_stamp_filename, (expired_time, expired_time)) self.reporter.update_notifier_stamp = self.makeFile("") os.utime( self.reporter.update_notifier_stamp, (expired_time, expired_time)) message_store = self.broker_service.message_store message_store.set_accepted_types(["package-reporter-result"]) self._make_fake_apt_update(err="message") deferred = Deferred() def do_test(): result = self.reporter.run_apt_update() def callback(ignore): self.assertMessages( message_store.get_pending_messages(), [{"type": "package-reporter-result", "code": 0, "err": u"message"}]) result.addCallback(callback) result.chainDeferred(deferred) reactor.callWhenRunning(do_test) return deferred def test_run_apt_update_touches_stamp_file(self): """ The L{PackageReporter.run_apt_update} method touches a stamp file after running the apt-update wrapper. """ self.reporter.sources_list_filename = "/I/Dont/Exist" self._make_fake_apt_update() deferred = Deferred() def do_test(): result = self.reporter.run_apt_update() def callback(ignored): self.assertTrue( os.path.exists(self.config.update_stamp_filename)) result.addCallback(callback) result.chainDeferred(deferred) reactor.callWhenRunning(do_test) return deferred def test_run_apt_update_error_on_cache_file(self): """ L{PackageReporter.run_apt_update} succeeds if the command fails because one of the cache files is not found. This generally occurs if 'apt-get clean' has been concurrently run with 'apt-get update'. This is not an issue for the package lists update. """ message_store = self.broker_service.message_store message_store.set_accepted_types(["package-reporter-result"]) self._make_fake_apt_update( code=2, out="not important", err=("E: Problem renaming the file " "/var/cache/apt/pkgcache.bin.6ZsRSX to " "/var/cache/apt/pkgcache.bin - rename (2: No such file " "or directory)\n" "W: You may want to run apt-get update to correct these " "problems")) deferred = Deferred() def do_test(): result = self.reporter.run_apt_update() def callback(ignore): self.assertMessages( message_store.get_pending_messages(), [{"type": "package-reporter-result", "code": 0, "err": u""}]) result.addCallback(callback) result.chainDeferred(deferred) reactor.callWhenRunning(do_test) return deferred def test_run_apt_update_error_no_cache_files(self): """ L{PackageReporter.run_apt_update} succeeds if the command fails because cache files are not found. """ message_store = self.broker_service.message_store message_store.set_accepted_types(["package-reporter-result"]) self._make_fake_apt_update( code=2, out="not important", err=("E: Problem renaming the file " "/var/cache/apt/srcpkgcache.bin.Pw1Zxy to " "/var/cache/apt/srcpkgcache.bin - rename (2: No such file " "or directory)\n" "E: Problem renaming the file " "/var/cache/apt/pkgcache.bin.wz8ooS to " "/var/cache/apt/pkgcache.bin - rename (2: No such file " "or directory)\n" "E: The package lists or status file could not be parsed " "or opened.")) deferred = Deferred() def do_test(): result = self.reporter.run_apt_update() def callback(ignore): self.assertMessages( message_store.get_pending_messages(), [{"type": "package-reporter-result", "code": 0, "err": u""}]) result.addCallback(callback) result.chainDeferred(deferred) reactor.callWhenRunning(do_test) return deferred def test_config_apt_update_interval(self): """ L{PackageReporter} uses the C{apt_update_interval} configuration parameter to check the age of the update stamp file. 
""" self.config.apt_update_interval = 1234 message_store = self.broker_service.message_store message_store.set_accepted_types(["package-reporter-result"]) intervals = [] def apt_update_timeout_expired(interval): intervals.append(interval) return False deferred = Deferred() self.reporter._apt_sources_have_changed = lambda: False self.reporter._apt_update_timeout_expired = apt_update_timeout_expired def do_test(): result = self.reporter.run_apt_update() def callback(ignore): self.assertMessages(message_store.get_pending_messages(), []) self.assertEqual([1234], intervals) result.addCallback(callback) result.chainDeferred(deferred) reactor.callWhenRunning(do_test) return deferred def test_detect_packages_doesnt_creates_stamp_files(self): """ Stamp file is created if not present, and the method returns that the information changed in that case. """ result = self.reporter._package_state_has_changed() self.assertTrue(result) self.assertFalse(os.path.exists(self.check_stamp_file)) def test_detect_packages_changes_returns_false_if_unchanged(self): """ If a monitored file is not changed (touched), the method returns False. """ touch_file(self.check_stamp_file, offset_seconds=2) result = self.reporter._package_state_has_changed() self.assertFalse(result) def test_detect_packages_changes_returns_true_if_changed(self): """ If a monitored file is changed (touched), the method returns True. """ status_file = apt_pkg.config.find_file("dir::state::status") touch_file(status_file) touch_file(self.check_stamp_file) touch_file(status_file) result = self.reporter._package_state_has_changed() self.assertTrue(result) def test_detect_packages_changes_works_for_list_files(self): """ If a list file is touched, the method returns True. """ status_file = apt_pkg.config.find_file("dir::state::status") touch_file(status_file) touch_file(self.check_stamp_file) list_dir = apt_pkg.config.find_dir("dir::state::lists") # There are no *Packages files in the fixures, let's create one. touch_file(os.path.join(list_dir, "testPackages")) result = self.reporter._package_state_has_changed() self.assertTrue(result) def test_detect_packages_changes_detects_removed_list_file(self): """ If a list file is removed from the system, the method returns True. """ list_dir = apt_pkg.config.find_dir("dir::state::lists") test_file = os.path.join(list_dir, "testPackages") touch_file(test_file) touch_file(self.check_stamp_file) os.remove(test_file) result = self.reporter._package_state_has_changed() self.assertTrue(result) class GlobalPackageReporterAptTest(LandscapeTest): helpers = [AptFacadeHelper, SimpleRepositoryHelper, BrokerServiceHelper] def setUp(self): super(GlobalPackageReporterAptTest, self).setUp() self.store = FakePackageStore(self.makeFile()) self.config = PackageReporterConfiguration() self.reporter = FakeGlobalReporter( self.store, self.facade, self.remote, self.config) # Assume update-notifier-common stamp file is not present by # default. self.reporter.update_notifier_stamp = "/Not/Existing" self.config.data_path = self.makeDir() os.mkdir(self.config.package_directory) def test_store_messages(self): """ L{FakeGlobalReporter} stores messages which are sent. 
""" message_store = self.broker_service.message_store message_store.set_accepted_types(["package-reporter-result"]) self.reporter.apt_update_filename = self.makeFile( "#!/bin/sh\necho -n error >&2\necho -n output\nexit 0") os.chmod(self.reporter.apt_update_filename, 0755) deferred = Deferred() def do_test(): self.reporter.get_session_id() result = self.reporter.run_apt_update() def callback(ignore): message = {"type": "package-reporter-result", "code": 0, "err": u"error"} self.assertMessages( message_store.get_pending_messages(), [message]) stored = list(self.store._db.execute( "SELECT id, data FROM message").fetchall()) self.assertEqual(1, len(stored)) self.assertEqual(1, stored[0][0]) self.assertEqual(message, bpickle.loads(str(stored[0][1]))) result.addCallback(callback) result.chainDeferred(deferred) reactor.callWhenRunning(do_test) return deferred class FakePackageReporterTest(LandscapeTest): helpers = [EnvironSaverHelper, BrokerServiceHelper] def setUp(self): super(FakePackageReporterTest, self).setUp() self.store = FakePackageStore(self.makeFile()) global_file = self.makeFile() self.global_store = FakePackageStore(global_file) os.environ["FAKE_PACKAGE_STORE"] = global_file self.config = PackageReporterConfiguration() self.reporter = FakeReporter( self.store, None, self.remote, self.config) self.config.data_path = self.makeDir() os.mkdir(self.config.package_directory) def test_send_messages(self): """ L{FakeReporter} sends messages stored in the global store specified by C{FAKE_PACKAGE_STORE}. """ message_store = self.broker_service.message_store message_store.set_accepted_types(["package-reporter-result"]) message = {"type": "package-reporter-result", "code": 0, "err": u"error"} self.global_store.save_message(message) def check(ignore): messages = message_store.get_pending_messages() self.assertMessages( messages, [message]) stored = list(self.store._db.execute( "SELECT id FROM message").fetchall()) self.assertEqual(1, len(stored)) self.assertEqual(1, stored[0][0]) deferred = self.reporter.run() deferred.addCallback(check) return deferred def test_filter_message_type(self): """ L{FakeReporter} only sends one message of each type per run. 
""" message_store = self.broker_service.message_store message_store.set_accepted_types(["package-reporter-result"]) message1 = {"type": "package-reporter-result", "code": 0, "err": u"error"} self.global_store.save_message(message1) message2 = {"type": "package-reporter-result", "code": 1, "err": u"error"} self.global_store.save_message(message2) def check1(ignore): self.assertMessages( message_store.get_pending_messages(), [message1]) stored = list(self.store._db.execute( "SELECT id FROM message").fetchall()) self.assertEqual(1, stored[0][0]) return self.reporter.run().addCallback(check2) def check2(ignore): self.assertMessages( message_store.get_pending_messages(), [message1, message2]) stored = list(self.store._db.execute( "SELECT id FROM message").fetchall()) self.assertEqual(2, len(stored)) self.assertEqual(1, stored[0][0]) self.assertEqual(2, stored[1][0]) return self.reporter.run().addCallback(check1) class EqualsHashes(object): def __init__(self, *hashes): self._hashes = sorted(hashes) def __eq__(self, other): return self._hashes == sorted(other) landscape-client-14.01/landscape/package/tests/test_facade.py0000644000175000017500000033121512301414317024045 0ustar andreasandreasimport os import sys import textwrap import tempfile import apt_pkg from apt.package import Package from aptsources.sourceslist import SourcesList from apt.cache import LockFailedException from landscape.constants import UBUNTU_PATH from landscape.lib.fs import read_file, create_file from landscape.package.facade import ( TransactionError, DependencyError, ChannelError, AptFacade, LandscapeInstallProgress) from landscape.tests.mocker import ANY from landscape.tests.helpers import LandscapeTest, EnvironSaverHelper from landscape.package.tests.helpers import ( HASH1, HASH2, HASH3, PKGNAME1, PKGNAME2, PKGNAME3, PKGDEB1, PKGNAME_MINIMAL, PKGDEB_MINIMAL, create_deb, AptFacadeHelper, create_simple_repository) class FakeOwner(object): """Fake Owner object that apt.progress.text.AcquireProgress expects.""" def __init__(self, filesize, error_text=""): self.id = None self.filesize = filesize self.complete = False self.status = None self.STAT_DONE = object() self.error_text = error_text class FakeFetchItem(object): """Fake Item object that apt.progress.text.AcquireProgress expects.""" def __init__(self, owner, description): self.owner = owner self.description = description class AptFacadeTest(LandscapeTest): helpers = [AptFacadeHelper, EnvironSaverHelper] def setUp(self): super(AptFacadeTest, self).setUp() self.facade.max_dpkg_retries = 0 self.facade.dpkg_retry_sleep = 0 def version_sortkey(self, version): """Return a key by which a Version object can be sorted.""" return (version.package, version) def patch_cache_commit(self, commit_function=None): """Patch the apt cache's commit function as to not call dpkg. @param commit_function: A function accepting two parameters, fetch_progress and install_progress. """ def commit(fetch_progress, install_progress): install_progress.dpkg_exited = True if commit_function: commit_function(fetch_progress, install_progress) self.facade._cache.commit = commit def test_default_root(self): """ C{AptFacade} can be created by not providing a root directory, which means that the currently configured root (most likely /) will be used. """ original_dpkg_root = apt_pkg.config.get("Dir") facade = AptFacade() self.assertEqual(original_dpkg_root, apt_pkg.config.get("Dir")) # Make sure that at least reloading the channels work. 
facade.reload_channels() def test_custom_root_create_required_files(self): """ If a custom root is passed to the constructor, the directory and files that apt expects to be there will be created. """ root = self.makeDir() AptFacade(root=root) self.assertTrue(os.path.exists(os.path.join(root, "etc", "apt"))) self.assertTrue( os.path.exists(os.path.join(root, "etc", "apt", "sources.list.d"))) self.assertTrue(os.path.exists( os.path.join(root, "var", "cache", "apt", "archives", "partial"))) self.assertTrue(os.path.exists( os.path.join(root, "var", "lib", "apt", "lists", "partial"))) self.assertTrue( os.path.exists(os.path.join(root, "var", "lib", "dpkg", "status"))) def test_no_system_packages(self): """ If the dpkg status file is empty, not packages are reported by C{get_packages()}. """ self.facade.reload_channels() self.assertEqual([], list(self.facade.get_packages())) def test_get_packages_single_version(self): """ If the dpkg status file contains some packages, those packages are reported by C{get_packages()}. """ self._add_system_package("foo") self._add_system_package("bar") self.facade.reload_channels() self.assertEqual( ["bar", "foo"], sorted(version.package.name for version in self.facade.get_packages())) def test_get_packages_multiple_version(self): """ If there are multiple versions of a package, C{get_packages()} returns one object per version. """ deb_dir = self.makeDir() self._add_system_package("foo", version="1.0") self._add_package_to_deb_dir(deb_dir, "foo", version="1.5") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() self.assertEqual( [("foo", "1.0"), ("foo", "1.5")], sorted((version.package.name, version.version) for version in self.facade.get_packages())) def test_get_packages_multiple_architectures(self): """ If there are multiple architectures for a package, only the native architecture is reported by C{get_packages()}. """ apt_pkg.config.clear("APT::Architectures") apt_pkg.config.set("APT::Architecture", "amd64") apt_pkg.config.set("APT::Architectures::", "amd64") apt_pkg.config.set("APT::Architectures::", "i386") facade = AptFacade(apt_pkg.config.get("Dir")) self._add_system_package("foo", version="1.0", architecture="amd64") self._add_system_package("bar", version="1.1", architecture="i386") facade.reload_channels() self.assertEqual([("foo", "1.0")], [(version.package.name, version.version) for version in facade.get_packages()]) def test_add_channel_apt_deb_without_components(self): """ C{add_channel_apt_deb()} adds a new deb URL to a file in sources.list.d. If no components are given, nothing is written after the dist. """ self.facade.add_channel_apt_deb("http://example.com/ubuntu", "lucid") list_filename = ( self.apt_root + "/etc/apt/sources.list.d/_landscape-internal-facade.list") sources_contents = read_file(list_filename) self.assertEqual( "deb http://example.com/ubuntu lucid\n", sources_contents) def test_add_channel_apt_deb_no_duplicate(self): """ C{add_channel_apt_deb} doesn't put duplicate lines in the landscape internal apt sources list. 
""" self.facade.add_channel_apt_deb("http://example.com/ubuntu", "lucid") self.facade.add_channel_apt_deb("http://example.com/ubuntu", "lucid") self.facade.add_channel_apt_deb("http://example.com/ubuntu", "lucid") list_filename = ( self.apt_root + "/etc/apt/sources.list.d/_landscape-internal-facade.list") sources_contents = read_file(list_filename) self.assertEqual( "deb http://example.com/ubuntu lucid\n", sources_contents) def test_add_channel_apt_deb_with_components(self): """ C{add_channel_apt_deb()} adds a new deb URL to a file in sources.list.d. If components are given, they are included after the dist. """ self.facade.add_channel_apt_deb( "http://example.com/ubuntu", "lucid", ["main", "restricted"]) list_filename = ( self.apt_root + "/etc/apt/sources.list.d/_landscape-internal-facade.list") sources_contents = read_file(list_filename) self.assertEqual( "deb http://example.com/ubuntu lucid main restricted\n", sources_contents) def test_add_channel_deb_dir_adds_deb_channel(self): """ C{add_channel_deb_dir()} adds a deb channel pointing to the directory containing the packages. """ deb_dir = self.makeDir() create_simple_repository(deb_dir) self.facade.add_channel_deb_dir(deb_dir) self.assertEqual(1, len(self.facade.get_channels())) self.assertEqual([{"baseurl": "file://%s" % deb_dir, "distribution": "./", "components": "", "type": "deb"}], self.facade.get_channels()) def test_clear_channels(self): """ C{clear_channels} revoves all the channels added to the facade. It also removes the internal .list file. """ deb_dir = self.makeDir() self.facade.add_channel_deb_dir(deb_dir) self.facade.add_channel_apt_deb("http://example.com/ubuntu", "lucid") self.facade.clear_channels() self.assertEqual([], self.facade.get_channels()) self.assertFalse( os.path.exists(self.facade._get_internal_sources_list())) def test_clear_channels_no_channels(self): """ If no channels have been added, C{clear_channels()} still succeeds. """ self.facade.clear_channels() self.assertEqual([], self.facade.get_channels()) def test_clear_channels_only_internal(self): """ Only channels added through the facade are removed by C{clear_channels}. Other .list files in sources.list.d as well as the sources.list file are intact. """ sources_list_file = apt_pkg.config.find_file("Dir::Etc::sourcelist") sources_list_d_file = os.path.join( apt_pkg.config.find_dir("Dir::Etc::sourceparts"), "example.list") create_file( sources_list_file, "deb http://example1.com/ubuntu lucid main") create_file( sources_list_d_file, "deb http://example2.com/ubuntu lucid main") self.facade.clear_channels() self.assertEqual( [{'baseurl': 'http://example1.com/ubuntu', 'components': 'main', 'distribution': 'lucid', 'type': 'deb'}, {'baseurl': 'http://example2.com/ubuntu', 'components': 'main', 'distribution': 'lucid', 'type': 'deb'}], self.facade.get_channels()) def test_get_package_stanza(self): """ C{get_package_stanza} returns an entry for the package that can be included in a Packages file. 
""" deb_dir = self.makeDir() create_deb(deb_dir, PKGNAME1, PKGDEB1) deb_file = os.path.join(deb_dir, PKGNAME1) stanza = self.facade.get_package_stanza(deb_file) SHA256 = ( "f899cba22b79780dbe9bbbb802ff901b7e432425c264dc72e6bb20c0061e4f26") self.assertEqual(textwrap.dedent("""\ Package: name1 Priority: optional Section: Group1 Installed-Size: 28 Maintainer: Gustavo Niemeyer Architecture: all Version: version1-release1 Provides: providesname1 Depends: requirename1 (= requireversion1) Pre-Depends: prerequirename1 (= prerequireversion1) Recommends: recommendsname1 (= recommendsversion1) Suggests: suggestsname1 (= suggestsversion1) Conflicts: conflictsname1 (= conflictsversion1) Filename: %(filename)s Size: 1038 MD5sum: efe83eb2b891046b303aaf9281c14e6e SHA1: b4ebcd2b0493008852a4954edc30a236d516c638 SHA256: %(sha256)s Description: Summary1 Description1 """ % {"filename": PKGNAME1, "sha256": SHA256}), stanza) def test_add_channel_deb_dir_creates_packages_file(self): """ C{add_channel_deb_dir} creates a Packages file in the directory with packages. """ deb_dir = self.makeDir() create_simple_repository(deb_dir) self.facade.add_channel_deb_dir(deb_dir) packages_contents = read_file(os.path.join(deb_dir, "Packages")) expected_contents = "\n".join( self.facade.get_package_stanza(os.path.join(deb_dir, pkg_name)) for pkg_name in [PKGNAME1, PKGNAME2, PKGNAME3]) self.assertEqual(expected_contents, packages_contents) def test_add_channel_deb_dir_get_packages(self): """ After calling {add_channel_deb_dir} and reloading the channels, the packages in the deb dir is included in the package list. """ deb_dir = self.makeDir() create_simple_repository(deb_dir) self.facade.add_channel_deb_dir(deb_dir) self.facade.reload_channels() self.assertEqual( ["name1", "name2", "name3"], sorted(version.package.name for version in self.facade.get_packages())) def test_get_channels_with_no_channels(self): """ If no deb URLs have been added, C{get_channels()} returns an empty list. """ self.assertEqual([], self.facade.get_channels()) def test_get_channels_with_channels(self): """ If deb URLs have been added, a list of dict is returned with information about the channels. """ self.facade.add_channel_apt_deb( "http://example.com/ubuntu", "lucid", ["main", "restricted"]) self.assertEqual([{"baseurl": "http://example.com/ubuntu", "distribution": "lucid", "components": "main restricted", "type": "deb"}], self.facade.get_channels()) def test_get_channels_with_disabled_channels(self): """ C{get_channels()} doesn't return disabled deb URLs. """ self.facade.add_channel_apt_deb( "http://enabled.example.com/ubuntu", "lucid", ["main"]) self.facade.add_channel_apt_deb( "http://disabled.example.com/ubuntu", "lucid", ["main"]) sources_list = SourcesList() for entry in sources_list: if "disabled" in entry.uri: entry.set_enabled(False) sources_list.save() self.assertEqual([{"baseurl": "http://enabled.example.com/ubuntu", "distribution": "lucid", "components": "main", "type": "deb"}], self.facade.get_channels()) def test_reset_channels(self): """ C{reset_channels()} disables all the configured deb URLs. """ self.facade.add_channel_apt_deb( "http://1.example.com/ubuntu", "lucid", ["main", "restricted"]) self.facade.add_channel_apt_deb( "http://2.example.com/ubuntu", "lucid", ["main", "restricted"]) self.facade.reset_channels() self.assertEqual([], self.facade.get_channels()) def test_reload_includes_added_channels(self): """ When reloading the channels, C{get_packages()} returns the packages in the channel. 
""" deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self._add_package_to_deb_dir(deb_dir, "bar") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() self.assertEqual( ["bar", "foo"], sorted(version.package.name for version in self.facade.get_packages())) def test_reload_channels_refetch_package_index(self): """ If C{refetch_package_index} is True, reload_channels will refetch the Packages files in the channels and rebuild the internal database. """ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() new_facade = AptFacade(root=self.apt_root) self._add_package_to_deb_dir(deb_dir, "bar") self._touch_packages_file(deb_dir) new_facade.refetch_package_index = True new_facade.reload_channels() self.assertEqual( ["bar", "foo"], sorted(version.package.name for version in new_facade.get_packages())) def test_reload_channels_not_refetch_package_index(self): """ If C{refetch_package_index} is False, reload_channels won't refetch the Packages files in the channels, and instead simply use the internal database that is already there. """ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() new_facade = AptFacade(root=self.apt_root) self._add_package_to_deb_dir(deb_dir, "bar") self._touch_packages_file(deb_dir) new_facade.refetch_package_index = False new_facade.reload_channels() self.assertEqual( ["foo"], sorted(version.package.name for version in new_facade.get_packages())) def test_reload_channels_force_reload_binaries(self): """ If C{force_reload_binaries} is True, reload_channels will refetch the Packages files in the channels and rebuild the internal database. """ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() self._add_package_to_deb_dir(deb_dir, "bar") self._touch_packages_file(deb_dir) self.facade.refetch_package_index = False self.facade.reload_channels(force_reload_binaries=True) self.assertEqual( ["bar", "foo"], sorted(version.package.name for version in self.facade.get_packages())) def test_reload_channels_no_force_reload_binaries(self): """ If C{force_reload_binaries} False, C{reload_channels} won't pass a sources_list parameter to limit to update to the internal repos only. """ passed_in_lists = [] def new_apt_update(sources_list=None): passed_in_lists.append(sources_list) self.facade.refetch_package_index = True self.facade._cache.update = new_apt_update self.facade.reload_channels(force_reload_binaries=False) self.assertEqual([None], passed_in_lists) def test_reload_channels_force_reload_binaries_no_internal_repos(self): """ If C{force_reload_binaries} is True, but there are no internal repos, C{reload_channels} won't update the package index if C{refetch_package_index} is False. """ passed_in_lists = [] def apt_update(sources_list=None): passed_in_lists.append(sources_list) self.facade.refetch_package_index = False self.facade._cache.update = apt_update self.facade.reload_channels(force_reload_binaries=True) self.assertEqual([], passed_in_lists) def test_reload_channels_force_reload_binaries_refetch_package_index(self): """ If C{refetch_package_index} is True, C{reload_channels} won't limit the update to the internal repos, even if C{force_reload_binaries} is specified. 
""" passed_in_lists = [] def new_apt_update(sources_list=None): passed_in_lists.append(sources_list) deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.refetch_package_index = True self.facade._cache.update = new_apt_update self.facade.reload_channels(force_reload_binaries=True) self.assertEqual([None], passed_in_lists) def test_reload_channels_force_reload_binaries_new_apt(self): """ If python-apt is new enough (i.e. the C{update()} method accepts a C{sources_list} parameter), the .list file containing the repos managed by the facade will be passed to C{update()}, so that only the internal repos are updated if C{force_reload_binaries} is specified. """ passed_in_lists = [] def new_apt_update(sources_list=None): passed_in_lists.append(sources_list) deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.refetch_package_index = False self.facade._cache.update = new_apt_update self.facade.reload_channels(force_reload_binaries=True) self.assertEqual( [self.facade._get_internal_sources_list()], passed_in_lists) def test_reload_channels_force_reload_binaries_old_apt(self): """ If python-apt is old (i.e. the C{update()} method doesn't accept a C{sources_list} parameter), everything will be updated if C{force_reload_binaries} is specified, since there is no API for limiting which repos should be updated. """ passed_in_lists = [] def old_apt_update(): passed_in_lists.append(None) deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.refetch_package_index = False self.facade._cache.update = old_apt_update self.facade.reload_channels(force_reload_binaries=True) self.assertEqual([None], passed_in_lists) def test_dont_refetch_package_index_by_default(self): """ By default, package indexes are not refetched, but the local database is used. """ new_facade = AptFacade(root=self.apt_root) self.assertFalse(new_facade.refetch_package_index) def test_ensure_channels_reloaded_do_not_reload_twice(self): """ C{ensure_channels_reloaded} refreshes the channels only when first called. If it's called more time, it has no effect. """ self._add_system_package("foo") self.facade.ensure_channels_reloaded() self.assertEqual( ["foo"], sorted(version.package.name for version in self.facade.get_packages())) self._add_system_package("bar") self.facade.ensure_channels_reloaded() self.assertEqual( ["foo"], sorted(version.package.name for version in self.facade.get_packages())) def test_ensure_channels_reloaded_reload_channels(self): """ C{ensure_channels_reloaded} doesn't refresh the channels if C{reload_chanels} have been called first. """ self._add_system_package("foo") self.facade.reload_channels() self.assertEqual( ["foo"], sorted(version.package.name for version in self.facade.get_packages())) self._add_system_package("bar") self.facade.ensure_channels_reloaded() self.assertEqual( ["foo"], sorted(version.package.name for version in self.facade.get_packages())) def test_reload_channels_with_channel_error(self): """ The C{reload_channels} method raises a L{ChannelsError} if apt fails to load the configured channels. """ self.facade.add_channel_apt_deb("non-proto://fail.url", "./") self.assertRaises(ChannelError, self.facade.reload_channels) def test_get_set_arch(self): """ C{get_arch} returns the architecture that APT is currently configured to use. 
C{set_arch} is used to set the architecture that APT should use. """ self.facade.set_arch("amd64") self.assertEqual("amd64", self.facade.get_arch()) self.facade.set_arch("i386") self.assertEqual("i386", self.facade.get_arch()) def test_get_set_arch_none(self): """ If C{None} is passed to C{set_arch()}, the architecture is set to "", since it can't be set to C{None}. This is to ensure compatibility with C{SmartFacade}, and the architecture should be set to C{None} in tests only. """ self.facade.set_arch(None) self.assertEqual("", self.facade.get_arch()) def test_set_arch_get_packages(self): """ After the architecture is set, APT really uses the value. """ self._add_system_package("i386-package", architecture="i386") self._add_system_package("amd64-package", architecture="amd64") self.facade.set_arch("i386") self.facade.reload_channels() self.assertEqual( ["i386-package"], sorted(version.package.name for version in self.facade.get_packages())) self.facade.set_arch("amd64") self.facade.reload_channels() self.assertEqual( ["amd64-package"], sorted(version.package.name for version in self.facade.get_packages())) def test_get_package_skeleton(self): """ C{get_package_skeleton} returns a C{PackageSkeleton} for a package. By default extra information is included, but it's possible to specify that only basic information should be included. The information about the package are unicode strings. """ deb_dir = self.makeDir() create_simple_repository(deb_dir) self.facade.add_channel_deb_dir(deb_dir) self.facade.reload_channels() [pkg1] = self.facade.get_packages_by_name("name1") [pkg2] = self.facade.get_packages_by_name("name2") skeleton1 = self.facade.get_package_skeleton(pkg1) self.assertTrue(isinstance(skeleton1.summary, unicode)) self.assertEqual("Summary1", skeleton1.summary) skeleton2 = self.facade.get_package_skeleton(pkg2, with_info=False) self.assertIs(None, skeleton2.summary) self.assertEqual(HASH1, skeleton1.get_hash()) self.assertEqual(HASH2, skeleton2.get_hash()) def test_get_package_hash(self): """ C{get_package_hash} returns the hash for a given package. """ deb_dir = self.makeDir() create_simple_repository(deb_dir) self.facade.add_channel_deb_dir(deb_dir) self.facade.reload_channels() [pkg] = self.facade.get_packages_by_name("name1") self.assertEqual(HASH1, self.facade.get_package_hash(pkg)) [pkg] = self.facade.get_packages_by_name("name2") self.assertEqual(HASH2, self.facade.get_package_hash(pkg)) def test_get_package_hashes(self): """ C{get_package_hashes} returns the hashes for all packages in the channels. """ deb_dir = self.makeDir() create_simple_repository(deb_dir) self.facade.add_channel_deb_dir(deb_dir) self.facade.reload_channels() hashes = self.facade.get_package_hashes() self.assertEqual(sorted(hashes), sorted([HASH1, HASH2, HASH3])) def test_get_package_by_hash(self): """ C{get_package_by_hash} returns the package that has the given hash. """ deb_dir = self.makeDir() create_simple_repository(deb_dir) self.facade.add_channel_deb_dir(deb_dir) self.facade.reload_channels() version = self.facade.get_package_by_hash(HASH1) self.assertEqual(version.package.name, "name1") version = self.facade.get_package_by_hash(HASH2) self.assertEqual(version.package.name, "name2") version = self.facade.get_package_by_hash("none") self.assertEqual(version, None) def test_wb_reload_channels_clears_hash_cache(self): """ To improve performance, the hashes for the packages are cached. When reloading the channels, the cache is recreated. """ # Load hashes. 
deb_dir = self.makeDir() create_simple_repository(deb_dir) self.facade.add_channel_deb_dir(deb_dir) self.facade.reload_channels() # Hold a reference to packages. [pkg1] = self.facade.get_packages_by_name("name1") [pkg2] = self.facade.get_packages_by_name("name2") [pkg3] = self.facade.get_packages_by_name("name3") self.assertTrue(pkg1 and pkg2) # Remove the package from the repository. packages_path = os.path.join(deb_dir, "Packages") os.unlink(os.path.join(deb_dir, PKGNAME1)) os.unlink(packages_path) self.facade._create_packages_file(deb_dir) # Forcibly change the mtime of our repository's Packages file, # so that apt will consider it as changed (if the change is # inside the same second the Packages' mtime will be the same) self._touch_packages_file(deb_dir) # Reload channel to reload the cache. self.facade.reload_channels() # Only packages with name2 and name3 should be loaded, and they're # not the same objects anymore. self.assertEqual( sorted([version.package.name for version in self.facade.get_packages()]), ["name2", "name3"]) self.assertNotEquals( set([version.package for version in self.facade.get_packages()]), set([pkg2.package, pkg3.package])) # The hash cache shouldn't include either of the old packages. self.assertEqual(self.facade.get_package_hash(pkg1), None) self.assertEqual(self.facade.get_package_hash(pkg2), None) self.assertEqual(self.facade.get_package_hash(pkg3), None) # Also, the hash for package1 shouldn't be present at all. self.assertEqual(self.facade.get_package_by_hash(HASH1), None) # While HASH2 and HASH3 should point to the new packages. We # look at the Package object instead of the Version objects, # since different Version objects may appear to be the same # object. new_pkgs = [version.package for version in self.facade.get_packages()] self.assertTrue( self.facade.get_package_by_hash(HASH2).package in new_pkgs) self.assertTrue( self.facade.get_package_by_hash(HASH3).package in new_pkgs) # Which are not the old packages. self.assertFalse(pkg2.package in new_pkgs) self.assertFalse(pkg3.package in new_pkgs) def test_is_package_installed_in_channel_not_installed(self): """ If a package is in a channel, but not installed, it's not considered installed. """ deb_dir = self.makeDir() create_simple_repository(deb_dir) self.facade.add_channel_deb_dir(deb_dir) self.facade.reload_channels() [package] = self.facade.get_packages_by_name("name1") self.assertFalse(self.facade.is_package_installed(package)) def test_is_package_installed_in_channel_installed(self): """ If a package is in a channel and installed, it's considered installed. """ deb_dir = self.makeDir() create_simple_repository(deb_dir) self.facade.add_channel_deb_dir(deb_dir) self._install_deb_file(os.path.join(deb_dir, PKGNAME1)) self.facade.reload_channels() [package] = self.facade.get_packages_by_name("name1") self.assertTrue(self.facade.is_package_installed(package)) def test_is_package_installed_other_verion_in_channel(self): """ If the there are other versions in the channels, only the installed version of thepackage is considered installed. 
""" deb_dir = self.makeDir() create_simple_repository(deb_dir) self.facade.add_channel_deb_dir(deb_dir) self._add_package_to_deb_dir( deb_dir, "name1", version="version0-release0") self._add_package_to_deb_dir( deb_dir, "name1", version="version2-release2") self._install_deb_file(os.path.join(deb_dir, PKGNAME1)) self.facade.reload_channels() [version0, version1, version2] = sorted( self.facade.get_packages_by_name("name1")) self.assertEqual("version0-release0", version0.version) self.assertFalse(self.facade.is_package_installed(version0)) self.assertEqual("version1-release1", version1.version) self.assertTrue(self.facade.is_package_installed(version1)) self.assertEqual("version2-release2", version2.version) self.assertFalse(self.facade.is_package_installed(version2)) def test_is_package_available_in_channel_not_installed(self): """ A package is considered available if the package is in a configured channel and not installed. """ deb_dir = self.makeDir() create_simple_repository(deb_dir) self.facade.add_channel_deb_dir(deb_dir) self.facade.reload_channels() [package] = self.facade.get_packages_by_name("name1") self.assertTrue(self.facade.is_package_available(package)) def test_is_package_available_not_in_channel_installed(self): """ A package is not considered available if the package is installed and not in a configured channel. """ deb_dir = self.makeDir() create_simple_repository(deb_dir) self._install_deb_file(os.path.join(deb_dir, PKGNAME1)) self.facade.reload_channels() [package] = self.facade.get_packages_by_name("name1") self.assertFalse(self.facade.is_package_available(package)) def test_is_package_available_in_channel_installed(self): """ A package is considered available if the package is installed and is in a configured channel. """ deb_dir = self.makeDir() create_simple_repository(deb_dir) self._install_deb_file(os.path.join(deb_dir, PKGNAME1)) self.facade.add_channel_deb_dir(deb_dir) self.facade.reload_channels() [package] = self.facade.get_packages_by_name("name1") self.assertTrue(self.facade.is_package_available(package)) def test_is_package_upgrade_in_channel_not_installed(self): """ A package is not consider an upgrade of no version of it is installed. """ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo", version="1.0") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [package] = self.facade.get_packages() self.assertFalse(self.facade.is_package_upgrade(package)) def test_is_package_upgrade_in_channel_older_installed(self): """ A package is considered to be an upgrade if some channel has a newer version than the installed one. """ deb_dir = self.makeDir() self._add_system_package("foo", version="0.5") self._add_package_to_deb_dir(deb_dir, "foo", version="1.0") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [version_05, version_10] = sorted(self.facade.get_packages()) self.assertTrue(self.facade.is_package_upgrade(version_10)) self.assertFalse(self.facade.is_package_upgrade(version_05)) def test_is_package_upgrade_in_channel_newer_installed(self): """ A package is not considered to be an upgrade if there are only older versions than the installed one in the channels. 
""" deb_dir = self.makeDir() self._add_system_package("foo", version="1.5") self._add_package_to_deb_dir(deb_dir, "foo", version="1.0") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [version_10, version_15] = sorted(self.facade.get_packages()) self.assertFalse(self.facade.is_package_upgrade(version_10)) self.assertFalse(self.facade.is_package_upgrade(version_15)) def test_is_package_upgrade_in_channel_same_as_installed(self): """ A package is not considered to be an upgrade if the newest version of the packages available in the channels is the same as the installed one. """ deb_dir = self.makeDir() self._add_system_package("foo", version="1.0") self._add_package_to_deb_dir(deb_dir, "foo", version="1.0") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [package] = self.facade.get_packages() self.assertFalse(self.facade.is_package_upgrade(package)) def test_is_package_upgrade_not_in_channel_installed(self): """ A package is not considered to be an upgrade if the package is installed but not available in any of the configured channels. """ self._add_system_package("foo", version="1.0") self.facade.reload_channels() [package] = self.facade.get_packages() self.assertFalse(self.facade.is_package_upgrade(package)) def test_get_packages_by_name_no_match(self): """ If there are no packages with the given name, C{get_packages_by_name} returns an empty list. """ self._add_system_package("foo", version="1.0") self.facade.reload_channels() self.assertEqual([], self.facade.get_packages_by_name("bar")) def test_get_packages_by_name_match(self): """ C{get_packages_by_name} returns all the packages in the available channels that have the specified name. """ deb_dir = self.makeDir() self._add_system_package("foo", version="1.0") self._add_package_to_deb_dir(deb_dir, "foo", version="1.5") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() self.assertEqual( [("foo", "1.0"), ("foo", "1.5")], sorted([(version.package.name, version.version) for version in self.facade.get_packages_by_name("foo")])) def test_perform_changes_with_nothing_to_do(self): """ perform_changes() should return None when there's nothing to do. """ self.facade.reload_channels() self.assertEqual(self.facade.perform_changes(), None) self.assertEqual("none", os.environ["APT_LISTCHANGES_FRONTEND"]) self.assertEqual("none", os.environ["APT_LISTBUGS_FRONTEND"]) self.assertEqual("noninteractive", os.environ["DEBIAN_FRONTEND"]) self.assertEqual(["--force-confold"], apt_pkg.config.value_list("DPkg::options")) def test_perform_changes_with_no_path(self): """ perform_changes() sets C{PATH} if it's not set already, since dpkg requires it to be set. """ del os.environ["PATH"] self.facade.reload_channels() self.assertEqual(self.facade.perform_changes(), None) self.assertEqual(UBUNTU_PATH, os.environ["PATH"]) def test_perform_changes_with_path(self): """ perform_changes() doesn't set C{PATH} if it's set already. """ os.environ["PATH"] = "custom-path" self.facade.reload_channels() self.assertEqual(self.facade.perform_changes(), None) self.assertEqual("custom-path", os.environ["PATH"]) def test_perform_changes_fetch_progress(self): """ C{perform_changes()} captures the fetch output and returns it. 
""" deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_install(foo) fetch_item = FakeFetchItem( FakeOwner(1234, error_text="Some error"), "foo package") def output_progress(fetch_progress, install_progress): fetch_progress.start() fetch_progress.fetch(fetch_item) fetch_progress.fail(fetch_item) fetch_progress.done(fetch_item) fetch_progress.stop() self.patch_cache_commit(output_progress) output = [ line.rstrip() for line in self.facade.perform_changes().splitlines() if line.strip()] # Don't do a plain comparision of the output, since the output # in Lucid is slightly different. self.assertEqual(4, len(output)) self.assertTrue(output[0].startswith("Get:1 foo package")) self.assertEqual( ["Err foo package", " Some error"], output[1:3]) self.assertTrue(output[3].startswith("Fetched ")) def test_perform_changes_dpkg_output(self): """ C{perform_changes()} captures the dpkg output and returns it. """ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_install(foo) def print_output(fetch_progress, install_progress): os.write(1, "Stdout output\n") os.write(2, "Stderr output\n") os.write(1, "Stdout output again\n") self.patch_cache_commit(print_output) output = [ line.rstrip() for line in self.facade.perform_changes().splitlines() if line.strip()] self.assertEqual( ["Stdout output", "Stderr output", "Stdout output again"], output) def test_perform_changes_dpkg_output_error(self): """ C{perform_changes()} captures the dpkg output and includes it in the exception message, if committing the cache fails. """ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_install(foo) def commit(fetch_progress, install_progress): os.write(1, "Stdout output\n") os.write(2, "Stderr output\n") os.write(1, "Stdout output again\n") raise SystemError("Oops") self.facade._cache.commit = commit exception = self.assertRaises( TransactionError, self.facade.perform_changes) output = [ line.rstrip() for line in exception.args[0].splitlines() if line.strip()] self.assertEqual( ["Oops", "Package operation log:", "Stdout output", "Stderr output", "Stdout output again"], output) def _test_retry_changes(self, error_type): """ Test that changes are retried with the given exception type. """ self.facade.max_dpkg_retries = 1 deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_install(foo) def commit1(fetch_progress, install_progress): self.facade._cache.commit = commit2 os.write(2, "bad stuff!\n") raise error_type("Oops") def commit2(fetch_progress, install_progress): install_progress.dpkg_exited = True os.write(1, "good stuff!") self.facade._cache.commit = commit1 output = [ line.rstrip() for line in self.facade.perform_changes().splitlines() if line.strip()] self.assertEqual(["bad stuff!", "good stuff!"], output) def test_retry_changes_system_error(self): """ Changes are retried in the event of a SystemError. 
""" self._test_retry_changes(SystemError) def test_retry_changes_lock_failed(self): """ Changes are retried in the event of a L{LockFailedException}. """ self._test_retry_changes(LockFailedException) def test_perform_changes_dpkg_error_real(self): """ C{perform_changes()} detects whether the dpkg call fails and raises a C{TransactionError}. This test executes dpkg for real, which should fail, complaining that superuser privileges are needed. The error from the dpkg sub process is included. """ self._add_system_package("foo") self.facade.reload_channels() foo = self.facade.get_packages_by_name("foo")[0] self.facade.mark_remove(foo) self.assertRaises(TransactionError, self.facade.perform_changes) def test_perform_changes_dpkg_error_retains_excepthook(self): """ We install a special excepthook when preforming package operations, to prevent Apport from generating crash reports when dpkg returns a failure. It's only installed when doing the actual package operation, so the original excepthook is there after the perform_changes() method returns. """ old_excepthook = sys.excepthook self._add_system_package("foo") self.facade.reload_channels() foo = self.facade.get_packages_by_name("foo")[0] self.facade.mark_remove(foo) self.assertRaises(TransactionError, self.facade.perform_changes) self.assertIs(old_excepthook, sys.excepthook) def test_prevent_dpkg_apport_error_system_error(self): """ C{_prevent_dpkg_apport_error} prevents the Apport excepthook from being called when a SystemError happens, since SystemErrors are expected to happen and will be caught in the Apt C binding.. """ hook_calls = [] progress = LandscapeInstallProgress() progress.old_excepthook = ( lambda exc_type, exc_value, exc_tb: hook_calls.append( (exc_type, exc_value, exc_tb))) progress._prevent_dpkg_apport_error( SystemError, SystemError("error"), object()) self.assertEqual([], hook_calls) def test_prevent_dpkg_apport_error_system_error_calls_system_hook(self): """ C{_prevent_dpkg_apport_error} prevents the Apport excepthook from being called when a SystemError happens, but it does call the system except hook, which is the one that was in place before apport installed a custom one. This makes the exception to be printed to stderr. """ progress = LandscapeInstallProgress() sys_except_hook = self.mocker.replace("sys.__excepthook__") error = SystemError("error") tb = object() sys_except_hook(SystemError, error, tb) self.mocker.result(None) self.mocker.replay() progress._prevent_dpkg_apport_error(SystemError, error, tb) def test_prevent_dpkg_apport_error_non_system_error(self): """ If C{_prevent_dpkg_apport_error} gets an exception that isn't a SystemError, the old Apport hook is being called. """ hook_calls = [] progress = LandscapeInstallProgress() progress.old_excepthook = ( lambda exc_type, exc_value, exc_tb: hook_calls.append( (exc_type, exc_value, exc_tb))) error = object() traceback = object() progress._prevent_dpkg_apport_error(Exception, error, traceback) self.assertEqual([(Exception, error, traceback)], hook_calls) def test_perform_changes_dpkg_exit_dirty(self): """ C{perform_changes()} checks whether dpkg exited cleanly and raises a TransactionError if it didn't. 
""" deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() foo = self.facade.get_packages_by_name("foo")[0] self.facade.mark_install(foo) def commit(fetch_progress, install_progress): install_progress.dpkg_exited = False os.write(1, "Stdout output\n") self.facade._cache.commit = commit exception = self.assertRaises( TransactionError, self.facade.perform_changes) output = [ line.rstrip() for line in exception.args[0].splitlines()if line.strip()] self.assertEqual( ["dpkg didn't exit cleanly.", "Package operation log:", "Stdout output"], output) def test_perform_changes_install_broken_includes_error_info(self): """ If some packages are broken and can't be installed, information about the unmet dependencies is included in the error message that C{perform_changes()} will raise. """ deb_dir = self.makeDir() self._add_package_to_deb_dir( deb_dir, "foo", control_fields={"Depends": "missing | lost (>= 1.0)", "Pre-Depends": "pre-missing | pre-lost"}) self._add_package_to_deb_dir( deb_dir, "bar", control_fields={"Depends": "also-missing | also-lost (>= 1.0)", "Pre-Depends": "also-pre-missing | also-pre-lost"}) self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") self.facade.mark_install(foo) self.facade.mark_install(bar) self.patch_cache_commit() error = self.assertRaises( TransactionError, self.facade.perform_changes) self.assertIn("you have held broken packages", error.args[0]) self.assertEqual( ["The following packages have unmet dependencies:", " bar: PreDepends: also-pre-missing but is not installable or", " also-pre-lost but is not installable", " bar: Depends: also-missing but is not installable or", " also-lost (>= 1.0) but is not installable", " foo: PreDepends: pre-missing but is not installable or", " pre-lost but is not installable", " foo: Depends: missing but is not installable or", " lost (>= 1.0) but is not installable"], error.args[0].splitlines()[-9:]) def test_get_unmet_dependency_info_no_broken(self): """ If there are no broken packages, C{_get_unmet_dependency_info} returns no dependency information. """ deb_dir = self.makeDir() self._add_package_to_deb_dir( deb_dir, "foo", control_fields={"Depends": "bar"}) self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() self.assertEqual(set(), self.facade._get_broken_packages()) self.assertEqual("", self.facade._get_unmet_dependency_info()) def test_get_unmet_dependency_info_depend(self): """ If a C{Depends} dependency is unmet, C{_get_unmet_dependency_info} returns information about it, including the dependency type. """ deb_dir = self.makeDir() self._add_package_to_deb_dir( deb_dir, "foo", control_fields={"Depends": "bar"}) self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") foo.package.mark_install(auto_fix=False) self.assertEqual( set([foo.package]), self.facade._get_broken_packages()) self.assertEqual( ["The following packages have unmet dependencies:", " foo: Depends: bar but is not installable"], self.facade._get_unmet_dependency_info().splitlines()) def test_get_unmet_dependency_info_predepend(self): """ If a C{Pre-Depends} dependency is unmet, C{_get_unmet_dependency_info} returns information about it, including the dependency type. 
""" deb_dir = self.makeDir() self._add_package_to_deb_dir( deb_dir, "foo", control_fields={"Pre-Depends": "bar"}) self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") foo.package.mark_install(auto_fix=False) self.assertEqual( set([foo.package]), self.facade._get_broken_packages()) self.assertEqual( ["The following packages have unmet dependencies:", " foo: PreDepends: bar but is not installable"], self.facade._get_unmet_dependency_info().splitlines()) def test_get_unmet_dependency_info_with_version(self): """ If an unmet dependency includes a version relation, it's included in the error information from C{_get_unmet_dependency_info}. """ deb_dir = self.makeDir() self._add_package_to_deb_dir( deb_dir, "foo", control_fields={"Depends": "bar (>= 1.0)"}) self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") foo.package.mark_install(auto_fix=False) self.assertEqual( set([foo.package]), self.facade._get_broken_packages()) self.assertEqual( ["The following packages have unmet dependencies:", " foo: Depends: bar (>= 1.0) but is not installable"], self.facade._get_unmet_dependency_info().splitlines()) def test_get_unmet_dependency_info_with_dep_install(self): """ If an unmet dependency is being installed (but still doesn't meet the vesion requirements), the version being installed is included in the error information from C{_get_unmet_dependency_info}. """ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "bar", version="0.5") self._add_package_to_deb_dir( deb_dir, "foo", control_fields={"Depends": "bar (>= 1.0)"}) self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") foo.package.mark_install(auto_fix=False) bar.package.mark_install(auto_fix=False) self.assertEqual( set([foo.package]), self.facade._get_broken_packages()) self.assertEqual( ["The following packages have unmet dependencies:", " foo: Depends: bar (>= 1.0) but 0.5 is to be installed"], self.facade._get_unmet_dependency_info().splitlines()) def test_get_unmet_dependency_info_with_dep_already_installed(self): """ If an unmet dependency is already installed (but still doesn't meet the vesion requirements), the version that is installed is included in the error information from C{_get_unmet_dependency_info}. """ deb_dir = self.makeDir() self._add_system_package("bar", version="1.0") self._add_package_to_deb_dir( deb_dir, "foo", control_fields={"Depends": "bar (>= 3.0)"}) self._add_package_to_deb_dir(deb_dir, "bar", version="2.0") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar1, bar2] = sorted(self.facade.get_packages_by_name("bar")) self.assertEqual(bar2, bar1.package.candidate) foo.package.mark_install(auto_fix=False) self.assertEqual( set([foo.package]), self.facade._get_broken_packages()) self.assertEqual( ["The following packages have unmet dependencies:", " foo: Depends: bar (>= 3.0) but 1.0 is to be installed"], self.facade._get_unmet_dependency_info().splitlines()) def test_get_unmet_dependency_info_with_dep_upgraded(self): """ If an unmet dependency is being upgraded (but still doesn't meet the vesion requirements), the version that it is upgraded to is included in the error information from C{_get_unmet_dependency_info}. 
""" deb_dir = self.makeDir() self._add_system_package("bar", version="1.0") self._add_package_to_deb_dir( deb_dir, "foo", control_fields={"Depends": "bar (>= 3.0)"}) self._add_package_to_deb_dir(deb_dir, "bar", version="2.0") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar1, bar2] = sorted(self.facade.get_packages_by_name("bar")) self.assertEqual(bar2, bar1.package.candidate) foo.package.mark_install(auto_fix=False) bar1.package.mark_install(auto_fix=False) self.assertEqual( set([foo.package]), self.facade._get_broken_packages()) self.assertEqual( ["The following packages have unmet dependencies:", " foo: Depends: bar (>= 3.0) but 2.0 is to be installed"], self.facade._get_unmet_dependency_info().splitlines()) def test_get_unmet_dependency_info_with_dep_downgraded(self): """ If an unmet dependency is being downgraded (but still doesn't meet the vesion requirements), the version that it is downgraded to is included in the error information from C{_get_unmet_dependency_info}. """ deb_dir = self.makeDir() self._add_system_package("bar", version="2.0") self._add_package_to_deb_dir( deb_dir, "foo", control_fields={"Depends": "bar (>= 3.0)"}) self._add_package_to_deb_dir(deb_dir, "bar", version="1.0") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar1, bar2] = sorted(self.facade.get_packages_by_name("bar")) self.assertEqual(bar2, bar1.package.candidate) bar1.package.candidate = bar1 foo.package.mark_install(auto_fix=False) bar1.package.mark_install(auto_fix=False) self.assertEqual( set([foo.package]), self.facade._get_broken_packages()) self.assertEqual( ["The following packages have unmet dependencies:", " foo: Depends: bar (>= 3.0) but 1.0 is to be installed"], self.facade._get_unmet_dependency_info().splitlines()) def test_get_unmet_dependency_info_with_or_deps(self): """ If an unmet dependency includes an or relation, all of the possible options are included in the error information from C{_get_unmet_dependency_info}. """ deb_dir = self.makeDir() self._add_package_to_deb_dir( deb_dir, "foo", control_fields={"Depends": "bar | baz (>= 1.0)"}) self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") foo.package.mark_install(auto_fix=False) self.assertEqual( set([foo.package]), self.facade._get_broken_packages()) self.assertEqual( ["The following packages have unmet dependencies:", " foo: Depends: bar but is not installable or", " baz (>= 1.0) but is not installable"], self.facade._get_unmet_dependency_info().splitlines()) def test_get_unmet_dependency_info_with_conflicts(self): """ If a package is broken because it conflicts with a package to be installed, information about the conflict is included in the error information from C{_get_unmet_dependency_info}. """ deb_dir = self.makeDir() self._add_system_package("foo") self._add_package_to_deb_dir( deb_dir, "bar", control_fields={"Conflicts": "foo"}) self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") bar.package.mark_install(auto_fix=False) # Mark as keep to ensure it stays broken and isn't automatically # removed by the resolver. 
foo.package.mark_keep() self.assertEqual( set([bar.package]), self.facade._get_broken_packages()) self.assertEqual( ["The following packages have unmet dependencies:", " bar: Conflicts: foo but 1.0 is to be installed"], self.facade._get_unmet_dependency_info().splitlines()) def test_get_unmet_dependency_info_with_breaks(self): """ If a package is broken because it breaks a package to be installed, information about the conflict is included in the error information from C{_get_unmet_dependency_info}. """ deb_dir = self.makeDir() self._add_package_to_deb_dir( deb_dir, "foo", control_fields={"Depends": "bar"}) self._add_package_to_deb_dir( deb_dir, "bar", control_fields={"Breaks": "foo"}) self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") foo.package.mark_install(auto_fix=False) self.assertEqual( set([bar.package]), self.facade._get_broken_packages()) self.assertEqual( ["The following packages have unmet dependencies:", " bar: Breaks: foo but 1.0 is to be installed"], self.facade._get_unmet_dependency_info().splitlines()) def test_get_unmet_dependency_info_with_conflicts_not_installed(self): """ If a broken package conflicts or breaks a package that isn't installed or marked for installation, information about that conflict isn't reported by C{_get_unmet_dependency_info}. """ deb_dir = self.makeDir() self._add_system_package("foo") self._add_package_to_deb_dir( deb_dir, "bar", control_fields={"Conflicts": "foo, baz", "Breaks": "foo, baz"}) self._add_package_to_deb_dir(deb_dir, "baz") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") bar.package.mark_install(auto_fix=False) # Mark as keep to ensure it stays broken and isn't automatically # removed by the resolver. foo.package.mark_keep() self.assertEqual( set([bar.package]), self.facade._get_broken_packages()) self.assertEqual( ["The following packages have unmet dependencies:", " bar: Conflicts: foo but 1.0 is to be installed", " bar: Breaks: foo but 1.0 is to be installed"], self.facade._get_unmet_dependency_info().splitlines()) def test_get_unmet_dependency_info_with_conflicts_marked_delete(self): """ If a broken package conflicts or breaks an installed package that is marekd for removal, information about that conflict isn't reported by C{_get_unmet_dependency_info}. """ deb_dir = self.makeDir() self._add_system_package("foo") self._add_package_to_deb_dir( deb_dir, "bar", control_fields={"Conflicts": "foo, baz", "Breaks": "foo, baz"}) self._add_system_package("baz") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") [baz] = self.facade.get_packages_by_name("baz") baz.package.mark_delete(auto_fix=False) bar.package.mark_install(auto_fix=False) # Mark as keep to ensure it stays broken and isn't automatically # removed by the resolver. 
foo.package.mark_keep() self.assertEqual( set([bar.package]), self.facade._get_broken_packages()) self.assertEqual( ["The following packages have unmet dependencies:", "  bar: Conflicts: foo but 1.0 is to be installed", "  bar: Breaks: foo but 1.0 is to be installed"], self.facade._get_unmet_dependency_info().splitlines()) def test_get_unmet_dependency_info_only_unmet(self): """ If a broken package has some dependencies that are being fulfilled, those aren't included in the error information from C{_get_unmet_dependency_info}. """ deb_dir = self.makeDir() self._add_system_package("there1") self._add_system_package("there2") self._add_package_to_deb_dir( deb_dir, "foo", control_fields={"Depends": "there1, missing1, there2 | missing2"}) self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") foo.package.mark_install(auto_fix=False) self.assertEqual( set([foo.package]), self.facade._get_broken_packages()) self.assertEqual( ["The following packages have unmet dependencies:", "  foo: Depends: missing1 but is not installable"], self.facade._get_unmet_dependency_info().splitlines()) def test_get_unmet_dependency_info_multiple_broken(self): """ If multiple packages are broken, all broken packages are listed in the error information from C{_get_unmet_dependency_info}. """ deb_dir = self.makeDir() self._add_package_to_deb_dir( deb_dir, "foo", control_fields={"Depends": "bar"}) self._add_package_to_deb_dir( deb_dir, "another-foo", control_fields={"Depends": "another-bar"}) self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [another_foo] = self.facade.get_packages_by_name("another-foo") foo.package.mark_install(auto_fix=False) another_foo.package.mark_install(auto_fix=False) self.assertEqual( set([foo.package, another_foo.package]), self.facade._get_broken_packages()) self.assertEqual( ["The following packages have unmet dependencies:", "  another-foo: Depends: another-bar but is not installable", "  foo: Depends: bar but is not installable"], self.facade._get_unmet_dependency_info().splitlines()) def test_get_unmet_dependency_info_unknown(self): """ If a package is broken but fulfills all PreDepends, Depends, Conflicts and Breaks dependencies, C{_get_unmet_dependency_info} reports that that package has an unknown dependency error, since we don't know why it's broken. """ self._add_system_package("foo") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade._get_broken_packages = lambda: set([foo.package]) self.assertEqual( ["The following packages have unmet dependencies:", "  foo: Unknown dependency error"], self.facade._get_unmet_dependency_info().splitlines()) def _mock_output_restore(self): """ Mock methods to ensure that stdout and stderr are restored, after they have been captured. Return the path to the tempfile that was used to capture the output. """ old_stdout = os.dup(1) old_stderr = os.dup(2) fd, outfile = tempfile.mkstemp() mkstemp = self.mocker.replace("tempfile.mkstemp") mkstemp() self.mocker.result((fd, outfile)) dup = self.mocker.replace("os.dup") dup(1) self.mocker.result(old_stdout) dup(2) self.mocker.result(old_stderr) dup2 = self.mocker.replace("os.dup2") dup2(old_stdout, 1) self.mocker.passthrough() dup2(old_stderr, 2) self.mocker.passthrough() return outfile def test_perform_changes_dpkg_output_reset(self): """ C{perform_changes()} resets stdout and stderr after the cache commit.
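        For reference, the capture-and-restore dance that the mocked helper
        above checks for is the classic file-descriptor redirect; a minimal
        sketch of the technique (not the facade's literal code):

            import os, tempfile

            fd, outfile = tempfile.mkstemp()
            old_stdout = os.dup(1)
            old_stderr = os.dup(2)
            os.dup2(fd, 1)  # point stdout at the tempfile
            os.dup2(fd, 2)  # point stderr at the tempfile
            try:
                pass  # ... run the dpkg-invoking commit here ...
            finally:
                os.dup2(old_stdout, 1)  # restore the original stdout
                os.dup2(old_stderr, 2)  # restore the original stderr
                os.close(fd)
                os.remove(outfile)  # don't leave the tempfile behind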
""" deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_install(foo) outfile = self._mock_output_restore() self.mocker.replay() self.patch_cache_commit() self.facade.perform_changes() # Make sure we don't leave the tempfile behind. self.assertFalse(os.path.exists(outfile)) def test_perform_changes_dpkg_output_reset_error(self): """ C{perform_changes()} resets stdout and stderr after the cache commit, even if commit raises an error. """ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_install(foo) outfile = self._mock_output_restore() self.mocker.replay() def commit(fetch_progress, install_progress): raise SystemError("Error") self.facade._cache.commit = commit self.assertRaises(TransactionError, self.facade.perform_changes) # Make sure we don't leave the tempfile behind. self.assertFalse(os.path.exists(outfile)) def test_reset_marks(self): """ C{reset_marks()} clears things, so that there's nothing to do for C{perform_changes()} """ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo") self._add_system_package("bar", version="1.0") self._add_package_to_deb_dir(deb_dir, "bar", version="1.5") self._add_system_package("baz") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self._add_system_package("quux", version="1.0") self._add_system_package("wibble", version="1.0") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_install(foo) self.facade.mark_global_upgrade() [baz] = self.facade.get_packages_by_name("baz") self.facade.mark_remove(baz) [quux] = self.facade.get_packages_by_name("quux") self.facade.mark_hold(quux) [wibble] = self.facade.get_packages_by_name("wibble") self.facade.mark_remove_hold(wibble) self.facade.reset_marks() self.assertEqual(self.facade._version_installs, []) self.assertEqual(self.facade._version_removals, []) self.assertFalse(self.facade._global_upgrade) self.assertEqual(self.facade._version_hold_creations, []) self.assertEqual(self.facade._version_hold_removals, []) self.assertEqual(self.facade.perform_changes(), None) def test_reset_marks_resets_cache(self): """ C{reset_marks()} clears the apt cache, so that no changes will be pending. """ deb_dir = self.makeDir() self._add_package_to_deb_dir( deb_dir, "foo", control_fields={"Depends": "bar"}) self._add_package_to_deb_dir(deb_dir, "bar") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_install(foo) self.assertRaises(DependencyError, self.facade.perform_changes) self.assertNotEqual([], list(self.facade._cache.get_changes())) self.facade.reset_marks() self.assertEqual([], list(self.facade._cache.get_changes())) def test_wb_mark_install_adds_to_list(self): """ C{mark_install} adds the package to the list of packages to be installed. 
""" deb_dir = self.makeDir() create_deb(deb_dir, PKGNAME_MINIMAL, PKGDEB_MINIMAL) self.facade.add_channel_deb_dir(deb_dir) self.facade.reload_channels() [pkg] = self.facade.get_packages_by_name("minimal") self.facade.mark_install(pkg) self.assertEqual(1, len(self.facade._version_installs)) [install] = self.facade._version_installs self.assertEqual("minimal", install.package.name) def test_wb_mark_global_upgrade_sets_variable(self): """ C{mark_global_upgrade} sets a variable, so that the actual upgrade happens in C{perform_changes}. """ deb_dir = self.makeDir() self._add_system_package("foo", version="1.0") self._add_package_to_deb_dir(deb_dir, "foo", version="1.5") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() foo_10 = sorted(self.facade.get_packages_by_name("foo"))[0] self.facade.mark_global_upgrade() self.assertTrue(self.facade._global_upgrade) self.assertEqual(foo_10, foo_10.package.installed) def test_wb_mark_remove_adds_to_list(self): """ C{mark_remove} adds the package to the list of packages to be removed. """ self._add_system_package("foo") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_remove(foo) self.assertEqual([foo], self.facade._version_removals) def test_mark_install_specific_version(self): """ If more than one version is available, the version passed to C{mark_install} is marked as the candidate version, so that gets installed. """ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo", version="1.0") self._add_package_to_deb_dir(deb_dir, "foo", version="2.0") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() foo1, foo2 = sorted(self.facade.get_packages_by_name("foo")) self.assertEqual(foo2, foo1.package.candidate) self.facade.mark_install(foo1) self.patch_cache_commit() self.facade.perform_changes() self.assertEqual(foo1, foo1.package.candidate) def test_wb_mark_install_upgrade_non_main_arch(self): """ If C{mark_install} is used to upgrade a package, its non-main architecture version of the package will be upgraded as well, if it is installed. 
""" apt_pkg.config.clear("APT::Architectures") apt_pkg.config.set("APT::Architecture", "amd64") apt_pkg.config.set("APT::Architectures::", "amd64") apt_pkg.config.set("APT::Architectures::", "i386") deb_dir = self.makeDir() self._add_system_package( "multi-arch", architecture="amd64", version="1.0", control_fields={"Multi-Arch": "same"}) self._add_system_package( "multi-arch", architecture="i386", version="1.0", control_fields={"Multi-Arch": "same"}) self._add_system_package( "single-arch", architecture="amd64", version="1.0") self._add_package_to_deb_dir( deb_dir, "multi-arch", architecture="amd64", version="2.0", control_fields={"Multi-Arch": "same"}) self._add_package_to_deb_dir( deb_dir, "multi-arch", architecture="i386", version="2.0", control_fields={"Multi-Arch": "same"}) self._add_package_to_deb_dir( deb_dir, "single-arch", architecture="amd64", version="2.0") self._add_package_to_deb_dir( deb_dir, "single-arch", architecture="i386", version="2.0") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() multi_arch1, multi_arch2 = sorted( self.facade.get_packages_by_name("multi-arch")) single_arch1, single_arch2 = sorted( self.facade.get_packages_by_name("single-arch")) self.facade.mark_remove(multi_arch1) self.facade.mark_install(multi_arch2) self.facade.mark_remove(single_arch1) self.facade.mark_install(single_arch2) self.patch_cache_commit() self.facade.perform_changes() changes = [ (pkg.name, pkg.candidate.version, pkg.marked_upgrade) for pkg in self.facade._cache.get_changes()] self.assertEqual( [("multi-arch", "2.0", True), ("multi-arch:i386", "2.0", True), ("single-arch", "2.0", True)], sorted(changes)) def test_wb_mark_install_upgrade_non_main_arch_dependency_error(self): """ If a non-main architecture is automatically upgraded, and the main architecture versions hasn't been marked for installation, only the main architecture version is included in the C{DependencyError}. """ apt_pkg.config.clear("APT::Architectures") apt_pkg.config.set("APT::Architecture", "amd64") apt_pkg.config.set("APT::Architectures::", "amd64") apt_pkg.config.set("APT::Architectures::", "i386") deb_dir = self.makeDir() self._add_system_package( "multi-arch", architecture="amd64", version="1.0", control_fields={"Multi-Arch": "same"}) self._add_system_package( "multi-arch", architecture="i386", version="1.0", control_fields={"Multi-Arch": "same"}) self._add_package_to_deb_dir( deb_dir, "multi-arch", architecture="amd64", version="2.0", control_fields={"Multi-Arch": "same"}) self._add_package_to_deb_dir( deb_dir, "multi-arch", architecture="i386", version="2.0", control_fields={"Multi-Arch": "same"}) self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() multi_arch1, multi_arch2 = sorted( self.facade.get_packages_by_name("multi-arch")) self.facade.mark_global_upgrade() self.patch_cache_commit() exception = self.assertRaises( DependencyError, self.facade.perform_changes) self.assertEqual( sorted([multi_arch1, multi_arch2]), sorted(exception.packages)) changes = [ (pkg.name, pkg.candidate.version) for pkg in self.facade._cache.get_changes()] self.assertEqual( [("multi-arch", "2.0"), ("multi-arch:i386", "2.0")], sorted(changes)) def test_mark_global_upgrade(self): """ C{mark_global_upgrade} upgrades all packages that can be upgraded. It makes C{perform_changes} raise a C{DependencyError} with the required changes, so that the user can review the changes and approve them. 
""" deb_dir = self.makeDir() self._add_system_package("foo", version="1.0") self._add_system_package("bar") self._add_package_to_deb_dir(deb_dir, "foo", version="2.0") self._add_package_to_deb_dir(deb_dir, "baz") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() foo1, foo2 = sorted(self.facade.get_packages_by_name("foo")) self.facade.mark_global_upgrade() exception = self.assertRaises( DependencyError, self.facade.perform_changes) self.assertEqual(set([foo1, foo2]), set(exception.packages)) def test_mark_global_upgrade_candidate_version(self): """ If more than one version is available, the package will be upgraded to the candidate version. Since the user didn't request from and to which version to upgrade to, a DependencyError error will be raised, so that the changes can be reviewed and approved. """ deb_dir = self.makeDir() self._add_system_package("foo", version="1.0") self._add_package_to_deb_dir(deb_dir, "foo", version="2.0") self._add_package_to_deb_dir(deb_dir, "foo", version="3.0") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() foo1, foo2, foo3 = sorted(self.facade.get_packages_by_name("foo")) self.assertEqual(foo3, foo1.package.candidate) self.facade.mark_global_upgrade() exception = self.assertRaises( DependencyError, self.facade.perform_changes) self.assertEqual(set([foo1, foo3]), set(exception.packages)) def test_mark_global_upgrade_no_upgrade(self): """ If the candidate version of a package is already installed, C{mark_global_upgrade()} won't request an upgrade to be made. I.e. C{perform_changes()} won't do anything. """ deb_dir = self.makeDir() self._add_system_package("foo", version="3.0") self._add_package_to_deb_dir(deb_dir, "foo", version="2.0") self._add_package_to_deb_dir(deb_dir, "foo", version="1.0") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() foo3 = sorted(self.facade.get_packages_by_name("foo"))[-1] self.assertEqual(foo3, foo3.package.candidate) self.facade.mark_global_upgrade() self.assertEqual(None, self.facade.perform_changes()) def test_mark_global_upgrade_preserves_auto(self): """ Upgrading a package will retain its auto-install status. """ deb_dir = self.makeDir() self._add_system_package("auto", version="1.0") self._add_package_to_deb_dir(deb_dir, "auto", version="2.0") self._add_system_package("noauto", version="1.0") self._add_package_to_deb_dir(deb_dir, "noauto", version="2.0") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() auto1, auto2 = sorted(self.facade.get_packages_by_name("auto")) noauto1, noauto2 = sorted(self.facade.get_packages_by_name("noauto")) auto1.package.mark_auto(True) noauto1.package.mark_auto(False) self.facade.mark_global_upgrade() self.assertRaises(DependencyError, self.facade.perform_changes) self.assertTrue(auto2.package.is_auto_installed) self.assertFalse(noauto2.package.is_auto_installed) def test_wb_perform_changes_commits_changes(self): """ When calling C{perform_changes}, it will commit the cache, to cause all package changes to happen. 
""" committed = [] def commit(fetch_progress, install_progress): committed.append(True) deb_dir = self.makeDir() create_deb(deb_dir, PKGNAME_MINIMAL, PKGDEB_MINIMAL) self.facade.add_channel_deb_dir(deb_dir) self.facade.reload_channels() [pkg] = self.facade.get_packages_by_name("minimal") self.facade.mark_install(pkg) self.patch_cache_commit(commit) self.committed = False self.facade.perform_changes() self.assertEqual([True], committed) def test_perform_changes_return_non_none(self): """ When calling C{perform_changes} with changes to do, it will return a string. """ deb_dir = self.makeDir() create_deb(deb_dir, PKGNAME_MINIMAL, PKGDEB_MINIMAL) self.facade.add_channel_deb_dir(deb_dir) self.facade.reload_channels() [pkg] = self.facade.get_packages_by_name("minimal") self.facade.mark_install(pkg) self.patch_cache_commit() # An empty string is returned, since we don't call the progress # objects, which are the ones that build the output string. self.assertEqual("", self.facade.perform_changes()) def test_perform_changes_with_broken_packages_install_simple(self): """ Even if some installed packages are broken in the system, it's still possible to install packages with no dependencies that don't touch the broken ones. """ deb_dir = self.makeDir() self._add_system_package( "broken", control_fields={"Depends": "missing"}) self._add_package_to_deb_dir(deb_dir, "foo") self._add_package_to_deb_dir(deb_dir, "missing") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_install(foo) self.patch_cache_commit() self.assertEqual("", self.facade.perform_changes()) self.assertEqual( [foo.package], self.facade._cache.get_changes()) def test_perform_changes_with_broken_packages_install_deps(self): """ Even if some installed packages are broken in the system, it's still possible to install packages where the dependencies need to be calculated. """ deb_dir = self.makeDir() self._add_system_package( "broken", control_fields={"Depends": "missing"}) self._add_package_to_deb_dir( deb_dir, "foo", control_fields={"Depends": "bar"}) self._add_package_to_deb_dir(deb_dir, "bar") self._add_package_to_deb_dir(deb_dir, "missing") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") self.facade.mark_install(foo) self.patch_cache_commit() error = self.assertRaises(DependencyError, self.facade.perform_changes) self.assertEqual([bar], error.packages) def test_perform_changes_with_broken_packages_remove_simple(self): """ Even if some installed packages are broken in the system, it's still possible to remove packages that don't affect the broken ones. 
""" deb_dir = self.makeDir() self._add_system_package( "broken", control_fields={"Depends": "missing"}) self._add_system_package("foo") self._add_package_to_deb_dir(deb_dir, "missing") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_remove(foo) self.patch_cache_commit() self.assertEqual("", self.facade.perform_changes()) self.assertEqual( [foo.package], self.facade._cache.get_changes()) def test_perform_changes_with_broken_packages_install_broken(self): """ If some installed package is in a broken state and you install a package that fixes the broken package, as well as a new broken package, C{perform_changes()} will raise a C{TransactionError}. This test specifically tests the case where you replace the broken packages, but have the same number of broken packages before and after the changes. """ deb_dir = self.makeDir() self._add_system_package( "broken", control_fields={"Depends": "missing"}) self._add_package_to_deb_dir( deb_dir, "foo", control_fields={"Depends": "really-missing"}) self._add_package_to_deb_dir(deb_dir, "missing") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [broken] = self.facade.get_packages_by_name("broken") [foo] = self.facade.get_packages_by_name("foo") [missing] = self.facade.get_packages_by_name("missing") self.assertEqual( set([broken.package]), self.facade._get_broken_packages()) self.facade.mark_install(foo) self.facade.mark_install(missing) self.patch_cache_commit() error = self.assertRaises( TransactionError, self.facade.perform_changes) self.assertIn("you have held broken packages", error.args[0]) self.assertEqual( set([foo.package]), self.facade._get_broken_packages()) def test_wb_perform_changes_commit_error(self): """ If an error happens when committing the changes to the cache, a transaction error is raised. """ self._add_system_package("foo") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_remove(foo) cache = self.mocker.replace(self.facade._cache) cache.commit(fetch_progress=ANY, install_progress=ANY) self.mocker.throw(SystemError("Something went wrong.")) self.mocker.replay() exception = self.assertRaises(TransactionError, self.facade.perform_changes) self.assertIn("Something went wrong.", exception.args[0]) def test_mark_install_transaction_error(self): """ Mark package 'name1' for installation, and try to perform changes. It should fail because 'name1' depends on 'requirename1', which isn't available in the package cache. """ deb_dir = self.makeDir() create_simple_repository(deb_dir) self.facade.add_channel_deb_dir(deb_dir) self.facade.reload_channels() [pkg] = self.facade.get_packages_by_name("name1") self.facade.mark_install(pkg) exception = self.assertRaises(TransactionError, self.facade.perform_changes) # XXX: Investigate if we can get a better error message. #self.assertIn("requirename", exception.args[0]) self.assertIn("Unable to correct problems", exception.args[0]) def test_mark_install_dependency_error(self): """ If a dependency hasn't been marked for installation, a DependencyError is raised with the packages that need to be installed. 
""" deb_dir = self.makeDir() self._add_package_to_deb_dir( deb_dir, "foo", control_fields={"Depends": "bar"}) self._add_package_to_deb_dir(deb_dir, "bar") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") self.facade.mark_install(foo) error = self.assertRaises(DependencyError, self.facade.perform_changes) self.assertEqual([bar], error.packages) def test_wb_check_changes_unapproved_install_default(self): """ C{_check_changes} raises C{DependencyError} with the candidate version, if a package is marked for installation, but not in the requested changes. """ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo", version="1.0") self._add_package_to_deb_dir(deb_dir, "foo", version="2.0") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo1, foo2] = sorted(self.facade.get_packages_by_name("foo")) self.assertEqual(foo1.package, foo2.package) package = foo1.package self.assertEqual(package.candidate, foo2) package.mark_install() self.assertEqual([package], self.facade._cache.get_changes()) self.assertTrue(package.marked_install) error = self.assertRaises( DependencyError, self.facade._check_changes, []) self.assertEqual([foo2], error.packages) def test_wb_check_changes_unapproved_install_specific_version(self): """ C{_check_changes} raises C{DependencyError} with the candidate version, if a package is marked for installation with a non-default candidate version. """ deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo", version="1.0") self._add_package_to_deb_dir(deb_dir, "foo", version="2.0") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo1, foo2] = sorted(self.facade.get_packages_by_name("foo")) self.assertEqual(foo1.package, foo2.package) package = foo1.package package.candidate = foo1 package.mark_install() self.assertEqual([package], self.facade._cache.get_changes()) self.assertTrue(package.marked_install) error = self.assertRaises( DependencyError, self.facade._check_changes, []) self.assertEqual([foo1], error.packages) def test_check_changes_unapproved_remove(self): """ C{_check_changes} raises C{DependencyError} with the installed version, if a package is marked for removal and the change isn't in the requested changes. """ self._add_system_package("foo") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") foo.package.mark_delete() self.assertEqual([foo.package], self.facade._cache.get_changes()) self.assertTrue(foo.package.marked_delete) error = self.assertRaises( DependencyError, self.facade._check_changes, []) self.assertEqual([foo], error.packages) def test_check_changes_unapproved_remove_with_update_available(self): """ C{_check_changes} raises C{DependencyError} with the installed version, if a package is marked for removal and there is an update available. 
""" self._add_system_package("foo", version="1.0") deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo", version="2.0") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo1, foo2] = sorted(self.facade.get_packages_by_name("foo")) self.assertEqual(foo1.package, foo2.package) package = foo1.package package.mark_delete() self.assertEqual([package], self.facade._cache.get_changes()) self.assertTrue(package.marked_delete) error = self.assertRaises( DependencyError, self.facade._check_changes, []) self.assertEqual([foo1], error.packages) def test_check_changes_unapproved_upgrade(self): """ If a package is marked to be upgraded, C{_check_changes} raises C{DependencyError} with the installed version and the version to be upgraded to. """ self._add_system_package("foo", version="1.0") deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo", version="2.0") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo1, foo2] = sorted(self.facade.get_packages_by_name("foo")) self.assertEqual(foo1.package, foo2.package) package = foo1.package package.mark_install() self.assertEqual([package], self.facade._cache.get_changes()) self.assertTrue(package.marked_upgrade) error = self.assertRaises( DependencyError, self.facade._check_changes, []) self.assertEqual(set([foo1, foo2]), set(error.packages)) def test_check_changes_unapproved_downgrade(self): """ If a package is marked to be downgraded, C{_check_changes} raises C{DependencyError} with the installed version and the version to be downgraded to. """ self._add_system_package("foo", version="2.0") deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo", version="1.0") self._add_package_to_deb_dir(deb_dir, "foo", version="3.0") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo1, foo2] = sorted(self.facade.get_packages_by_name("foo"))[:2] self.assertEqual(foo1.package, foo2.package) package = foo1.package package.candidate = foo1 package.mark_install() self.assertEqual([package], self.facade._cache.get_changes()) self.assertTrue(package.marked_downgrade) error = self.assertRaises( DependencyError, self.facade._check_changes, []) self.assertEqual(set([foo1, foo2]), set(error.packages)) def test_mark_global_upgrade_dependency_error(self): """ If a package is marked for upgrade, a DependencyError will be raised, indicating which version of the package will be installed and which will be removed. """ deb_dir = self.makeDir() self._add_system_package("foo", version="1.0") self._add_package_to_deb_dir( deb_dir, "foo", version="1.5", control_fields={"Depends": "bar"}) self._add_package_to_deb_dir(deb_dir, "bar") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() foo_10, foo_15 = sorted(self.facade.get_packages_by_name("foo")) [bar] = self.facade.get_packages_by_name("bar") self.facade.mark_global_upgrade() error = self.assertRaises(DependencyError, self.facade.perform_changes) self.assertEqual( sorted([bar, foo_10, foo_15], key=self.version_sortkey), sorted(error.packages, key=self.version_sortkey)) def test_mark_remove_dependency_error(self): """ If a dependency hasn't been marked for removal, DependencyError is raised with the packages that need to be removed. 
""" self._add_system_package("foo") self._add_system_package("bar", control_fields={"Depends": "foo"}) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") self.facade.mark_remove(foo) error = self.assertRaises(DependencyError, self.facade.perform_changes) self.assertEqual([bar], error.packages) def test_mark_remove_held_packages(self): """ If a package that is on hold is marked for removal, a C{TransactionError} is raised by C{perform_changes}. """ self._add_system_package( "foo", control_fields={"Status": "hold ok installed"}) self._add_system_package( "bar", control_fields={"Status": "hold ok installed"}) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") self.facade.mark_remove(foo) self.facade.mark_remove(bar) error = self.assertRaises( TransactionError, self.facade.perform_changes) self.assertEqual( "Can't perform the changes, since the following packages" + " are held: bar, foo", error.args[0]) def test_changer_upgrade_package(self): """ When the {PackageChanger} requests for a package to be upgraded, it requests that the new version is to be installed, and the old version to be removed. This is how you had to do it with Smart. With Apt we have to take care of not marking the old version for removal, since that can result in packages that depend on the upgraded package to be removed. """ self._add_system_package( "foo", control_fields={"Depends": "bar"}) self._add_system_package("bar", version="1.0") deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "bar", version="2.0") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() bar_1, bar_2 = sorted(self.facade.get_packages_by_name("bar")) self.facade.mark_install(bar_2) self.facade.mark_remove(bar_1) self.patch_cache_commit() self.facade.perform_changes() [bar] = self.facade._cache.get_changes() self.assertTrue(bar.marked_upgrade) def test_mark_global_upgrade_held_packages(self): """ If a package that is on hold is marked for upgrade, C{perform_changes} won't request to install a newer version of that package. """ self._add_system_package( "foo", version="1.0", control_fields={"Status": "hold ok installed"}) deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo", version="1.5") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo_10, foo_15] = sorted(self.facade.get_packages_by_name("foo")) self.facade.mark_global_upgrade() self.assertEqual(None, self.facade.perform_changes()) self.assertEqual(foo_10, foo_15.package.installed) def test_mark_global_upgrade_held_dependencies(self): """ If a package that can be upgraded, but that package depends on a newer version of a held package, the first package won't be marked as requiring upgrade. 
""" self._add_system_package( "foo", version="1.0", control_fields={"Status": "hold ok installed"}) self._add_system_package( "bar", version="1.0", control_fields={"Depends": "foo"}) deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo", version="2.0") self._add_package_to_deb_dir( deb_dir, "bar", version="2.0", control_fields={"Depends": "foo (>> 1.0)"}) self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [foo_1, foo_2] = sorted(self.facade.get_packages_by_name("foo")) [bar_1, bar_2] = sorted(self.facade.get_packages_by_name("bar")) self.facade.mark_global_upgrade() self.assertEqual(None, self.facade.perform_changes()) self.assertEqual(foo_1, foo_2.package.installed) self.assertEqual(bar_1, bar_2.package.installed) def test_get_locked_packages_simple(self): """ C{get_locked_packages} returns all packages that are marked as being held. Locks came from the Smart implementation, but since a locked installed package basically is the same as a package with a dpkg hold, having C{get_locked_packages} return all the held packages, the Landscape server UI won't try to upgrade those packages to a newer version. """ self._add_system_package( "foo", control_fields={"Status": "hold ok installed"}) self._add_system_package("bar") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.assertEqual([foo], self.facade.get_locked_packages()) def test_get_locked_packages_multi(self): """ C{get_locked_packages} returns only the installed version of the held package. """ self._add_system_package( "foo", version="1.0", control_fields={"Status": "hold ok installed"}) self._add_system_package("bar", version="1.0") deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "foo", version="1.5") self._add_package_to_deb_dir(deb_dir, "bar", version="1.5") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() foo_10 = sorted(self.facade.get_packages_by_name("foo"))[0] self.assertEqual([foo_10], self.facade.get_locked_packages()) def test_perform_changes_dependency_error_same_version(self): """ Apt's Version objects have the same hash if the version string is the same. So if we have two different packages having the same version, perform_changes() needs to take the package into account when finding out which changes were requested. """ self._add_system_package("foo", version="1.0") self._add_system_package( "bar", version="1.0", control_fields={"Depends": "foo"}) self._add_system_package( "baz", version="1.0", control_fields={"Depends": "foo"}) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") [baz] = self.facade.get_packages_by_name("baz") self.facade.mark_remove(foo) error = self.assertRaises(DependencyError, self.facade.perform_changes) self.assertEqual( sorted(error.packages, key=self.version_sortkey), sorted([bar, baz], key=self.version_sortkey)) def test_get_package_holds_with_no_hold(self): """ If no package holds are set, C{get_package_holds} returns an empty C{list}. """ self._add_system_package("foo") self.facade.reload_channels() self.assertEqual([], self.facade.get_package_holds()) def test_get_package_holds_with_holds(self): """ If package holds are set, C{get_package_holds} returns the name of the packages that are held. 
""" self._add_system_package( "foo", control_fields={"Status": "hold ok installed"}) self._add_system_package("bar") self._add_system_package( "baz", control_fields={"Status": "hold ok installed"}) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [baz] = self.facade.get_packages_by_name("baz") self.assertEqual( ["baz", "foo"], sorted(self.facade.get_package_holds())) def test_mark_hold_and_perform_hold_changes(self): """ Test that L{perform_hold_changes} holds packages that have previously been marked for hold. """ self._add_system_package("foo") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_hold(foo) self.assertEqual("Package holds successfully changed.", self.facade._perform_hold_changes()) self.facade.reload_channels() self.assertEqual(["foo"], self.facade.get_package_holds()) def test_mark_hold(self): """ C{mark_hold} marks a package to be held. """ self._add_system_package("foo") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_hold(foo) self.facade.perform_changes() self.facade.reload_channels() self.assertEqual(["foo"], self.facade.get_package_holds()) def test_two_holds_with_the_same_version_id(self): """ Test C{mark_hold} can distinguish between two different packages with the same version number (the version number is used to make the unique hash for the package version). """ self._add_system_package("foo", version="1.0") self._add_system_package("bar", version="1.0") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") self.facade.mark_hold(foo) self.facade.mark_hold(bar) self.assertEqual(2, len(self.facade._version_hold_creations)) def test_mark_hold_existing_hold(self): """ If a package is already held, C{mark_hold} and C{perform_changes} won't return an error. """ self._add_system_package( "foo", control_fields={"Status": "hold ok installed"}) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_hold(foo) self.facade.perform_changes() self.facade.reload_channels() self.assertEqual(["foo"], self.facade.get_package_holds()) def test_mark_remove_hold(self): """ C{mark_remove_hold} marks a package as not held. """ self._add_system_package( "foo", control_fields={"Status": "hold ok installed"}) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_remove_hold(foo) self.facade.perform_changes() self.facade.reload_channels() self.assertEqual([], self.facade.get_package_holds()) def test_mark_remove_hold_no_package(self): """ If a package doesn't exist, C{mark_remove_hold} followed by C{perform_changes} doesn't return an error. It's up to the caller to make sure that the package exist, if it's important. """ self._add_system_package("foo") deb_dir = self.makeDir() self._add_package_to_deb_dir(deb_dir, "bar") self.facade.add_channel_apt_deb("file://%s" % deb_dir, "./") self.facade.reload_channels() [bar] = self.facade.get_packages_by_name("bar") self.facade.mark_remove_hold(bar) self.facade.perform_changes() self.facade.reload_channels() self.assertEqual([], self.facade.get_package_holds()) def test_mark_remove_hold_no_hold(self): """ If a package isn't held, the existing selection is retained when C{mark_remove_hold} and C{perform_changes} are called. 
""" self._add_system_package( "foo", control_fields={"Status": "deinstall ok installed"}) self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") self.facade.mark_remove_hold(foo) self.facade.perform_changes() self.facade.reload_channels() self.assertEqual([], self.facade.get_package_holds()) [foo] = self.facade.get_packages_by_name("foo") self.assertEqual( apt_pkg.SELSTATE_DEINSTALL, foo.package._pkg.selected_state) if not hasattr(Package, "shortname"): # The 'shortname' attribute was added when multi-arch support # was added to python-apt. So if it's not there, it means that # multi-arch support isn't available. skip_message = "multi-arch not supported" test_wb_mark_install_upgrade_non_main_arch_dependency_error.skip = ( skip_message) test_wb_mark_install_upgrade_non_main_arch.skip = skip_message landscape-client-14.01/landscape/package/tests/test_store.py0000644000175000017500000005445612301414317024007 0ustar andreasandreasimport threading import time import sqlite3 from landscape.tests.helpers import LandscapeTest from landscape.package.store import (HashIdStore, PackageStore, UnknownHashIDRequest, InvalidHashIdDb) class HashIdStoreTest(LandscapeTest): def setUp(self): super(HashIdStoreTest, self).setUp() self.filename = self.makeFile() self.store1 = HashIdStore(self.filename) self.store2 = HashIdStore(self.filename) def test_set_and_get_hash_id(self): self.store1.set_hash_ids({"ha\x00sh1": 123, "ha\x00sh2": 456}) self.assertEqual(self.store1.get_hash_id("ha\x00sh1"), 123) self.assertEqual(self.store1.get_hash_id("ha\x00sh2"), 456) def test_get_hash_ids(self): hash_ids = {"hash1": 123, "hash2": 456} self.store1.set_hash_ids(hash_ids) self.assertEqual(self.store1.get_hash_ids(), hash_ids) def test_wb_lazy_connection(self): """ The connection to the sqlite database is created only when some query gets actually requsted. """ self.assertEqual(self.store1._db, None) self.store1.get_hash_ids() self.assertTrue(isinstance(self.store1._db, sqlite3.Connection)) def test_wb_transactional_commits(self): """ If the operation run by the store succeeds, C{commit} is called once on the connection. """ db = sqlite3.connect(self.store1._filename) commits = [] class FakeDb(object): def __getattr__(self, name): if name == "commit": return self.commit return getattr(db, name) def commit(self): commits.append(None) self.store1._db = FakeDb() self.store1.set_hash_ids({}) self.assertEqual([None], commits) def test_wb_transactional_rolls_back(self): """ If the operation run by the store fails, C{rollback} is called once on the connection. 
""" db = sqlite3.connect(self.store1._filename) rollbacks = [] class FakeDb(object): def __getattr__(self, name): if name == "rollback": return self.rollback return getattr(db, name) def rollback(self): rollbacks.append(None) self.store1._db = FakeDb() self.assertRaises(Exception, self.store1.set_hash_ids, None) self.assertEqual([None], rollbacks) def test_get_id_hash(self): self.store1.set_hash_ids({"hash1": 123, "hash2": 456}) self.assertEqual(self.store2.get_id_hash(123), "hash1") self.assertEqual(self.store2.get_id_hash(456), "hash2") def test_clear_hash_ids(self): self.store1.set_hash_ids({"ha\x00sh1": 123, "ha\x00sh2": 456}) self.store1.clear_hash_ids() self.assertEqual(self.store2.get_hash_id("ha\x00sh1"), None) self.assertEqual(self.store2.get_hash_id("ha\x00sh2"), None) def test_get_unexistent_hash(self): self.assertEqual(self.store1.get_hash_id("hash1"), None) def test_get_unexistent_id(self): self.assertEqual(self.store1.get_id_hash(123), None) def test_overwrite_id_hash(self): self.store1.set_hash_ids({"hash1": 123}) self.store2.set_hash_ids({"hash2": 123}) self.assertEqual(self.store1.get_hash_id("hash1"), None) self.assertEqual(self.store1.get_hash_id("hash2"), 123) def test_overwrite_hash_id(self): self.store1.set_hash_ids({"hash1": 123}) self.store2.set_hash_ids({"hash1": 456}) self.assertEqual(self.store1.get_id_hash(123), None) self.assertEqual(self.store1.get_id_hash(456), "hash1") def test_set_hash_ids_timing(self): """Setting 20k hashes must take less than 5 seconds.""" hashes = dict((str(i), i) for i in range(20000)) started = time.time() self.store1.set_hash_ids(hashes) self.assertTrue(time.time() - started < 5, "Setting 20k hashes took more than 5 seconds.") def test_check_sanity(self): store_filename = self.makeFile() db = sqlite3.connect(store_filename) cursor = db.cursor() cursor.execute("CREATE TABLE hash" " (junk INTEGER PRIMARY KEY, hash BLOB UNIQUE)") cursor.close() db.commit() store = HashIdStore(store_filename) self.assertRaises(InvalidHashIdDb, store.check_sanity) class PackageStoreTest(LandscapeTest): def setUp(self): super(PackageStoreTest, self).setUp() self.filename = self.makeFile() self.store1 = PackageStore(self.filename) self.store2 = PackageStore(self.filename) def test_has_hash_id_db(self): self.assertFalse(self.store1.has_hash_id_db()) hash_id_db_filename = self.makeFile() HashIdStore(hash_id_db_filename) self.store1.add_hash_id_db(hash_id_db_filename) self.assertTrue(self.store1.has_hash_id_db()) def test_add_hash_id_db_with_non_sqlite_file(self): def junk_db_factory(): filename = self.makeFile() open(filename, "w").write("junk") return filename def raiseme(): store_filename = junk_db_factory() try: self.store1.add_hash_id_db(store_filename) except InvalidHashIdDb, e: self.assertEqual(str(e), store_filename) else: self.fail() raiseme() self.assertFalse(self.store1.has_hash_id_db()) def test_add_hash_id_db_with_wrong_schema(self): def non_compliant_db_factory(): filename = self.makeFile() db = sqlite3.connect(filename) cursor = db.cursor() cursor.execute("CREATE TABLE hash" " (junk INTEGER PRIMARY KEY, hash BLOB UNIQUE)") cursor.close() db.commit() return filename self.assertRaises(InvalidHashIdDb, self.store1.add_hash_id_db, non_compliant_db_factory()) self.assertFalse(self.store1.has_hash_id_db()) def hash_id_db_factory(self, hash_ids): filename = self.makeFile() store = HashIdStore(filename) store.set_hash_ids(hash_ids) return filename def test_get_hash_id_using_hash_id_dbs(self): # Without hash=>id dbs 
self.assertEqual(self.store1.get_hash_id("hash1"), None) self.assertEqual(self.store1.get_hash_id("hash2"), None) # This hash=>id will be overridden self.store1.set_hash_ids({"hash1": 1}) # Add a couple of hash=>id dbs self.store1.add_hash_id_db(self.hash_id_db_factory({"hash1": 2, "hash2": 3})) self.store1.add_hash_id_db(self.hash_id_db_factory({"hash2": 4, "ha\x00sh1": 5})) # Check look-up priorities and binary hashes self.assertEqual(self.store1.get_hash_id("hash1"), 2) self.assertEqual(self.store1.get_hash_id("hash2"), 3) self.assertEqual(self.store1.get_hash_id("ha\x00sh1"), 5) def test_get_id_hash_using_hash_id_db(self): """ When lookaside hash->id dbs are used, L{get_id_hash} has to query them first, falling back to the regular db in case the desired mapping is not found. """ self.store1.add_hash_id_db(self.hash_id_db_factory({"hash1": 123})) self.store1.add_hash_id_db(self.hash_id_db_factory({"hash1": 999, "hash2": 456})) self.store1.set_hash_ids({"hash3": 789}) self.assertEqual(self.store1.get_id_hash(123), "hash1") self.assertEqual(self.store1.get_id_hash(456), "hash2") self.assertEqual(self.store1.get_id_hash(789), "hash3") def test_add_and_get_available_packages(self): self.store1.add_available([1, 2]) self.assertEqual(self.store2.get_available(), [1, 2]) def test_add_available_conflicting(self): """Adding the same available package id twice is fine.""" self.store1.add_available([1]) self.store1.add_available([1]) self.assertEqual(self.store2.get_available(), [1]) def test_add_available_timing(self): """Adding 20k ids must take less than 5 seconds.""" started = time.time() self.store1.add_available(range(20000)) self.assertTrue(time.time() - started < 5, "Adding 20k available ids took more than 5 seconds.") def test_remove_available(self): self.store1.add_available([1, 2, 3, 4]) self.store1.remove_available([2, 3]) self.assertEqual(self.store2.get_available(), [1, 4]) def test_remove_available_timing(self): self.store1.add_available(range(20000)) started = time.time() self.store1.remove_available(range(20000)) self.assertTrue(time.time() - started < 5, "Removing 20k available ids took more than 5 seconds.") def test_clear_available(self): self.store1.add_available([1, 2, 3, 4]) self.store1.clear_available() self.assertEqual(self.store2.get_available(), []) def test_add_and_get_available_upgrades_packages(self): self.store1.add_available_upgrades([1, 2]) self.assertEqual(self.store2.get_available_upgrades(), [1, 2]) def test_add_available_upgrades_conflicting(self): """Adding the same available_upgrades package id twice is fine.""" self.store1.add_available_upgrades([1]) self.store1.add_available_upgrades([1]) self.assertEqual(self.store2.get_available_upgrades(), [1]) def test_add_available_upgrades_timing(self): """Adding 20k ids must take less than 5 seconds.""" started = time.time() self.store1.add_available_upgrades(range(20000)) self.assertTrue(time.time() - started < 5, "Adding 20k available upgrades ids took " "more than 5 seconds.") def test_remove_available_upgrades(self): self.store1.add_available_upgrades([1, 2, 3, 4]) self.store1.remove_available_upgrades([2, 3]) self.assertEqual(self.store2.get_available_upgrades(), [1, 4]) def test_remove_available_upgrades_timing(self): self.store1.add_available_upgrades(range(20000)) started = time.time() self.store1.remove_available_upgrades(range(20000)) self.assertTrue(time.time() - started < 5, "Removing 20k available upgrades ids took " "more than 5 seconds.") def test_clear_available_upgrades(self):
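        """
        C{clear_available_upgrades} clears the table of available
        upgrades by removing all its package ids.
        """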
self.store1.add_available_upgrades([1, 2, 3, 4]) self.store1.clear_available_upgrades() self.assertEqual(self.store2.get_available_upgrades(), []) def test_add_and_get_installed_packages(self): self.store1.add_installed([1, 2]) self.assertEqual(self.store2.get_installed(), [1, 2]) def test_add_installed_conflicting(self): """Adding the same installed package id twice is fine.""" self.store1.add_installed([1]) self.store1.add_installed([1]) self.assertEqual(self.store2.get_installed(), [1]) def test_add_installed_timing(self): """Adding 20k ids must take less than 5 seconds.""" started = time.time() self.store1.add_installed(range(20000)) self.assertTrue(time.time() - started < 5, "Adding 20k installed ids took more than 5 seconds.") def test_remove_installed(self): self.store1.add_installed([1, 2, 3, 4]) self.store1.remove_installed([2, 3]) self.assertEqual(self.store2.get_installed(), [1, 4]) def test_remove_installed_timing(self): self.store1.add_installed(range(20000)) started = time.time() self.store1.remove_installed(range(20000)) self.assertTrue(time.time() - started < 5, "Removing 20k installed ids took more than 5 seconds.") def test_clear_installed(self): self.store1.add_installed([1, 2, 3, 4]) self.store1.clear_installed() self.assertEqual(self.store2.get_installed(), []) def test_ensure_package_schema_with_new_tables(self): """ The L{ensure_package_schema} function behaves correctly when new tables are added. """ filename = self.makeFile() database = sqlite3.connect(filename) cursor = database.cursor() cursor.execute("CREATE TABLE available" " (id INTEGER PRIMARY KEY)") cursor.execute("CREATE TABLE available_upgrade" " (id INTEGER PRIMARY KEY)") cursor.execute("CREATE TABLE installed" " (id INTEGER PRIMARY KEY)") cursor.execute("CREATE TABLE hash_id_request" " (id INTEGER PRIMARY KEY, timestamp TIMESTAMP," " message_id INTEGER, hashes BLOB)") cursor.execute("CREATE TABLE task" " (id INTEGER PRIMARY KEY, queue TEXT," " timestamp TIMESTAMP, data BLOB)") cursor.close() database.commit() database.close() store = PackageStore(filename) store.get_locked() database = sqlite3.connect(filename) cursor = database.cursor() cursor.execute("pragma table_info(locked)") result = cursor.fetchall() self.assertTrue(len(result) > 0) def test_add_and_get_locked(self): """ L{PackageStore.add_locked} adds the given ids to the table of locked packages and commits the changes. """ self.store1.add_locked([1]) self.assertEqual(self.store2.get_locked(), [1]) def test_add_locked_conflicting(self): """Adding the same locked package id twice is fine.""" self.store1.add_locked([1]) self.store1.add_locked([1]) self.assertEqual(self.store2.get_locked(), [1]) def test_remove_locked(self): """ L{PackageStore.remove_locked} removes the given ids from the table of locked packages and commits the changes. """ self.store1.add_locked([1, 2, 3, 4]) self.store1.remove_locked([2, 3]) self.assertEqual(self.store2.get_locked(), [1, 4]) def test_remove_locked_non_existing(self): """ Removing non-existing locked packages is fine. """ self.store1.remove_locked([1]) self.assertEqual(self.store2.get_locked(), []) def test_clear_locked(self): """ L{PackageStore.clear_locked} clears the table of locked packages by removing all its package ids.
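        Typical round-trip usage of the locked-packages table, as
        exercised across the tests above (sketch):

            store = PackageStore(filename)
            store.add_locked([1, 2, 3])
            store.remove_locked([2])
            store.get_locked()   # => [1, 3]
            store.clear_locked()
            store.get_locked()   # => []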
""" self.store1.add_locked([1, 2, 3, 4]) self.store1.clear_locked() self.assertEqual(self.store2.get_locked(), []) def test_add_hash_id_request(self): hashes = ("ha\x00sh1", "ha\x00sh2") request1 = self.store1.add_hash_id_request(hashes) request2 = self.store2.get_hash_id_request(request1.id) self.assertEqual(request1.id, request2.id) self.assertEqual(request1.hashes, list(hashes)) self.assertEqual(request2.hashes, list(hashes)) def test_iter_hash_id_requests(self): hashes1 = ["ha\x00sh1", "ha\x00sh2"] hashes2 = ["ha\x00sh3", "ha\x00sh4"] self.store1.add_hash_id_request(hashes1) self.store1.add_hash_id_request(hashes2) hashes = [hash for request in self.store2.iter_hash_id_requests() for hash in request.hashes] self.assertEqual(hashes, hashes1 + hashes2) def test_get_initial_hash_id_request_timestamp(self): time_mock = self.mocker.replace("time.time") time_mock() self.mocker.result(123) self.mocker.replay() try: request1 = self.store1.add_hash_id_request(["hash1"]) request2 = self.store2.get_hash_id_request(request1.id) self.assertEqual(request2.timestamp, 123) # We handle mocker explicitly so that our hacked time() # won't break Twisted's internals. self.mocker.verify() finally: self.mocker.reset() def test_update_hash_id_request_timestamp(self): request1 = self.store1.add_hash_id_request(["hash1"]) request2 = self.store2.get_hash_id_request(request1.id) request1.timestamp = 456 self.assertEqual(request2.timestamp, 456) def test_default_hash_id_request_message_id(self): request = self.store1.add_hash_id_request(["hash1"]) self.assertEqual(request.message_id, None) def test_update_hash_id_request_message_id(self): request1 = self.store1.add_hash_id_request(["hash1"]) request2 = self.store2.get_hash_id_request(request1.id) request1.message_id = 456 self.assertEqual(request2.message_id, 456) def test_get_hash_id_request_with_unknown_request_id(self): self.assertRaises(UnknownHashIDRequest, self.store1.get_hash_id_request, 123) def test_remove_hash_id_request(self): request = self.store1.add_hash_id_request(["hash1"]) request.remove() self.assertRaises(UnknownHashIDRequest, self.store1.get_hash_id_request, request.id) def test_add_task(self): data = {"answer": 42} task = self.store1.add_task("reporter", data) self.assertEqual(type(task.id), int) self.assertEqual(task.queue, "reporter") self.assertEqual(task.data, data) def test_get_next_task(self): task1 = self.store1.add_task("reporter", [1]) task2 = self.store1.add_task("reporter", [2]) task3 = self.store1.add_task("changer", [3]) task = self.store2.get_next_task("reporter") self.assertEqual(task.id, task1.id) self.assertEqual(task.data, [1]) task = self.store2.get_next_task("changer") self.assertEqual(task.id, task3.id) self.assertEqual(task.data, [3]) task = self.store2.get_next_task("reporter") self.assertEqual(task.id, task1.id) self.assertEqual(task.data, [1]) task.remove() task = self.store2.get_next_task("reporter") self.assertEqual(task.id, task2.id) self.assertEqual(task.data, [2]) task.remove() task = self.store2.get_next_task("reporter") self.assertEqual(task, None) def test_get_task_timestamp(self): time_mock = self.mocker.replace("time.time") time_mock() self.mocker.result(123) self.mocker.replay() try: self.store1.add_task("reporter", [1]) task = self.store2.get_next_task("reporter") self.assertEqual(task.timestamp, 123) # We handle mocker explicitly so that our hacked time() # won't break Twisted's internals. 
self.mocker.verify() finally: self.mocker.reset() def test_next_tasks_ordered_by_timestamp(self): time_mock = self.mocker.replace("time.time") time_mock() self.mocker.result(222) time_mock() self.mocker.result(111) self.mocker.replay() try: self.store1.add_task("reporter", [1]) self.store1.add_task("reporter", [2]) task = self.store2.get_next_task("reporter") self.assertEqual(task.timestamp, 111) task.remove() task = self.store2.get_next_task("reporter") self.assertEqual(task.timestamp, 222) # We handle mocker explicitly so that our hacked time() # won't break Twisted's internals. self.mocker.verify() finally: self.mocker.reset() def test_clear_hash_id_requests(self): request1 = self.store1.add_hash_id_request(["hash1"]) request2 = self.store1.add_hash_id_request(["hash2"]) self.store1.clear_hash_id_requests() self.assertRaises(UnknownHashIDRequest, self.store1.get_hash_id_request, request1.id) self.assertRaises(UnknownHashIDRequest, self.store1.get_hash_id_request, request2.id) def test_clear_tasks(self): data = {"answer": 42} task = self.store1.add_task("reporter", data) self.assertEqual(type(task.id), int) self.assertEqual(task.queue, "reporter") self.assertEqual(task.data, data) self.store1.clear_tasks() task = self.store2.get_next_task("reporter") self.assertEqual(task, None) def test_clear_tasks_except_1_task(self): data = {"answer": 42} task = self.store1.add_task("reporter", data) data = {"answer": 43} task2 = self.store1.add_task("reporter", data) self.store1.clear_tasks(except_tasks=(task2,)) task = self.store2.get_next_task("reporter") self.assertEqual(task.id, task2.id) task.remove() task = self.store2.get_next_task("reporter") self.assertEqual(task, None) def test_clear_tasks_except_2_tasks(self): data = {"answer": 42} task = self.store1.add_task("reporter", data) data = {"answer": 43} task2 = self.store1.add_task("reporter", data) data = {"answer": 44} task3 = self.store1.add_task("reporter", data) self.store1.clear_tasks(except_tasks=(task2, task3)) task = self.store2.get_next_task("reporter") self.assertEqual(task.id, task2.id) task.remove() task = self.store2.get_next_task("reporter") self.assertEqual(task.id, task3.id) task.remove() task = self.store2.get_next_task("reporter") self.assertEqual(task, None) def test_parallel_database_access(self): error = [] def func1(): func1.store1 = PackageStore(self.filename) func1.store1.add_task("reporter", "data") func1.store1.add_task("reporter", "data") func1.task = func1.store1.get_next_task("reporter") def func2(): func2.store2 = PackageStore(self.filename) try: func2.store2.add_task("reporter", "data") except Exception, e: error.append(str(e)) for func in [func1, func2]: thread = threading.Thread(target=func) thread.start() thread.join() self.assertEqual(error, []) landscape-client-14.01/landscape/package/tests/test_releaseupgrader.py0000644000175000017500000011176212301414317026017 0ustar andreasandreasimport os import unittest import signal import tarfile import ConfigParser from twisted.internet import reactor from twisted.internet.defer import succeed, fail, Deferred from landscape.lib.gpg import InvalidGPGSignature from landscape.lib.fetch import HTTPCodeError from landscape.package.store import PackageStore from landscape.package.releaseupgrader import ( ReleaseUpgrader, ReleaseUpgraderConfiguration, main) from landscape.tests.helpers import ( LandscapeTest, BrokerServiceHelper, LogKeeperHelper, EnvironSaverHelper) from landscape.manager.manager import SUCCEEDED, FAILED class 
ReleaseUpgraderConfigurationTest(unittest.TestCase): def test_upgrade_tool_directory(self): """ L{ReleaseUpgraderConfiguration.upgrade_tool_directory} returns the path to the directory holding the fetched upgrade-tool files. """ config = ReleaseUpgraderConfiguration() self.assertEqual(config.upgrade_tool_directory, os.path.join(config.package_directory, "upgrade-tool")) class ReleaseUpgraderTest(LandscapeTest): helpers = [LogKeeperHelper, EnvironSaverHelper, BrokerServiceHelper] def setUp(self): super(ReleaseUpgraderTest, self).setUp() self.config = ReleaseUpgraderConfiguration() self.config.data_path = self.makeDir() os.mkdir(self.config.package_directory) os.mkdir(self.config.upgrade_tool_directory) self.store = PackageStore(self.makeFile()) self.upgrader = ReleaseUpgrader(self.store, None, self.remote, self.config) self.upgrader.get_session_id() service = self.broker_service service.message_store.set_accepted_types(["operation-result"]) def get_pending_messages(self): return self.broker_service.message_store.get_pending_messages() def test_fetch(self): """ L{ReleaseUpgrader.fetch} fetches the upgrade tool tarball and signature from the given URLs. """ tarball_url = "http://some/where/karmic.tar.gz" signature_url = "http://some/where/karmic.tar.gz.gpg" os.rmdir(self.config.upgrade_tool_directory) fetch_mock = self.mocker.replace("landscape.lib.fetch.fetch_async") fetch_mock(tarball_url) self.mocker.result(succeed("tarball")) fetch_mock(signature_url) self.mocker.result(succeed("signature")) self.mocker.replay() result = self.upgrader.fetch(tarball_url, signature_url) def check_result(ignored): directory = self.config.upgrade_tool_directory self.assertFileContent( os.path.join(directory, "karmic.tar.gz"), "tarball") self.assertFileContent( os.path.join(directory, "karmic.tar.gz.gpg"), "signature") self.assertIn("INFO: Successfully fetched upgrade-tool files", self.logfile.getvalue()) result.addCallback(check_result) return result def test_fetch_with_errors(self): """ L{ReleaseUpgrader.fetch} logs a warning in case any of the upgrade tool files fails to be fetched. """ tarball_url = "http://some/where/karmic.tar.gz" signature_url = "http://some/where/karmic.tar.gz.gpg" fetch_mock = self.mocker.replace("landscape.lib.fetch.fetch_async") fetch_mock(tarball_url) self.mocker.result(succeed("tarball")) fetch_mock(signature_url) self.mocker.result(fail(HTTPCodeError(404, "not found"))) self.mocker.replay() result = self.upgrader.fetch(tarball_url, signature_url) def check_failure(failure): self.assertIn("WARNING: Couldn't fetch file from %s (Server return" "ed HTTP code 404)" % signature_url, self.logfile.getvalue()) self.assertIn("WARNING: Couldn't fetch all upgrade-tool files", self.logfile.getvalue()) result.addCallback(self.fail) result.addErrback(check_failure) return result def test_verify(self): """ L{ReleaseUpgrader.verify} verifies the upgrade tool tarball against its signature. """ tarball_filename = "/some/tarball" signature_filename = "/some/signature" gpg_verify_mock = self.mocker.replace("landscape.lib.gpg.gpg_verify") gpg_verify_mock(tarball_filename, signature_filename) self.mocker.result(succeed(True)) self.mocker.replay() result = self.upgrader.verify(tarball_filename, signature_filename) def check_result(ignored): self.assertIn("INFO: Successfully verified upgrade-tool tarball", self.logfile.getvalue()) result.addCallback(check_result) return result def test_verify_invalid_signature(self): """ L{ReleaseUpgrader.verify} logs a warning in case the tarball signature is not valid. 
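In that case L{landscape.lib.gpg.gpg_verify} fires its deferred with a failure wrapping L{InvalidGPGSignature}, and the deferred returned by L{ReleaseUpgrader.verify} errbacks.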
""" tarball_filename = "/some/tarball" signature_filename = "/some/signature" gpg_verify_mock = self.mocker.replace("landscape.lib.gpg.gpg_verify") gpg_verify_mock(tarball_filename, signature_filename) self.mocker.result(fail(InvalidGPGSignature("gpg error"))) self.mocker.replay() result = self.upgrader.verify(tarball_filename, signature_filename) def check_failure(failure): self.assertIn("WARNING: Invalid signature for upgrade-tool " "tarball: gpg error", self.logfile.getvalue()) result.addCallback(self.fail) result.addErrback(check_failure) return result def test_extract(self): """ The L{ReleaseUpgrader.extract} method extracts the upgrade-tool tarball in the proper directory. """ original_filename = self.makeFile("data\n") tarball_filename = self.makeFile() tarball = tarfile.open(tarball_filename, "w:gz") tarball.add(original_filename, arcname="file") tarball.close() result = self.upgrader.extract(tarball_filename) def check_result(ignored): filename = os.path.join(self.config.upgrade_tool_directory, "file") self.assertTrue(os.path.exists(filename)) self.assertFileContent(filename, "data\n") result.addCallback(check_result) return result def test_tweak_fixes_broken_dapper_config(self): """ The L{ReleaseUpgrader.tweak} method fixes a missing section in the dapper config files included in the upgrade tool tarball. """ config_filename = os.path.join(self.config.upgrade_tool_directory, "DistUpgrade.cfg.dapper") self.makeFile(path=config_filename, content="[Files]\n" "BackupExt=distUpgrade\n" "LogDir=/var/log/dist-upgrade\n") result = self.upgrader.tweak("dapper") def check_result(ignored): config = ConfigParser.ConfigParser() config.read(config_filename) self.assertFalse(config.getboolean("NonInteractive", "ForceOverwrite")) result.addCallback(check_result) return result def test_tweak_does_not_change_good_dapper_config(self): """ The L{ReleaseUpgrader.tweak} method doesn't change the dapper config file if it's not broken. """ config_filename = os.path.join(self.config.upgrade_tool_directory, "DistUpgrade.cfg.dapper") self.makeFile(path=config_filename, content="[NonInteractive]\n" "ForceOverwrite=No\n") result = self.upgrader.tweak("dapper") def check_result(ignored): config = ConfigParser.ConfigParser() config.read(config_filename) self.assertFalse(config.getboolean("NonInteractive", "ForceOverwrite")) result.addCallback(check_result) return result def test_tweak_includes_landscape_ppa_in_mirrors(self): """ The L{ReleaseUpgrader.tweak} method adds the Landscape PPA repository to the list of available mirrors. """ mirrors_filename = os.path.join(self.config.upgrade_tool_directory, "mirrors.cfg") self.makeFile(path=mirrors_filename, content="ftp://ftp.lug.ro/ubuntu/\n") def check_result(ignored): self.assertFileContent(mirrors_filename, "ftp://ftp.lug.ro/ubuntu/\n" "http://ppa.launchpad.net/landscape/" "trunk/ubuntu/\n") result = self.upgrader.tweak("hardy") result.addCallback(check_result) return result def test_tweak_sets_dbus_start_script(self): """ The L{ReleaseUpgrader.tweak} method adds to the upgrade-tool configuration a little script that starts dbus after the upgrade. 
""" config_filename = os.path.join(self.config.upgrade_tool_directory, "DistUpgrade.cfg.dapper") self.makeFile(path=config_filename, content="[Distro]\n" "PostInstallScripts=/foo.sh\n") def check_result(ignored): config = ConfigParser.ConfigParser() config.read(config_filename) self.assertEqual(config.get("Distro", "PostInstallScripts"), "/foo.sh, ./dbus.sh") dbus_sh = os.path.join(self.config.upgrade_tool_directory, "dbus.sh") self.assertFileContent(dbus_sh, "#!/bin/sh\n" "/etc/init.d/dbus start\n" "sleep 10\n") result = self.upgrader.tweak("dapper") result.addCallback(check_result) return result def test_tweak_sets_dbus_start_script_with_no_post_install_scripts(self): """ The L{ReleaseUpgrader.tweak} method adds to the upgrade-tool configuration a little script that starts dbus after the upgrade. This works even when the config file doesn't have a PostInstallScripts entry yet. """ config_filename = os.path.join(self.config.upgrade_tool_directory, "DistUpgrade.cfg.dapper") self.makeFile(path=config_filename, content="") def check_result(ignored): config = ConfigParser.ConfigParser() config.read(config_filename) self.assertEqual(config.get("Distro", "PostInstallScripts"), "./dbus.sh") dbus_sh = os.path.join(self.config.upgrade_tool_directory, "dbus.sh") self.assertFileContent(dbus_sh, "#!/bin/sh\n" "/etc/init.d/dbus start\n" "sleep 10\n") result = self.upgrader.tweak("dapper") result.addCallback(check_result) return result def test_default_logs_directory(self): """ The default directory for the upgrade-tool logs is the system one. """ self.assertEqual(self.upgrader.logs_directory, "/var/log/dist-upgrade") def test_default_logs_limit(self): """ The default read limit for the upgrade-tool logs is 100000 bytes. """ self.assertEqual(self.upgrader.logs_limit, 100000) def test_make_operation_result_text(self): """ L{ReleaseUpgrade.make_operation_result_text} aggregates the contents of the process standard output, error and log files. """ self.upgrader.logs_directory = self.makeDir() self.makeFile(basename="main.log", dirname=self.upgrader.logs_directory, content="main log") self.makeFile(basename="apt.log", dirname=self.upgrader.logs_directory, content="apt log") text = self.upgrader.make_operation_result_text("stdout", "stderr") self.assertEqual(text, "=== Standard output ===\n\n" "stdout\n\n" "=== Standard error ===\n\n" "stderr\n\n" "=== apt.log ===\n\n" "apt log\n\n" "=== main.log ===\n\n" "main log\n\n") def test_make_operation_result_text_with_no_stderr(self): """ L{ReleaseUpgrade.make_operation_result_text} skips the standard error if it's empty. """ self.upgrader.logs_directory = self.makeDir() text = self.upgrader.make_operation_result_text("stdout", "") self.assertEqual(text, "=== Standard output ===\n\n" "stdout\n\n") def test_make_operation_result_text_only_considers_log_files(self): """ L{ReleaseUpgrade.make_operation_result_text} only considers log files from the last upgrade-tool run, directories containing log files from an older run are skipped. """ self.upgrader.logs_directory = self.makeDir() self.makeDir(dirname=self.upgrader.logs_directory) text = self.upgrader.make_operation_result_text("stdout", "stderr") self.assertEqual(text, "=== Standard output ===\n\n" "stdout\n\n" "=== Standard error ===\n\n" "stderr\n\n") def test_make_operation_result_text_trims_long_files(self): """ L{ReleaseUpgrade.make_operation_result_text} only reads the last L{logs_limit} lines of a log file. 
""" self.upgrader.logs_directory = self.makeDir() self.upgrader.logs_limit = 8 self.makeFile(basename="main.log", dirname=self.upgrader.logs_directory, content="very long log") text = self.upgrader.make_operation_result_text("stdout", "stderr") self.assertEqual(text, "=== Standard output ===\n\n" "stdout\n\n" "=== Standard error ===\n\n" "stderr\n\n" "=== main.log ===\n\n" "long log\n\n") def test_upgrade(self): """ The L{ReleaseUpgrader.upgrade} method spawns the appropropriate upgrade-tool script and reports the result. """ self.upgrader.logs_directory = self.makeDir() upgrade_tool_directory = self.config.upgrade_tool_directory upgrade_tool_filename = os.path.join(upgrade_tool_directory, "karmic") fd = open(upgrade_tool_filename, "w") fd.write("#!/bin/sh\n" "echo $@\n" "echo FOO=$FOO\n" "echo PWD=$PWD\n" "echo out\n") fd.close() os.chmod(upgrade_tool_filename, 0755) env_backup = os.environ.copy() os.environ.clear() os.environ.update({"FOO": "bar"}) deferred = Deferred() def do_test(): result = self.upgrader.upgrade("karmic", 100) def check_result(ignored): self.assertIn("INFO: Queuing message with release upgrade " "results to exchange urgently.", self.logfile.getvalue()) result_text = (u"=== Standard output ===\n\n" "--frontend DistUpgradeViewNonInteractive\n" "FOO=bar\n" "PWD=%s\nout\n\n\n" % upgrade_tool_directory) self.assertMessages(self.get_pending_messages(), [{"type": "operation-result", "operation-id": 100, "status": SUCCEEDED, "result-text": result_text, "result-code": 0}]) result.addCallback(check_result) result.chainDeferred(deferred) reactor.callWhenRunning(do_test) def cleanup(ignored): os.environ = env_backup return ignored return deferred.addBoth(cleanup) def test_upgrade_with_server_mode(self): """ The L{ReleaseUpgrader.upgrade} accepts an optional C{mode} parameter which gets passed to the upgrade-tool script as argument for the C{--mode} command line option. """ self.upgrader.logs_directory = self.makeDir() upgrade_tool_directory = self.config.upgrade_tool_directory upgrade_tool_filename = os.path.join(upgrade_tool_directory, "hardy") self.makeFile(path=upgrade_tool_filename, content="#!/bin/sh\n" "echo $@\n") os.chmod(upgrade_tool_filename, 0755) deferred = Deferred() def do_test(): result = self.upgrader.upgrade("hardy", 100, mode="server") def check_result(ignored): result_text = (u"=== Standard output ===\n\n" "--frontend DistUpgradeViewNonInteractive " "--mode server\n\n\n") self.assertMessages(self.get_pending_messages(), [{"type": "operation-result", "operation-id": 100, "status": SUCCEEDED, "result-text": result_text, "result-code": 0}]) result.addCallback(check_result) result.chainDeferred(deferred) reactor.callWhenRunning(do_test) return deferred def test_upgrade_with_env_variables(self): """ The L{ReleaseUpgrader.upgrade} method optionally sets environment variables to be passed to the upgrade-tool process. 
""" self.upgrader.logs_directory = self.makeDir() upgrade_tool_directory = self.config.upgrade_tool_directory upgrade_tool_filename = os.path.join(upgrade_tool_directory, "karmic") fd = open(upgrade_tool_filename, "w") fd.write("#!/bin/sh\n" "echo DEBUG_UPDATE_MANAGER=$DEBUG_UPDATE_MANAGER\n" "echo RELEASE_UPRADER_ALLOW_THIRD_PARTY=" "$RELEASE_UPRADER_ALLOW_THIRD_PARTY\n") fd.close() os.chmod(upgrade_tool_filename, 0755) env_backup = os.environ.copy() os.environ.clear() deferred = Deferred() def do_test(): result = self.upgrader.upgrade("karmic", 100, allow_third_party=True, debug=True) def check_result(ignored): result_text = (u"=== Standard output ===\n\n" "DEBUG_UPDATE_MANAGER=True\n" "RELEASE_UPRADER_ALLOW_THIRD_PARTY=True\n\n\n") self.assertMessages(self.get_pending_messages(), [{"type": "operation-result", "operation-id": 100, "status": SUCCEEDED, "result-text": result_text, "result-code": 0}]) result.addCallback(check_result) result.chainDeferred(deferred) reactor.callWhenRunning(do_test) def cleanup(ignored): os.environ = env_backup return ignored return deferred.addBoth(cleanup) def test_upgrade_with_failure(self): """ The L{ReleaseUpgrader.upgrade} sends a message with failed status field if the upgrade-tool exits with non-zero code. """ self.upgrader.logs_directory = self.makeDir() upgrade_tool_directory = self.config.upgrade_tool_directory upgrade_tool_filename = os.path.join(upgrade_tool_directory, "karmic") fd = open(upgrade_tool_filename, "w") fd.write("#!/bin/sh\n" "echo out\n" "echo err >&2\n" "exit 3") fd.close() os.chmod(upgrade_tool_filename, 0755) deferred = Deferred() def do_test(): result = self.upgrader.upgrade("karmic", 100) def check_result(ignored): result_text = (u"=== Standard output ===\n\nout\n\n\n" "=== Standard error ===\n\nerr\n\n\n") self.assertMessages(self.get_pending_messages(), [{"type": "operation-result", "operation-id": 100, "status": FAILED, "result-text": result_text, "result-code": 3}]) result.addCallback(check_result) result.chainDeferred(deferred) reactor.callWhenRunning(do_test) return deferred def test_upgrade_with_open_child_fds(self): """ The deferred returned by the L{ReleaseUpgrader.upgrade} method callbacks correctly even if the spawned upgrade-tool process forks and passes its files descriptors over to child processes we don't know about. 
""" self.upgrader.logs_directory = self.makeDir() upgrade_tool_directory = self.config.upgrade_tool_directory upgrade_tool_filename = os.path.join(upgrade_tool_directory, "karmic") child_pid_filename = self.makeFile() fd = open(upgrade_tool_filename, "w") fd.write("#!/usr/bin/env python\n" "import os\n" "import time\n" "import sys\n" "if __name__ == '__main__':\n" " print 'First parent'\n" " pid = os.fork()\n" " if pid > 0:\n" " time.sleep(0.5)\n" " sys.exit(0)\n" " pid = os.fork()\n" " if pid > 0:\n" " fd = open('%s', 'w')\n" " fd.write(str(pid))\n" " fd.close()\n" " sys.exit(0)\n" " while True:\n" " time.sleep(2)\n" % child_pid_filename) fd.close() os.chmod(upgrade_tool_filename, 0755) os.environ.clear() os.environ.update({"FOO": "bar"}) deferred = Deferred() def do_test(): result = self.upgrader.upgrade("karmic", 100) def kill_child(how): fd = open(child_pid_filename, "r") child_pid = int(fd.read()) fd.close() os.remove(child_pid_filename) try: os.kill(child_pid, signal.SIGKILL) self.assertEqual(how, "cleanly") return child_pid except OSError: pass force_kill_child = reactor.callLater(2, kill_child, "brutally") def check_result(ignored): force_kill_child.cancel() self.assertIn("INFO: Queuing message with release upgrade " "results to exchange urgently.", self.logfile.getvalue()) kill_child("cleanly") result_text = self.get_pending_messages()[0]["result-text"] self.assertIn("First parent\n", result_text) result.addCallback(check_result) result.chainDeferred(deferred) reactor.callWhenRunning(do_test) def cleanup(ignored): self.assertFalse(os.path.exists(child_pid_filename)) return ignored return deferred.addBoth(cleanup) def test_finish(self): """ The L{ReleaseUpgrader.finish} method wipes the upgrade-tool directory and spawn the package-reporter, to inform the server of the changed packages. """ upgrade_tool_directory = self.config.upgrade_tool_directory open(os.path.join(upgrade_tool_directory, "somefile"), "w").close() os.mkdir(os.path.join(upgrade_tool_directory, "somedir")) reporter_filename = self.makeFile("#!/bin/sh\n" "echo $@\n" "echo $(pwd)\n") os.chmod(reporter_filename, 0755) find_reporter_mock = self.mocker.replace("landscape.package.reporter." "find_reporter_command") find_reporter_mock() self.mocker.result(reporter_filename) self.mocker.replay() deferred = Deferred() def do_test(): result = self.upgrader.finish() def check_result((out, err, code)): self.assertFalse(os.path.exists(upgrade_tool_directory)) self.assertEqual(out, "--force-apt-update\n%s\n" % os.getcwd()) self.assertEqual(err, "") self.assertEqual(code, 0) result.addCallback(check_result) result.chainDeferred(deferred) reactor.callWhenRunning(do_test) return deferred def test_finish_as_root(self): """ If the release-upgrader process is run as root, as it alwyas should, the L{ReleaseUpgrader.finish} method spawns the package-reporter with the landscape uid and gid. """ find_reporter_mock = self.mocker.replace("landscape.package.reporter." 
"find_reporter_command") find_reporter_mock() self.mocker.result("reporter") getuid_mock = self.mocker.replace("os.getuid") getuid_mock() self.mocker.result(0) getpwnam_mock = self.mocker.replace("pwd.getpwnam") getpwnam_mock("landscape") class FakePwNam(object): pw_uid = 1234 self.mocker.result(FakePwNam()) getgrnam_mock = self.mocker.replace("grp.getgrnam") getgrnam_mock("landscape") class FakeGrNam(object): gr_gid = 5678 self.mocker.result(FakeGrNam()) spawn_process_calls = [] def spawn_process(pp, reporter, args=None, uid=None, gid=None, path=None, env=None, usePTY=None): self.assertEqual(uid, 1234) self.assertEqual(gid, 5678) spawn_process_calls.append(True) saved_spawn_process = reactor.spawnProcess reactor.spawnProcess = spawn_process self.mocker.replay() try: self.upgrader.finish() finally: reactor.spawnProcess = saved_spawn_process self.assertEqual(spawn_process_calls, [True]) def test_finish_with_config_file(self): """ The L{ReleaseUpgrader.finish} method passes over to the reporter the configuration file the release-upgrader was called with. """ reporter_filename = self.makeFile("#!/bin/sh\necho $@\n") os.chmod(reporter_filename, 0755) self.config.config = "/some/config" find_reporter_mock = self.mocker.replace("landscape.package.reporter." "find_reporter_command") find_reporter_mock() self.mocker.result(reporter_filename) self.mocker.replay() deferred = Deferred() def do_test(): result = self.upgrader.finish() def check_result((out, err, code)): self.assertEqual(out, "--force-apt-update " "--config=/some/config\n") self.assertEqual(err, "") self.assertEqual(code, 0) result.addCallback(check_result) result.chainDeferred(deferred) reactor.callWhenRunning(do_test) return deferred def test_handle_release_upgrade(self): """ The L{ReleaseUpgrader.handle_release_upgrade} method calls the other helper methods in the right order and with the right arguments. 
""" calls = [] upgrade_tool_directory = self.config.upgrade_tool_directory def fetch(tarball_url, signature_url): self.assertEqual(tarball_url, "http://some/tarball") self.assertEqual(signature_url, "http://some/sign") calls.append("fetch") return succeed(None) def verify(tarball_filename, signature_filename): self.assertEqual(tarball_filename, os.path.join(upgrade_tool_directory, "tarball")) self.assertEqual(signature_filename, os.path.join(upgrade_tool_directory, "sign")) calls.append("verify") def extract(filename_tarball): self.assertEqual(filename_tarball, os.path.join(upgrade_tool_directory, "tarball")) calls.append("extract") def tweak(current_code_name): self.assertEqual(current_code_name, "jaunty") calls.append("tweak") def upgrade(code_name, operation_id, allow_third_party=False, debug=False, mode=None): self.assertEqual(operation_id, 100) self.assertEqual(code_name, "karmic") self.assertTrue(allow_third_party) self.assertFalse(debug) self.assertIdentical(mode, None) calls.append("upgrade") def finish(): calls.append("finish") self.upgrader.fetch = fetch self.upgrader.verify = verify self.upgrader.extract = extract self.upgrader.tweak = tweak self.upgrader.upgrade = upgrade self.upgrader.finish = finish self.upgrader.lsb_release_filename = self.makeFile( "DISTRIB_CODENAME=jaunty\n") message = {"type": "release-upgrade", "code-name": "karmic", "upgrade-tool-tarball-url": "http://some/tarball", "upgrade-tool-signature-url": "http://some/sign", "allow-third-party": True, "operation-id": 100} result = self.upgrader.handle_release_upgrade(message) def check_result(ignored): self.assertEqual(calls, ["fetch", "verify", "extract", "tweak", "upgrade", "finish"]) result.addCallback(check_result) return result def test_handle_release_upgrade_on_dapper_server(self): """ On Dapper server, the L{ReleaseUpgrader.handle_release_upgrade} method calls sets the upgrade-tool running mode to "server". """ calls = [] def upgrade(code_name, operation_id, allow_third_party=False, debug=False, mode=None): self.assertEqual(mode, "server") calls.append("upgrade") self.upgrader.fetch = lambda x, y: succeed(None) self.upgrader.verify = lambda x, y: None self.upgrader.extract = lambda x: None self.upgrader.tweak = lambda x: None self.upgrader.upgrade = upgrade self.upgrader.finish = lambda: None self.upgrader.lsb_release_filename = self.makeFile( "DISTRIB_CODENAME=dapper\n") message = {"type": "release-upgrade", "code-name": "hardy", "upgrade-tool-tarball-url": "http://some/tarball", "upgrade-tool-signature-url": "http://some/sign", "operation-id": 100} result = self.upgrader.handle_release_upgrade(message) def check_result(ignored): self.assertEqual(calls, ["upgrade"]) result.addCallback(check_result) return result def test_handle_release_upgrade_with_already_upgraded_system(self): """ The L{ReleaseUpgrader.handle_release_upgrade} method reports a failure if the system is already running the desired release. 
""" self.upgrader.lsb_release_filename = self.makeFile( "DISTRIB_CODENAME=karmic\n") message = {"type": "release-upgrade", "code-name": "karmic", "operation-id": 100} result = self.upgrader.handle_release_upgrade(message) def check_result(ignored): self.assertIn("INFO: Queuing message with release upgrade " "failure to exchange urgently.", self.logfile.getvalue()) self.assertMessages(self.get_pending_messages(), [{"type": "operation-result", "operation-id": 100, "status": FAILED, "result-text": "The system is already " "running karmic.", "result-code": 1}]) result.addCallback(check_result) return result def test_handle_release_upgrade_with_abort(self): """ The L{ReleaseUpgrader.handle_release_upgrade} method reports a failure if any of the helper method errbacks. """ self.upgrader.lsb_release_filename = self.makeFile( "DISTRIB_CODENAME=jaunty\n") calls = [] def fetch(tarball_url, signature_url): calls.append("fetch") return succeed(None) def verify(tarball_filename, signature_filename): calls.append("verify") raise Exception("failure") def extract(tarball_filename): calls.append("extract") def tweak(current_code_name): calls.append("extract") def upgrade(code_name, operation_id): calls.append("upgrade") def finish(): calls.append("finish") self.upgrader.fetch = fetch self.upgrader.verify = verify self.upgrader.extract = extract self.upgrader.tweak = tweak self.upgrader.upgrade = upgrade self.upgrader.finish = finish message = {"type": "release-upgrade", "code-name": "karmic", "operation-id": 100, "upgrade-tool-tarball-url": "http://some/tarball", "upgrade-tool-signature-url": "http://some/signature"} result = self.upgrader.handle_release_upgrade(message) def check_result(ignored): self.assertIn("INFO: Queuing message with release upgrade " "failure to exchange urgently.", self.logfile.getvalue()) self.assertMessages(self.get_pending_messages(), [{"type": "operation-result", "operation-id": 100, "status": FAILED, "result-text": "failure", "result-code": 1}]) self.assertEqual(calls, ["fetch", "verify"]) result.addCallback(check_result) return result def test_handle_task(self): """ The L{ReleaseUpgrader.handle_task} method invokes the correct handler for tasks carrying messages of type C{release-upgrade}. """ self.upgrader.handle_release_upgrade = lambda message: message message = {"type": "release-upgrade"} class FakeTask(object): data = message task = FakeTask() self.assertEqual(self.upgrader.handle_task(task), task.data) def test_handle_task_with_wrong_type(self): """ The L{ReleaseUpgrader.handle_task} method doesn't take any action if the message carried by task is not known. """ message = {"type": "foo"} class FakeTask(object): data = message self.assertEqual(self.upgrader.handle_task(FakeTask()), None) def test_main(self): """ The L{main} function creates a new session if the process is not running in its own process group. 
""" self.mocker.order() run_task_handler = self.mocker.replace("landscape.package.taskhandler" ".run_task_handler", passthrough=False) getpgrp = self.mocker.replace("os.getpgrp") self.expect(getpgrp()).result(os.getpid() + 1) setsid = self.mocker.replace("os.setsid") setsid() run_task_handler(ReleaseUpgrader, ["ARGS"]) self.mocker.result("RESULT") self.mocker.replay() self.assertEqual(main(["ARGS"]), "RESULT") landscape-client-14.01/landscape/package/tests/test_changer.py0000644000175000017500000017360112301414317024254 0ustar andreasandreas# -*- encoding: utf-8 -*- import base64 import time import sys import os from twisted.internet.defer import Deferred from twisted.python.failure import Failure from twisted.internet.error import ProcessTerminated, ProcessDone from landscape.lib.fs import create_file, read_file, touch_file from landscape.package.changer import ( PackageChanger, main, find_changer_command, UNKNOWN_PACKAGE_DATA_TIMEOUT, SUCCESS_RESULT, DEPENDENCY_ERROR_RESULT, POLICY_ALLOW_INSTALLS, POLICY_ALLOW_ALL_CHANGES, ERROR_RESULT) from landscape.package.store import PackageStore from landscape.package.facade import ( DependencyError, TransactionError) from landscape.package.changer import ( PackageChangerConfiguration, ChangePackagesResult) from landscape.tests.mocker import ANY from landscape.tests.helpers import ( LandscapeTest, BrokerServiceHelper, StubProcessFactory) from landscape.package.tests.helpers import ( HASH1, HASH2, HASH3, PKGDEB1, PKGDEB2, AptFacadeHelper, SimpleRepositoryHelper) from landscape.manager.manager import FAILED from landscape.manager.shutdownmanager import ShutdownFailedError from landscape.reactor import FakeReactor class AptPackageChangerTest(LandscapeTest): helpers = [AptFacadeHelper, SimpleRepositoryHelper, BrokerServiceHelper] def setUp(self): super(AptPackageChangerTest, self).setUp() self.store = PackageStore(self.makeFile()) self.config = PackageChangerConfiguration() self.config.data_path = self.makeDir() self.process_factory = StubProcessFactory() self.landscape_reactor = FakeReactor() reboot_required_filename = self.makeFile("reboot required") os.mkdir(self.config.package_directory) os.mkdir(self.config.binaries_path) touch_file(self.config.update_stamp_filename) self.changer = PackageChanger( self.store, self.facade, self.remote, self.config, process_factory=self.process_factory, landscape_reactor=self.landscape_reactor, reboot_required_filename=reboot_required_filename) self.changer.update_notifier_stamp = "/Not/Existing" self.changer.get_session_id() service = self.broker_service service.message_store.set_accepted_types(["change-packages-result", "operation-result"]) def set_pkg1_installed(self): """Return the hash of a package that is installed.""" self._add_system_package("foo") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") return self.facade.get_package_hash(foo) def set_pkg2_satisfied(self): """Return the hash of a package that can be installed.""" self._add_package_to_deb_dir(self.repository_dir, "bar") self.facade.reload_channels() [bar] = self.facade.get_packages_by_name("bar") return self.facade.get_package_hash(bar) def set_pkg1_and_pkg2_satisfied(self): """Make a package depend on another package. Return the hashes of the two packages. 
""" self._add_package_to_deb_dir( self.repository_dir, "foo", control_fields={"Depends": "bar"}) self._add_package_to_deb_dir(self.repository_dir, "bar") self.facade.reload_channels() [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") return ( self.facade.get_package_hash(foo), self.facade.get_package_hash(bar)) def set_pkg2_upgrades_pkg1(self): """Make it so that one package upgrades another. Return the hashes of the two packages. """ self._add_system_package("foo", version="1.0") self._add_package_to_deb_dir(self.repository_dir, "foo", version="2.0") self.facade.reload_channels() foo_1, foo_2 = sorted(self.facade.get_packages_by_name("foo")) return ( self.facade.get_package_hash(foo_1), self.facade.get_package_hash(foo_2)) def remove_pkg2(self): """Remove package name2 from its repository.""" packages_file = os.path.join(self.repository_dir, "Packages") packages_contents = read_file(packages_file) packages_contents = "\n\n".join( [stanza for stanza in packages_contents.split("\n\n") if "Package: name2" not in stanza]) create_file(packages_file, packages_contents) def get_transaction_error_message(self): """Return part of the apt transaction error message.""" return "Unable to correct problems" def get_binaries_channels(self, binaries_path): """Return the channels that will be used for the binaries.""" return [{"baseurl": "file://%s" % binaries_path, "components": "", "distribution": "./", "type": "deb"}] def get_package_name(self, version): """Return the name of the package.""" return version.package.name def disable_clear_channels(self): """Disable clear_channels(), so that it doesn't remove test setup. This is useful for change-packages tests, which will call facade.clear_channels(). Normally that's safe, but since we used the facade to set up channels, we don't want them to be removed. """ self.facade.clear_channels = lambda: None def get_pending_messages(self): return self.broker_service.message_store.get_pending_messages() def replace_perform_changes(self, func): old_perform_changes = self.Facade.perform_changes def reset_perform_changes(Facade): Facade.perform_changes = old_perform_changes self.addCleanup(reset_perform_changes, self.Facade) self.Facade.perform_changes = func def test_unknown_package_id_for_dependency(self): hash1, hash2 = self.set_pkg1_and_pkg2_satisfied() # Let's request an operation that would require an answer with a # must-install field with a package for which the id isn't yet # known by the client. self.store.add_task("changer", {"type": "change-packages", "install": [1], "operation-id": 123}) # In our first try, we should get nothing, because the id of the # dependency (hash2) isn't known. self.store.set_hash_ids({hash1: 1}) result = self.changer.handle_tasks() self.assertEqual(result.called, True) self.assertMessages(self.get_pending_messages(), []) self.assertIn("Package data not yet synchronized with server (%r)" % hash2, self.logfile.getvalue()) # So now we'll set it, and advance the reactor to the scheduled # change detection. We'll get a lot of messages, including the # result of our previous message, which got *postponed*. 
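# The 101 result code below is the dependency-error result: it tells the server which extra package ids must be installed before the change can be carried out.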
self.store.set_hash_ids({hash2: 2}) result = self.changer.handle_tasks() def got_result(result): self.assertMessages(self.get_pending_messages(), [{"must-install": [2], "operation-id": 123, "result-code": 101, "type": "change-packages-result"}]) return result.addCallback(got_result) def test_install_unknown_id(self): self.store.add_task("changer", {"type": "change-packages", "install": [456], "operation-id": 123}) self.changer.handle_tasks() self.assertIn("Package data not yet synchronized with server (456)", self.logfile.getvalue()) self.assertTrue(self.store.get_next_task("changer")) def test_remove_unknown_id(self): self.store.add_task("changer", {"type": "change-packages", "remove": [456], "operation-id": 123}) self.changer.handle_tasks() self.assertIn("Package data not yet synchronized with server (456)", self.logfile.getvalue()) self.assertTrue(self.store.get_next_task("changer")) def test_install_unknown_package(self): self.store.set_hash_ids({"hash": 456}) self.store.add_task("changer", {"type": "change-packages", "install": [456], "operation-id": 123}) self.changer.handle_tasks() self.assertIn("Package data not yet synchronized with server ('hash')", self.logfile.getvalue()) self.assertTrue(self.store.get_next_task("changer")) def test_remove_unknown_package(self): self.store.set_hash_ids({"hash": 456}) self.store.add_task("changer", {"type": "change-packages", "remove": [456], "operation-id": 123}) self.changer.handle_tasks() self.assertIn("Package data not yet synchronized with server ('hash')", self.logfile.getvalue()) self.assertTrue(self.store.get_next_task("changer")) def test_unknown_data_timeout(self): """After a while, unknown package data is reported as an error. In these cases a warning is logged, and the task is removed. """ self.store.add_task("changer", {"type": "change-packages", "remove": [123], "operation-id": 123}) time_mock = self.mocker.replace("time.time") time_mock() self.mocker.result(time.time() + UNKNOWN_PACKAGE_DATA_TIMEOUT) self.mocker.count(1, None) self.mocker.replay() try: result = self.changer.handle_tasks() self.mocker.verify() finally: # Reset it earlier so that Twisted has the true time function. self.mocker.reset() self.assertIn("Package data not yet synchronized with server (123)", self.logfile.getvalue()) def got_result(result): message = {"type": "change-packages-result", "operation-id": 123, "result-code": 100, "result-text": "Package data has changed. " "Please retry the operation."} self.assertMessages(self.get_pending_messages(), [message]) self.assertEqual(self.store.get_next_task("changer"), None) return result.addCallback(got_result) def test_dependency_error(self): """ In this test we hack the facade to simulate the situation where Smart refused to remove the package due to missing dependencies that are present in the system but weren't requested in the message. The client must answer it, saying which additional changes are needed to perform the requested operation. It's a slightly hackish approach, since we're returning the full set of packages available as a dependency error, but it serves well for testing this specific feature. """ installed_hash = self.set_pkg1_installed() # Use ensure_channels_reloaded() to make sure that the package # instances we raise below are the same that the facade will # use. The changer will use ensure_channels_reloaded() too, # which won't actually reload the package data if it's already # loaded.
self.facade.ensure_channels_reloaded() self.store.set_hash_ids({installed_hash: 1, HASH2: 2, HASH3: 3}) self.store.add_task("changer", {"type": "change-packages", "install": [2], "operation-id": 123}) packages = [ self.facade.get_package_by_hash(pkg_hash) for pkg_hash in [installed_hash, HASH2, HASH3]] def raise_dependency_error(self): raise DependencyError(set(packages)) self.replace_perform_changes(raise_dependency_error) result = self.changer.handle_tasks() def got_result(result): self.assertMessages(self.get_pending_messages(), [{"must-install": [2, 3], "must-remove": [1], "operation-id": 123, "result-code": 101, "type": "change-packages-result"}]) return result.addCallback(got_result) def test_dependency_error_with_binaries(self): """ Simulate a failing operation involving server-generated binary packages. The extra changes needed to perform the transaction are sent back to the server. """ self.remove_pkg2() installed_hash = self.set_pkg1_installed() self.store.set_hash_ids({installed_hash: 1, HASH3: 3}) self.store.add_task("changer", {"type": "change-packages", "install": [2], "binaries": [(HASH2, 2, PKGDEB2)], "operation-id": 123}) packages = set() def raise_dependency_error(self): packages.update( self.get_package_by_hash(pkg_hash) for pkg_hash in [installed_hash, HASH2, HASH3]) raise DependencyError(set(packages)) self.replace_perform_changes(raise_dependency_error) self.disable_clear_channels() result = self.changer.handle_tasks() def got_result(result): self.assertMessages(self.get_pending_messages(), [{"must-install": [2, 3], "must-remove": [1], "operation-id": 123, "result-code": 101, "type": "change-packages-result"}]) return result.addCallback(got_result) def test_perform_changes_with_allow_install_policy(self): """ The C{POLICY_ALLOW_INSTALLS} policy makes the changer mark the missing packages for installation. """ self.store.set_hash_ids({HASH1: 1}) self.facade.reload_channels() [package1] = self.facade.get_packages_by_name("name1") self.mocker.order() self.facade.perform_changes = self.mocker.mock() self.facade.perform_changes() self.mocker.throw(DependencyError([package1])) self.facade.mark_install = self.mocker.mock() self.facade.mark_install(package1) self.facade.perform_changes() self.mocker.result("success") self.mocker.replay() result = self.changer.change_packages(POLICY_ALLOW_INSTALLS) self.assertEqual(result.code, SUCCESS_RESULT) self.assertEqual(result.text, "success") self.assertEqual(result.installs, [1]) self.assertEqual(result.removals, []) def test_perform_changes_with_allow_install_policy_and_removals(self): """ The C{POLICY_ALLOW_INSTALLS} policy doesn't allow additional packages to be removed. """ installed_hash = self.set_pkg1_installed() self.store.set_hash_ids({installed_hash: 1, HASH2: 2}) self.facade.reload_channels() package1 = self.facade.get_package_by_hash(installed_hash) [package2] = self.facade.get_packages_by_name("name2") self.facade.perform_changes = self.mocker.mock() self.facade.perform_changes() self.mocker.throw(DependencyError([package1, package2])) self.mocker.replay() result = self.changer.change_packages(POLICY_ALLOW_INSTALLS) self.assertEqual(result.code, DEPENDENCY_ERROR_RESULT) self.assertEqual(result.text, None) self.assertEqual(result.installs, [2]) self.assertEqual(result.removals, [1]) def test_perform_changes_with_max_retries(self): """ After having complemented the requested changes to handle a dependency error, L{PackageChanger.change_packages} will try to perform the requested changes again only once.
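A second consecutive L{DependencyError} is thus reported back as a dependency error instead of triggering yet another attempt.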
""" self.store.set_hash_ids({HASH1: 1, HASH2: 2}) self.facade.reload_channels() [package1] = self.facade.get_packages_by_name("name1") [package2] = self.facade.get_packages_by_name("name2") self.facade.perform_changes = self.mocker.mock() self.facade.perform_changes() self.mocker.throw(DependencyError([package1])) self.facade.perform_changes() self.mocker.throw(DependencyError([package2])) self.mocker.replay() result = self.changer.change_packages(POLICY_ALLOW_INSTALLS) self.assertEqual(result.code, DEPENDENCY_ERROR_RESULT) self.assertEqual(result.text, None) self.assertEqual(result.installs, [1, 2]) self.assertEqual(result.removals, []) def test_handle_change_packages_with_policy(self): """ The C{change-packages} message can have an optional C{policy} field that will be passed to the C{perform_changes} method. """ self.store.set_hash_ids({HASH1: 1}) self.store.add_task("changer", {"type": "change-packages", "install": [1], "policy": POLICY_ALLOW_INSTALLS, "operation-id": 123}) self.changer.change_packages = self.mocker.mock() self.changer.change_packages(POLICY_ALLOW_INSTALLS) result = ChangePackagesResult() result.code = SUCCESS_RESULT self.mocker.result(result) self.mocker.replay() self.disable_clear_channels() return self.changer.handle_tasks() def test_perform_changes_with_policy_allow_all_changes(self): """ The C{POLICY_ALLOW_ALL_CHANGES} policy allows any needed additional package to be installed or removed. """ installed_hash = self.set_pkg1_installed() self.store.set_hash_ids({installed_hash: 1, HASH2: 2}) self.facade.reload_channels() self.mocker.order() package1 = self.facade.get_package_by_hash(installed_hash) [package2] = self.facade.get_packages_by_name("name2") self.facade.perform_changes = self.mocker.mock() self.facade.perform_changes() self.mocker.throw(DependencyError([package1, package2])) self.facade.mark_install = self.mocker.mock() self.facade.mark_remove = self.mocker.mock() self.facade.mark_install(package2) self.facade.mark_remove(package1) self.facade.perform_changes() self.mocker.result("success") self.mocker.replay() result = self.changer.change_packages(POLICY_ALLOW_ALL_CHANGES) self.assertEqual(result.code, SUCCESS_RESULT) self.assertEqual(result.text, "success") self.assertEqual(result.installs, [2]) self.assertEqual(result.removals, [1]) def test_transaction_error(self): """ In this case, the package we're trying to install declared some dependencies that can't be satisfied in the client because they don't exist at all. The client must answer the request letting the server know about the unsolvable problem. """ self.store.set_hash_ids({HASH1: 1}) self.store.add_task("changer", {"type": "change-packages", "install": [1], "operation-id": 123}) self.disable_clear_channels() result = self.changer.handle_tasks() def got_result(result): result_text = self.get_transaction_error_message() messages = self.get_pending_messages() self.assertEqual(len(messages), 1) message = messages[0] self.assertEqual(message["operation-id"], 123) self.assertEqual(message["result-code"], 100) self.assertIn(result_text, message["result-text"]) self.assertEqual(message["type"], "change-packages-result") return result.addCallback(got_result) def test_tasks_are_isolated_marks(self): """ Changes attempted on one task should be reset before the next task is run. In this test, we try to run two different operations, first installing package 2, then upgrading anything available. 
The first installation will fail for lack of superuser privileges, and the second one will succeed since there's nothing to upgrade. If tasks are mixed up, the second operation will fail too, because the installation of package 2 is still marked in the facade. """ self.log_helper.ignore_errors(".*dpkg") installable_hash = self.set_pkg2_satisfied() installed_hash = self.set_pkg1_installed() self.store.set_hash_ids({installed_hash: 1, installable_hash: 2}) self.store.add_task("changer", {"type": "change-packages", "install": [2], "operation-id": 123}) self.store.add_task("changer", {"type": "change-packages", "upgrade-all": True, "operation-id": 124}) result = self.changer.handle_tasks() def got_result(result): message = self.get_pending_messages()[1] self.assertEqual(124, message["operation-id"]) self.assertEqual("change-packages-result", message["type"]) self.assertNotEqual(0, message["result-code"]) return result.addCallback(got_result) def test_tasks_are_isolated_cache(self): """ The package (APT) cache should be reset between task runs. In this test, we try to run two different operations, first installing package 2, then removing package 1. Both tasks will fail for lack of superuser privileges. If the package cache isn't reset between tasks, the second operation will fail with a dependency error, since it will be marked for installation, but we haven't explicitly marked it so. """ self.log_helper.ignore_errors(".*dpkg") installable_hash = self.set_pkg2_satisfied() installed_hash = self.set_pkg1_installed() self.store.set_hash_ids({installed_hash: 1, installable_hash: 2}) self.store.add_task("changer", {"type": "change-packages", "install": [2], "operation-id": 123}) self.store.add_task("changer", {"type": "change-packages", "remove": [1], "operation-id": 124}) result = self.changer.handle_tasks() def got_result(result): message1, message2 = self.get_pending_messages() self.assertEqual(123, message1["operation-id"]) self.assertEqual("change-packages-result", message1["type"]) self.assertEqual(ERROR_RESULT, message1["result-code"]) self.assertEqual(124, message2["operation-id"]) self.assertEqual("change-packages-result", message2["type"]) self.assertEqual(ERROR_RESULT, message2["result-code"]) return result.addCallback(got_result) def test_successful_operation(self): """Simulate a *very* successful operation. We'll do that by hacking perform_changes(), and returning our *very* successful operation result. """ installed_hash = self.set_pkg1_installed() self.store.set_hash_ids({installed_hash: 1, HASH2: 2, HASH3: 3}) self.store.add_task("changer", {"type": "change-packages", "install": [2], "operation-id": 123}) def return_good_result(self): return "Yeah, I did whatever you've asked for!" self.replace_perform_changes(return_good_result) result = self.changer.handle_tasks() def got_result(result): self.assertMessages(self.get_pending_messages(), [{"operation-id": 123, "result-code": 1, "result-text": "Yeah, I did whatever you've " "asked for!", "type": "change-packages-result"}]) return result.addCallback(got_result) def test_successful_operation_with_binaries(self): """ Simulate a successful operation involving server-generated binary packages. """ self.store.set_hash_ids({HASH3: 3}) self.store.add_task("changer", {"type": "change-packages", "install": [2, 3], "binaries": [(HASH2, 2, PKGDEB2)], "operation-id": 123}) def return_good_result(self): return "Yeah, I did whatever you've asked for!" 
self.replace_perform_changes(return_good_result) self.disable_clear_channels() result = self.changer.handle_tasks() def got_result(result): self.assertMessages(self.get_pending_messages(), [{"operation-id": 123, "result-code": 1, "result-text": "Yeah, I did whatever you've " "asked for!", "type": "change-packages-result"}]) return result.addCallback(got_result) def test_global_upgrade(self): """ Besides asking for individual changes, the server may also request the client to perform a global upgrade. This would be the equivalent of an "apt-get upgrade" command being executed on the command line. """ hash1, hash2 = self.set_pkg2_upgrades_pkg1() self.store.set_hash_ids({hash1: 1, hash2: 2}) self.store.add_task("changer", {"type": "change-packages", "upgrade-all": True, "operation-id": 123}) result = self.changer.handle_tasks() def got_result(result): self.assertMessages(self.get_pending_messages(), [{"operation-id": 123, "must-install": [2], "must-remove": [1], "result-code": 101, "type": "change-packages-result"}]) return result.addCallback(got_result) def test_global_upgrade_with_nothing_to_do(self): self.store.add_task("changer", {"type": "change-packages", "upgrade-all": True, "operation-id": 123}) result = self.changer.handle_tasks() def got_result(result): self.assertMessages(self.get_pending_messages(), [{"operation-id": 123, "result-code": 1, "type": "change-packages-result"}]) return result.addCallback(got_result) def test_run_with_no_update_stamp(self): """ If the update-stamp file is not there yet, the package changer just exits. """ os.remove(self.config.update_stamp_filename) def assert_log(ignored): self.assertIn("The package-reporter hasn't run yet, exiting.", self.logfile.getvalue()) result = self.changer.run() return result.addCallback(assert_log) def test_spawn_reporter_after_running(self): output_filename = self.makeFile("REPORTER NOT RUN") reporter_filename = self.makeFile("#!/bin/sh\necho REPORTER RUN > %s" % output_filename) os.chmod(reporter_filename, 0755) find_command_mock = self.mocker.replace( "landscape.package.reporter.find_reporter_command") find_command_mock() self.mocker.result(reporter_filename) self.mocker.replay() # Add a task that will do nothing besides producing an answer. # The reporter is only spawned if at least one task was handled. self.store.add_task("changer", {"type": "change-packages", "operation-id": 123}) result = self.changer.run() def got_result(result): self.assertEqual(open(output_filename).read().strip(), "REPORTER RUN") return result.addCallback(got_result) def test_spawn_reporter_after_running_with_config(self): """The changer passes the config to the reporter when running it.""" self.config.config = "test.conf" output_filename = self.makeFile("REPORTER NOT RUN") reporter_filename = self.makeFile("#!/bin/sh\necho ARGS $@ > %s" % output_filename) os.chmod(reporter_filename, 0755) find_command_mock = self.mocker.replace( "landscape.package.reporter.find_reporter_command") find_command_mock() self.mocker.result(reporter_filename) self.mocker.replay() # Add a task that will do nothing besides producing an answer. # The reporter is only spawned if at least one task was handled.
self.store.add_task("changer", {"type": "change-packages", "operation-id": 123}) result = self.changer.run() def got_result(result): self.assertEqual(open(output_filename).read().strip(), "ARGS -c test.conf") return result.addCallback(got_result) def test_set_effective_uid_and_gid_when_running_as_root(self): """ After the package changer has run, we want the package-reporter to run to report the recent changes. If we're running as root, we want to change to the "landscape" user and "landscape" group. """ # We are running as root getuid_mock = self.mocker.replace("os.getuid") getuid_mock() self.mocker.result(0) self.mocker.order() # We want to return a known gid grnam_mock = self.mocker.replace("grp.getgrnam") grnam_mock("landscape") class FakeGroup(object): gr_gid = 199 self.mocker.result(FakeGroup()) # First the changer should change the group setgid_mock = self.mocker.replace("os.setgid") setgid_mock(199) # And a known uid as well pwnam_mock = self.mocker.replace("pwd.getpwnam") pwnam_mock("landscape") class FakeUser(object): pw_uid = 199 self.mocker.result(FakeUser()) # And now the user as well setuid_mock = self.mocker.replace("os.setuid") setuid_mock(199) # Finally, we don't really want the package reporter to run. system_mock = self.mocker.replace("os.system") system_mock(ANY) self.mocker.replay() # Add a task that will do nothing besides producing an answer. # The reporter is only spawned if at least one task was handled. self.store.add_task("changer", {"type": "change-packages", "operation-id": 123}) return self.changer.run() def test_run(self): changer_mock = self.mocker.patch(self.changer) self.mocker.order() results = [Deferred() for i in range(2)] changer_mock.use_hash_id_db() self.mocker.result(results[0]) changer_mock.handle_tasks() self.mocker.result(results[1]) self.mocker.replay() self.changer.run() # It must raise an error because deferreds weren't yet fired. self.assertRaises(AssertionError, self.mocker.verify) for deferred in reversed(results): deferred.callback(None) def test_dont_spawn_reporter_after_running_if_nothing_done(self): output_filename = self.makeFile("REPORTER NOT RUN") reporter_filename = self.makeFile("#!/bin/sh\necho REPORTER RUN > %s" % output_filename) os.chmod(reporter_filename, 0755) find_command_mock = self.mocker.replace( "landscape.package.reporter.find_reporter_command") find_command_mock() self.mocker.result(reporter_filename) self.mocker.count(0, None) self.mocker.replay() result = self.changer.run() def got_result(result): self.assertEqual(open(output_filename).read().strip(), "REPORTER NOT RUN") return result.addCallback(got_result) def test_main(self): self.mocker.order() run_task_handler = self.mocker.replace("landscape.package.taskhandler" ".run_task_handler", passthrough=False) getpgrp = self.mocker.replace("os.getpgrp") self.expect(getpgrp()).result(os.getpid() + 1) setsid = self.mocker.replace("os.setsid") setsid() run_task_handler(PackageChanger, ["ARGS"]) self.mocker.result("RESULT") self.mocker.replay() self.assertEqual(main(["ARGS"]), "RESULT") def test_main_run_from_shell(self): """ We want the getpid and getpgrp to return the same process id this simulates the case where the process is already the process session leader, in this case the os.setsid would fail. 
""" getpgrp = self.mocker.replace("os.getpgrp") getpgrp() self.mocker.result(os.getpid()) setsid = self.mocker.replace("os.setsid") setsid() self.mocker.count(0, 0) run_task_handler = self.mocker.replace("landscape.package.taskhandler" ".run_task_handler", passthrough=False) run_task_handler(PackageChanger, ["ARGS"]) self.mocker.replay() main(["ARGS"]) def test_find_changer_command(self): dirname = self.makeDir() filename = self.makeFile("", dirname=dirname, basename="landscape-package-changer") saved_argv = sys.argv try: sys.argv = [os.path.join(dirname, "landscape-monitor")] command = find_changer_command() self.assertEqual(command, filename) finally: sys.argv = saved_argv def test_transaction_error_with_unicode_data(self): self.store.set_hash_ids({HASH1: 1}) self.store.add_task("changer", {"type": "change-packages", "install": [1], "operation-id": 123}) def raise_error(self): raise TransactionError(u"áéíóú") self.replace_perform_changes(raise_error) self.disable_clear_channels() result = self.changer.handle_tasks() def got_result(result): self.assertMessages(self.get_pending_messages(), [{"operation-id": 123, "result-code": 100, "result-text": u"áéíóú", "type": "change-packages-result"}]) return result.addCallback(got_result) def test_update_stamp_exists(self): """ L{PackageChanger.update_stamp_exists} returns C{True} if the update-stamp file is there, C{False} otherwise. """ self.assertTrue(self.changer.update_stamp_exists()) os.remove(self.config.update_stamp_filename) self.assertFalse(self.changer.update_stamp_exists()) def test_update_stamp_exists_notifier(self): """ L{PackageChanger.update_stamp_exists} also checks the existence of the C{update_notifier_stamp} file. """ self.assertTrue(self.changer.update_stamp_exists()) os.remove(self.config.update_stamp_filename) self.assertFalse(self.changer.update_stamp_exists()) self.changer.update_notifier_stamp = self.makeFile("") self.assertTrue(self.changer.update_stamp_exists()) def test_binaries_path(self): self.assertEqual( self.config.binaries_path, os.path.join(self.config.data_path, "package", "binaries")) def test_init_channels(self): """ The L{PackageChanger.init_channels} method makes the given Debian packages available in a facade channel. """ binaries = [(HASH1, 111, PKGDEB1), (HASH2, 222, PKGDEB2)] self.facade.reset_channels() self.changer.init_channels(binaries) binaries_path = self.config.binaries_path self.assertFileContent(os.path.join(binaries_path, "111.deb"), base64.decodestring(PKGDEB1)) self.assertFileContent(os.path.join(binaries_path, "222.deb"), base64.decodestring(PKGDEB2)) self.assertEqual( self.facade.get_channels(), self.get_binaries_channels(binaries_path)) self.assertEqual(self.store.get_hash_ids(), {HASH1: 111, HASH2: 222}) self.facade.ensure_channels_reloaded() [pkg1, pkg2] = sorted(self.facade.get_packages(), key=self.get_package_name) self.assertEqual(self.facade.get_package_hash(pkg1), HASH1) self.assertEqual(self.facade.get_package_hash(pkg2), HASH2) def test_init_channels_with_existing_hash_id_map(self): """ The L{PackageChanger.init_channels} behaves well even if the hash->id mapping for a given deb is already in the L{PackageStore}. """ self.store.set_hash_ids({HASH1: 111}) self.changer.init_channels([(HASH1, 111, PKGDEB1)]) self.assertEqual(self.store.get_hash_ids(), {HASH1: 111}) def test_init_channels_with_existing_binaries(self): """ The L{PackageChanger.init_channels} removes Debian packages from previous runs. 
""" existing_deb_path = os.path.join(self.config.binaries_path, "123.deb") self.makeFile(basename=existing_deb_path, content="foo") self.changer.init_channels([]) self.assertFalse(os.path.exists(existing_deb_path)) def test_binaries_available_in_cache(self): """ If binaries are included in the changes-packages message, those will be added to the facade's cache. """ # Make sure to turn off automatic rereading of Packages file, # like it is by default. self.facade.refetch_package_index = False self.assertEqual(None, self.facade.get_package_by_hash(HASH2)) self.store.add_task("changer", {"type": "change-packages", "install": [2], "binaries": [(HASH2, 2, PKGDEB2)], "operation-id": 123}) def return_good_result(self): return "Yeah, I did whatever you've asked for!" self.replace_perform_changes(return_good_result) result = self.changer.handle_tasks() def got_result(result): self.assertNotEqual(None, self.facade.get_package_by_hash(HASH2)) self.assertFalse(self.facade.refetch_package_index) return result.addCallback(got_result) def test_change_package_holds(self): """ The L{PackageChanger.handle_tasks} method appropriately creates and deletes package holds as requested by the C{change-packages} message. """ self._add_system_package("foo") self._add_system_package("bar") self.facade.reload_channels() self._hash_packages_by_name(self.facade, self.store, "foo") self._hash_packages_by_name(self.facade, self.store, "bar") [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") self.facade.set_package_hold(bar) # Make sure that the mtime of the dpkg status file is old when # apt loads it, so that it will be reloaded when asserting the # test result. old_mtime = time.time() - 10 os.utime(self.facade._dpkg_status, (old_mtime, old_mtime)) self.facade.reload_channels() self.store.add_task("changer", {"type": "change-packages", "hold": [foo.package.id], "remove-hold": [bar.package.id], "operation-id": 123}) def assert_result(result): self.facade.reload_channels() self.assertEqual(["foo"], self.facade.get_package_holds()) self.assertIn("Queuing response with change package results " "to exchange urgently.", self.logfile.getvalue()) self.assertMessages( self.get_pending_messages(), [{"type": "change-packages-result", "operation-id": 123, "result-text": "Package holds successfully changed.", "result-code": 1}]) result = self.changer.handle_tasks() return result.addCallback(assert_result) def test_create_package_holds_with_identical_version(self): """ The L{PackageChanger.handle_tasks} method appropriately creates holds as requested by the C{change-packages} message even when versions from two different packages are the same. 
""" self._add_system_package("foo", version="1.1") self._add_system_package("bar", version="1.1") self.facade.reload_channels() self._hash_packages_by_name(self.facade, self.store, "foo") self._hash_packages_by_name(self.facade, self.store, "bar") [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") self.facade.reload_channels() self.store.add_task("changer", {"type": "change-packages", "hold": [foo.package.id, bar.package.id], "operation-id": 123}) def assert_result(result): self.facade.reload_channels() self.assertEqual(["foo", "bar"], self.facade.get_package_holds()) result = self.changer.handle_tasks() return result.addCallback(assert_result) def test_delete_package_holds_with_identical_version(self): """ The L{PackageChanger.handle_tasks} method appropriately deletes holds as requested by the C{change-packages} message even when versions from two different packages are the same. """ self._add_system_package("foo", version="1.1") self._add_system_package("bar", version="1.1") self.facade.reload_channels() self._hash_packages_by_name(self.facade, self.store, "foo") self._hash_packages_by_name(self.facade, self.store, "bar") [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") self.facade.set_package_hold(foo) self.facade.set_package_hold(bar) self.facade.reload_channels() self.store.add_task("changer", {"type": "change-packages", "remove-hold": [foo.package.id, bar.package.id], "operation-id": 123}) def assert_result(result): self.facade.reload_channels() self.assertEqual([], self.facade.get_package_holds()) result = self.changer.handle_tasks() return result.addCallback(assert_result) def test_change_package_holds_create_already_held(self): """ If the C{change-packages} message requests to add holds for packages that are already held, the activity succeeds, since the end result is that the requested package holds are there. """ self._add_system_package("foo") self.facade.reload_channels() self._hash_packages_by_name(self.facade, self.store, "foo") [foo] = self.facade.get_packages_by_name("foo") self.facade.set_package_hold(foo) self.facade.reload_channels() self.store.add_task("changer", {"type": "change-packages", "hold": [foo.package.id], "operation-id": 123}) def assert_result(result): self.facade.reload_channels() self.assertEqual(["foo"], self.facade.get_package_holds()) self.assertIn("Queuing response with change package results " "to exchange urgently.", self.logfile.getvalue()) self.assertMessages( self.get_pending_messages(), [{"type": "change-packages-result", "operation-id": 123, "result-text": "Package holds successfully changed.", "result-code": 1}]) result = self.changer.handle_tasks() return result.addCallback(assert_result) def test_change_package_holds_create_other_version_installed(self): """ If the C{change-packages} message requests to add holds for packages that have a different version installed than the one being requested to hold, the activity fails. The whole activity is failed, meaning that other valid hold requests won't get processed. 
""" self._add_system_package("foo", version="1.0") self._add_package_to_deb_dir( self.repository_dir, "foo", version="2.0") self._add_system_package("bar", version="1.0") self._add_package_to_deb_dir( self.repository_dir, "bar", version="2.0") self.facade.reload_channels() [foo1, foo2] = sorted(self.facade.get_packages_by_name("foo")) [bar1, bar2] = sorted(self.facade.get_packages_by_name("bar")) self.store.set_hash_ids({self.facade.get_package_hash(foo1): 1, self.facade.get_package_hash(foo2): 2, self.facade.get_package_hash(bar1): 3, self.facade.get_package_hash(bar2): 4}) self.facade.reload_channels() self.store.add_task("changer", {"type": "change-packages", "hold": [2, 3], "operation-id": 123}) def assert_result(result): self.facade.reload_channels() self.assertEqual([], self.facade.get_package_holds()) self.assertIn("Queuing response with change package results " "to exchange urgently.", self.logfile.getvalue()) self.assertMessages( self.get_pending_messages(), [{"type": "change-packages-result", "operation-id": 123, "result-text": "Cannot perform the changes, since the" + " following packages are not installed: foo", "result-code": 100}]) result = self.changer.handle_tasks() return result.addCallback(assert_result) def test_change_package_holds_create_not_installed(self): """ If the C{change-packages} message requests to add holds for packages that aren't installed, the whole activity is failed. If multiple holds are specified, those won't be added. There's no difference between a package that is available in some repository and a package that the facade doesn't know about at all. """ self._add_system_package("foo") self._add_package_to_deb_dir(self.repository_dir, "bar") self._add_package_to_deb_dir(self.repository_dir, "baz") self.facade.reload_channels() self._hash_packages_by_name(self.facade, self.store, "foo") self._hash_packages_by_name(self.facade, self.store, "bar") self._hash_packages_by_name(self.facade, self.store, "baz") [foo] = self.facade.get_packages_by_name("foo") [bar] = self.facade.get_packages_by_name("bar") [baz] = self.facade.get_packages_by_name("baz") self.store.add_task("changer", {"type": "change-packages", "hold": [foo.package.id, bar.package.id, baz.package.id], "operation-id": 123}) def assert_result(result): self.facade.reload_channels() self.assertEqual([], self.facade.get_package_holds()) self.assertIn("Queuing response with change package results " "to exchange urgently.", self.logfile.getvalue()) self.assertMessages( self.get_pending_messages(), [{"type": "change-packages-result", "operation-id": 123, "result-text": "Cannot perform the changes, since the " "following packages are not installed: " "%s, %s" % tuple(sorted([bar.package.name, baz.package.name])), "result-code": 100}]) result = self.changer.handle_tasks() return result.addCallback(assert_result) def test_change_package_holds_create_unknown_hash(self): """ If the C{change-packages} message requests to add holds for packages that the client doesn't know about results in a not yet synchronized message and a failure of the operation. """ self.store.add_task("changer", {"type": "change-packages", "hold": [123], "operation-id": 123}) time_mock = self.mocker.replace("time.time") time_mock() self.mocker.result(time.time() + UNKNOWN_PACKAGE_DATA_TIMEOUT) self.mocker.count(1, None) self.mocker.replay() try: result = self.changer.handle_tasks() self.mocker.verify() finally: # Reset it earlier so that Twisted has the true time function. 
self.mocker.reset() self.assertIn("Package data not yet synchronized with server (123)", self.logfile.getvalue()) def got_result(result): message = {"type": "change-packages-result", "operation-id": 123, "result-code": 100, "result-text": "Package data has changed. " "Please retry the operation."} self.assertMessages(self.get_pending_messages(), [message]) self.assertEqual(self.store.get_next_task("changer"), None) return result.addCallback(got_result) def test_change_package_holds_delete_not_held(self): """ If the C{change-packages} message requests to remove holds for packages that aren't held, the activity succeeds if the right version is installed, since the end result is that the hold is removed. """ self._add_package_to_deb_dir(self.repository_dir, "foo") self.facade.reload_channels() self._hash_packages_by_name(self.facade, self.store, "foo") [foo] = self.facade.get_packages_by_name("foo") self.store.add_task("changer", {"type": "change-packages", "remove-hold": [foo.package.id], "operation-id": 123}) def assert_result(result): self.facade.reload_channels() self.assertEqual([], self.facade.get_package_holds()) self.assertIn("Queuing response with change package results " "to exchange urgently.", self.logfile.getvalue()) self.assertMessages( self.get_pending_messages(), [{"type": "change-packages-result", "operation-id": 123, "result-text": "Package holds successfully changed.", "result-code": 1}]) result = self.changer.handle_tasks() return result.addCallback(assert_result) def test_change_package_holds_delete_different_version_held(self): """ If the C{change-packages} message requests to remove a hold for a package version that isn't held, the activity succeeds even when a different version of the same package is held; the existing hold is left untouched. """ self._add_system_package("foo", version="1.0") self._add_package_to_deb_dir( self.repository_dir, "foo", version="2.0") self.facade.reload_channels() [foo1, foo2] = sorted(self.facade.get_packages_by_name("foo")) self.store.set_hash_ids({self.facade.get_package_hash(foo1): 1, self.facade.get_package_hash(foo2): 2}) self.facade.mark_install(foo1) self.facade.mark_hold(foo1) self.facade.perform_changes() self.facade.reload_channels() self.store.add_task("changer", {"type": "change-packages", "remove-hold": [2], "operation-id": 123}) def assert_result(result): self.facade.reload_channels() self.assertEqual(["foo"], self.facade.get_package_holds()) self.assertIn("Queuing response with change package results " "to exchange urgently.", self.logfile.getvalue()) self.assertMessages( self.get_pending_messages(), [{"type": "change-packages-result", "operation-id": 123, "result-text": "Package holds successfully changed.", "result-code": 1}]) result = self.changer.handle_tasks() return result.addCallback(assert_result) def test_change_package_holds_delete_not_installed(self): """ If the C{change-packages} message requests to remove holds for packages that aren't installed, the activity succeeds, since the end result is still that the package isn't held at the requested version.
""" self._add_system_package("foo") self.facade.reload_channels() self._hash_packages_by_name(self.facade, self.store, "foo") [foo] = self.facade.get_packages_by_name("foo") self.store.add_task("changer", {"type": "change-packages", "remove-hold": [foo.package.id], "operation-id": 123}) def assert_result(result): self.facade.reload_channels() self.assertEqual([], self.facade.get_package_holds()) self.assertIn("Queuing response with change package results " "to exchange urgently.", self.logfile.getvalue()) self.assertMessages( self.get_pending_messages(), [{"type": "change-packages-result", "operation-id": 123, "result-text": "Package holds successfully changed.", "result-code": 1}]) result = self.changer.handle_tasks() return result.addCallback(assert_result) def test_change_package_locks(self): """ The L{PackageChanger.handle_tasks} method fails change-package-locks activities, since it can't add or remove locks because apt doesn't support this. """ self.store.add_task("changer", {"type": "change-package-locks", "create": [("foo", ">=", "1.0")], "delete": [("bar", None, None)], "operation-id": 123}) def assert_result(result): self.assertMessages( self.get_pending_messages(), [{"type": "operation-result", "operation-id": 123, "status": FAILED, "result-text": "This client doesn't support package locks.", "result-code": 1}]) result = self.changer.handle_tasks() return result.addCallback(assert_result) def test_change_packages_with_binaries_removes_binaries(self): """ After the C{change-packages} handler has installed the binaries, the binaries and the internal facade deb source is removed. """ self.store.add_task("changer", {"type": "change-packages", "install": [2], "binaries": [(HASH2, 2, PKGDEB2)], "operation-id": 123}) def return_good_result(self): return "Yeah, I did whatever you've asked for!" self.replace_perform_changes(return_good_result) result = self.changer.handle_tasks() def got_result(result): self.assertMessages(self.get_pending_messages(), [{"operation-id": 123, "result-code": 1, "result-text": "Yeah, I did whatever you've " "asked for!", "type": "change-packages-result"}]) self.assertEqual([], os.listdir(self.config.binaries_path)) self.assertFalse( os.path.exists(self.facade._get_internal_sources_list())) return result.addCallback(got_result) def test_change_packages_with_reboot_flag(self): """ When a C{reboot-if-necessary} flag is passed in the C{change-packages}, A C{ShutdownProtocolProcess} is created and the package result change is returned. """ self.store.add_task("changer", {"type": "change-packages", "install": [2], "binaries": [(HASH2, 2, PKGDEB2)], "operation-id": 123, "reboot-if-necessary": True}) def return_good_result(self): return "Yeah, I did whatever you've asked for!" 
self.replace_perform_changes(return_good_result) result = self.changer.handle_tasks() def got_result(result): self.assertIn("Landscape is rebooting the system", self.logfile.getvalue()) self.assertMessages(self.get_pending_messages(), [{"operation-id": 123, "result-code": 1, "result-text": "Yeah, I did whatever you've " "asked for!", "type": "change-packages-result"}]) self.landscape_reactor.advance(5) [arguments] = self.process_factory.spawns protocol = arguments[0] protocol.processEnded(Failure(ProcessDone(status=0))) self.broker_service.reactor.advance(100) self.landscape_reactor.advance(10) return result.addCallback(got_result) def test_change_packages_with_failed_reboot(self): """ When a C{reboot-if-necessary} flag is passed in the C{change-packages} message, a C{ShutdownProtocol} is created and the package change result is returned, even if the reboot fails. """ self.store.add_task("changer", {"type": "change-packages", "install": [2], "binaries": [(HASH2, 2, PKGDEB2)], "operation-id": 123, "reboot-if-necessary": True}) def return_good_result(self): return "Yeah, I did whatever you've asked for!" self.replace_perform_changes(return_good_result) result = self.changer.handle_tasks() def got_result(result): self.assertMessages(self.get_pending_messages(), [{"operation-id": 123, "result-code": 1, "result-text": "Yeah, I did whatever you've " "asked for!", "type": "change-packages-result"}]) self.log_helper.ignore_errors(ShutdownFailedError) self.landscape_reactor.advance(5) [arguments] = self.process_factory.spawns protocol = arguments[0] protocol.processEnded(Failure(ProcessTerminated(exitCode=1))) self.landscape_reactor.advance(10) return result.addCallback(got_result) def test_no_exchange_after_reboot(self): """ After initiating a reboot process, no more messages are exchanged. """ self.store.add_task("changer", {"type": "change-packages", "install": [2], "binaries": [(HASH2, 2, PKGDEB2)], "operation-id": 123, "reboot-if-necessary": True}) def return_good_result(self): return "Yeah, I did whatever you've asked for!" self.replace_perform_changes(return_good_result) result = self.changer.handle_tasks() def got_result(result): # Advance both reactors so the pending messages are exchanged. self.broker_service.reactor.advance(100) self.landscape_reactor.advance(10) payloads = self.broker_service.exchanger._transport.payloads self.assertEqual(0, len(payloads)) self.landscape_reactor.advance(5) [arguments] = self.process_factory.spawns protocol = arguments[0] protocol.processEnded(Failure(ProcessDone(status=0))) self.broker_service.reactor.advance(100) self.landscape_reactor.advance(10) return result.addCallback(got_result) def test_run_gets_session_id(self): """ Invoking L{PackageChanger.run} results in the session ID being fetched.
""" def assert_session_id(ignored): self.assertTrue(self.changer._session_id is not None) self.changer._session_id = None result = self.changer.run() return result.addCallback(assert_session_id) landscape-client-14.01/landscape/package/tests/__init__.py0000644000175000017500000000000012301414317023323 0ustar andreasandreaslandscape-client-14.01/landscape/package/tests/test_skeleton.py0000644000175000017500000003012512301414317024462 0ustar andreasandreasfrom landscape.package.skeleton import ( build_skeleton_apt, DEB_PROVIDES, DEB_PACKAGE, DEB_NAME_PROVIDES, DEB_REQUIRES, DEB_OR_REQUIRES, DEB_UPGRADES, DEB_CONFLICTS, PackageSkeleton) from landscape.package.tests.helpers import ( AptFacadeHelper, HASH1, create_simple_repository, create_deb, PKGNAME_MINIMAL, PKGDEB_MINIMAL, HASH_MINIMAL, PKGNAME_SIMPLE_RELATIONS, PKGDEB_SIMPLE_RELATIONS, HASH_SIMPLE_RELATIONS, PKGNAME_VERSION_RELATIONS, PKGDEB_VERSION_RELATIONS, HASH_VERSION_RELATIONS, PKGNAME_MULTIPLE_RELATIONS, PKGDEB_MULTIPLE_RELATIONS, HASH_MULTIPLE_RELATIONS, PKGNAME_OR_RELATIONS, PKGDEB_OR_RELATIONS, HASH_OR_RELATIONS) from landscape.tests.helpers import LandscapeTest class SkeletonTestHelper(object): """A helper to set up a repository for the skeleton tests.""" def set_up(self, test_case): test_case.skeleton_repository_dir = test_case.makeDir() create_simple_repository(test_case.skeleton_repository_dir) create_deb( test_case.skeleton_repository_dir, PKGNAME_MINIMAL, PKGDEB_MINIMAL) create_deb( test_case.skeleton_repository_dir, PKGNAME_SIMPLE_RELATIONS, PKGDEB_SIMPLE_RELATIONS) create_deb( test_case.skeleton_repository_dir, PKGNAME_VERSION_RELATIONS, PKGDEB_VERSION_RELATIONS) create_deb( test_case.skeleton_repository_dir, PKGNAME_MULTIPLE_RELATIONS, PKGDEB_MULTIPLE_RELATIONS) create_deb( test_case.skeleton_repository_dir, PKGNAME_OR_RELATIONS, PKGDEB_OR_RELATIONS) class SkeletonAptTest(LandscapeTest): """C{PackageSkeleton} tests for apt packages.""" helpers = [AptFacadeHelper, SkeletonTestHelper] def setUp(self): super(SkeletonAptTest, self).setUp() self.facade.add_channel_deb_dir(self.skeleton_repository_dir) # Don't use reload_channels(), since that causes the test setup # depending on build_skeleton_apt working correctly, which makes # it harder to do TDD for these tests. self.facade._cache.open(None) self.facade._cache.update(None) self.facade._cache.open(None) def get_package(self, name): """Return the package with the specified name.""" # Don't use get_packages(), since that causes the test setup # depending on build_skeleton_apt working correctly, which makes # it harder to to TDD for these tests. package = self.facade._cache[name] return package.candidate def build_skeleton(self, *args, **kwargs): """Build the skeleton to be tested.""" return build_skeleton_apt(*args, **kwargs) def test_build_skeleton(self): """ C{build_skeleton} builds a C{PackageSkeleton} from a package. If with_info isn't passed, C{section}, C{summary}, C{description}, C{size} and C{installed_size} will be C{None}. 
""" pkg1 = self.get_package("name1") skeleton = self.build_skeleton(pkg1) self.assertEqual("name1", skeleton.name) self.assertEqual("version1-release1", skeleton.version) self.assertEqual(None, skeleton.section) self.assertEqual(None, skeleton.summary) self.assertEqual(None, skeleton.description) self.assertEqual(None, skeleton.size) self.assertEqual(None, skeleton.installed_size) relations = [ (DEB_PROVIDES, "providesname1"), (DEB_NAME_PROVIDES, "name1 = version1-release1"), (DEB_REQUIRES, "prerequirename1 = prerequireversion1"), (DEB_REQUIRES, "requirename1 = requireversion1"), (DEB_UPGRADES, "name1 < version1-release1"), (DEB_CONFLICTS, "conflictsname1 = conflictsversion1")] self.assertEqual(relations, skeleton.relations) self.assertEqual(HASH1, skeleton.get_hash(), HASH1) def test_build_skeleton_without_unicode(self): """ If C{with_unicode} isn't passed to C{build_skeleton}, the name and version of the skeleton are byte strings. The hash doesn't change, though. """ pkg1 = self.get_package("name1") skeleton = self.build_skeleton(pkg1) self.assertTrue(isinstance(skeleton.name, str)) self.assertTrue(isinstance(skeleton.version, str)) self.assertEqual(HASH1, skeleton.get_hash()) def test_build_skeleton_with_unicode(self): """ If C{with_unicode} is passed to C{build_skeleton}, the name and version of the skeleton are unicode strings. """ pkg1 = self.get_package("name1") skeleton = self.build_skeleton(pkg1, with_unicode=True) self.assertTrue(isinstance(skeleton.name, unicode)) self.assertTrue(isinstance(skeleton.version, unicode)) self.assertEqual(HASH1, skeleton.get_hash()) def test_build_skeleton_with_info(self): """ If C{with_info} is passed to C{build_skeleton}, C{section}, C{summary}, C{description} and the size fields will be extracted from the package. """ pkg1 = self.get_package("name1") skeleton = self.build_skeleton(pkg1, with_info=True) self.assertEqual("Group1", skeleton.section) self.assertEqual("Summary1", skeleton.summary) self.assertEqual("Description1", skeleton.description) self.assertEqual(1038, skeleton.size) self.assertEqual(28672, skeleton.installed_size) def test_build_skeleton_with_unicode_and_extra_info(self): """ If C{with_unicode} and C{with_info} are passed to C{build_skeleton}, the name, version and the extra info of the skeleton are unicode strings. """ pkg1 = self.get_package("name1") skeleton = self.build_skeleton(pkg1, with_unicode=True, with_info=True) self.assertTrue(isinstance(skeleton.name, unicode)) self.assertTrue(isinstance(skeleton.version, unicode)) self.assertTrue(isinstance(skeleton.section, unicode)) self.assertTrue(isinstance(skeleton.summary, unicode)) self.assertTrue(isinstance(skeleton.description, unicode)) self.assertEqual(HASH1, skeleton.get_hash()) def test_build_skeleton_minimal(self): """ A package that has only the required fields will still have some relations defined. 
""" minimal_package = self.get_package("minimal") skeleton = self.build_skeleton(minimal_package) self.assertEqual("minimal", skeleton.name) self.assertEqual("1.0", skeleton.version) self.assertEqual(None, skeleton.section) self.assertEqual(None, skeleton.summary) self.assertEqual(None, skeleton.description) self.assertEqual(None, skeleton.size) self.assertEqual(None, skeleton.installed_size) relations = [ (DEB_NAME_PROVIDES, "minimal = 1.0"), (DEB_UPGRADES, "minimal < 1.0")] self.assertEqual(relations, skeleton.relations) self.assertEqual(HASH_MINIMAL, skeleton.get_hash()) def test_build_skeleton_minimal_with_info(self): """ If some fields that C{with_info} wants aren't there, they will be either an empty string or None, depending on which field. """ package = self.get_package("minimal") skeleton = self.build_skeleton(package, True) self.assertEqual("", skeleton.section) self.assertEqual( "A minimal package with no dependencies or other relations.", skeleton.summary) self.assertEqual("", skeleton.description) self.assertEqual(558, skeleton.size) self.assertEqual(None, skeleton.installed_size) def test_build_skeleton_simple_relations(self): """ Relations that are specified in the package control file can be simple, i.e. not specifying a version. """ package = self.get_package("simple-relations") skeleton = self.build_skeleton(package) self.assertEqual("simple-relations", skeleton.name) self.assertEqual("1.0", skeleton.version) relations = [ (DEB_PROVIDES, "provide1"), (DEB_NAME_PROVIDES, "simple-relations = 1.0"), (DEB_REQUIRES, "depend1"), (DEB_REQUIRES, "predepend1"), (DEB_UPGRADES, "simple-relations < 1.0"), (DEB_CONFLICTS, "break1"), (DEB_CONFLICTS, "conflict1")] self.assertEqual(relations, skeleton.relations) self.assertEqual(HASH_SIMPLE_RELATIONS, skeleton.get_hash()) def test_build_skeleton_version_relations(self): """ Relations that are specified in the package control file can be version dependent. """ package = self.get_package("version-relations") skeleton = self.build_skeleton(package) self.assertEqual("version-relations", skeleton.name) self.assertEqual("1.0", skeleton.version) relations = [ (DEB_PROVIDES, "provide1"), (DEB_NAME_PROVIDES, "version-relations = 1.0"), (DEB_REQUIRES, "depend1 = 2.0"), (DEB_REQUIRES, "predepend1 <= 2.0"), (DEB_UPGRADES, "version-relations < 1.0"), (DEB_CONFLICTS, "break1 > 2.0"), (DEB_CONFLICTS, "conflict1 < 2.0")] self.assertEqual(relations, skeleton.relations) self.assertEqual(HASH_VERSION_RELATIONS, skeleton.get_hash()) def test_build_skeleton_multiple_relations(self): """ The relations in the package control can have multiple values. In that case, one relation for each value is created in the skeleton. 
""" package = self.get_package("multiple-relations") skeleton = self.build_skeleton(package) self.assertEqual("multiple-relations", skeleton.name) self.assertEqual("1.0", skeleton.version) relations = [ (DEB_PROVIDES, "provide1"), (DEB_PROVIDES, "provide2"), (DEB_NAME_PROVIDES, "multiple-relations = 1.0"), (DEB_REQUIRES, "depend1 = 2.0"), (DEB_REQUIRES, "depend2"), (DEB_REQUIRES, "predepend1 <= 2.0"), (DEB_REQUIRES, "predepend2"), (DEB_OR_REQUIRES, "depend3 | depend4 > 2.0"), (DEB_UPGRADES, "multiple-relations < 1.0"), (DEB_CONFLICTS, "break1 > 2.0"), (DEB_CONFLICTS, "break2"), (DEB_CONFLICTS, "conflict1 < 2.0"), (DEB_CONFLICTS, "conflict2")] self.assertEqual(relations, skeleton.relations) self.assertEqual(HASH_MULTIPLE_RELATIONS, skeleton.get_hash()) def test_build_skeleton_or_relations(self): """ The Depend and Pre-Depend fields can have an or relation. That is considered to be a single relation, with a special type. """ package = self.get_package("or-relations") skeleton = self.build_skeleton(package) self.assertEqual("or-relations", skeleton.name) self.assertEqual("1.0", skeleton.version) relations = [ (DEB_NAME_PROVIDES, "or-relations = 1.0"), (DEB_OR_REQUIRES, "depend1 = 2.0 | depend2"), (DEB_OR_REQUIRES, "predepend1 <= 2.0 | predepend2"), (DEB_UPGRADES, "or-relations < 1.0")] self.assertEqual(relations, skeleton.relations) self.assertEqual(HASH_OR_RELATIONS, skeleton.get_hash()) class SkeletonTest(LandscapeTest): def test_skeleton_set_hash(self): """ If the hash is explictly set using C{set_hash}, C{get_hash} won't recalculate the hash. """ skeleton = PackageSkeleton(DEB_PACKAGE, "package", "1.0") skeleton.set_hash("explicit-hash") self.assertEqual("explicit-hash", skeleton.get_hash()) def test_skeleton_unset_hash(self): """ If the hash is explictly set using C{set_hash}, it can be unset again by passing in None, which means that C{get_hash} will recalculate the hash again. """ skeleton = PackageSkeleton(DEB_PACKAGE, "package", "1.0") calculated_hash = skeleton.get_hash() skeleton.set_hash("explicit-hash") skeleton.set_hash(None) self.assertEqual(calculated_hash, skeleton.get_hash()) landscape-client-14.01/landscape/package/tests/test_taskhandler.py0000644000175000017500000004242612301414317025145 0ustar andreasandreasimport os from twisted.internet.defer import Deferred, fail from landscape.lib.lock import lock_path from landscape.reactor import LandscapeReactor, FakeReactor from landscape.broker.amp import RemoteBrokerConnector from landscape.package.taskhandler import ( PackageTaskHandlerConfiguration, PackageTaskHandler, run_task_handler, LazyRemoteBroker) from landscape.package.facade import AptFacade from landscape.package.store import HashIdStore, PackageStore from landscape.package.tests.helpers import AptFacadeHelper from landscape.tests.helpers import ( LandscapeTest, BrokerServiceHelper, EnvironSaverHelper) from landscape.tests.mocker import ANY, ARGS, MATCH def ISTYPE(match_type): return MATCH(lambda arg: type(arg) is match_type) SAMPLE_LSB_RELEASE = "DISTRIB_CODENAME=codename\n" class PackageTaskHandlerConfigurationTest(LandscapeTest): def test_update_stamp_option(self): """ L{PackageReporterConfiguration.update_stamp_filename} points to the update-stamp file. 
""" config = PackageTaskHandlerConfiguration() self.assertEqual( config.update_stamp_filename, "/var/lib/landscape/client/package/update-stamp") class PackageTaskHandlerTest(LandscapeTest): helpers = [AptFacadeHelper, EnvironSaverHelper, BrokerServiceHelper] def setUp(self): super(PackageTaskHandlerTest, self).setUp() self.config = PackageTaskHandlerConfiguration() self.store = PackageStore(self.makeFile()) self.handler = PackageTaskHandler( self.store, self.facade, self.remote, self.config) def test_use_hash_id_db(self): # We don't have this hash=>id mapping self.assertEqual(self.store.get_hash_id("hash"), None) # An appropriate hash=>id database is available self.config.data_path = self.makeDir() os.makedirs(os.path.join(self.config.data_path, "package", "hash-id")) hash_id_db_filename = os.path.join(self.config.data_path, "package", "hash-id", "uuid_codename_arch") HashIdStore(hash_id_db_filename).set_hash_ids({"hash": 123}) # Fake uuid, codename and arch message_store = self.broker_service.message_store message_store.set_server_uuid("uuid") self.handler.lsb_release_filename = self.makeFile(SAMPLE_LSB_RELEASE) self.facade.set_arch("arch") # Attach the hash=>id database to our store self.mocker.replay() result = self.handler.use_hash_id_db() # Now we do have the hash=>id mapping def callback(ignored): self.assertEqual(self.store.get_hash_id("hash"), 123) result.addCallback(callback) return result def test_use_hash_id_db_undetermined_codename(self): # Fake uuid message_store = self.broker_service.message_store message_store.set_server_uuid("uuid") # Undetermined codename self.handler.lsb_release_filename = self.makeFile("Foo=bar") # The failure should be properly logged logging_mock = self.mocker.replace("logging.warning") logging_mock("Couldn't determine which hash=>id database to use: " "missing code-name key in %s" % self.handler.lsb_release_filename) self.mocker.result(None) # Go! self.mocker.replay() result = self.handler.use_hash_id_db() return result def test_use_hash_id_db_wit_non_existing_lsb_release(self): # Fake uuid message_store = self.broker_service.message_store message_store.set_server_uuid("uuid") # Undetermined codename self.handler.lsb_release_filename = self.makeFile() # The failure should be properly logged logging_mock = self.mocker.replace("logging.warning") logging_mock("Couldn't determine which hash=>id database to use: " "[Errno 2] No such file or directory: '%s'" % self.handler.lsb_release_filename) self.mocker.result(None) # Go! self.mocker.replay() result = self.handler.use_hash_id_db() return result def test_wb_determine_hash_id_db_filename_server_uuid_is_none(self): """ The L{PaclageTaskHandler._determine_hash_id_db_filename} method should return C{None} if the server uuid is C{None}. """ message_store = self.broker_service.message_store message_store.set_server_uuid(None) result = self.handler._determine_hash_id_db_filename() def callback(hash_id_db_filename): self.assertIs(hash_id_db_filename, None) result.addCallback(callback) return result def test_use_hash_id_db_undetermined_server_uuid(self): """ If the server-uuid can't be determined for some reason, no hash-id db should be used and the failure should be properly logged. 
""" message_store = self.broker_service.message_store message_store.set_server_uuid(None) logging_mock = self.mocker.replace("logging.warning") logging_mock("Couldn't determine which hash=>id database to use: " "server UUID not available") self.mocker.result(None) self.mocker.replay() result = self.handler.use_hash_id_db() def callback(ignore): self.assertFalse(self.store.has_hash_id_db()) result.addCallback(callback) return result def test_get_session_id(self): """ L{get_session_id} returns a session ID. """ def assertHaveSessionId(session_id): self.assertTrue(session_id is not None) result = self.handler.get_session_id() result.addCallback(assertHaveSessionId) return result def test_use_hash_id_db_undetermined_arch(self): # Fake uuid and codename message_store = self.broker_service.message_store message_store.set_server_uuid("uuid") self.handler.lsb_release_filename = self.makeFile(SAMPLE_LSB_RELEASE) # Undetermined arch self.facade.set_arch(None) # The failure should be properly logged logging_mock = self.mocker.replace("logging.warning") logging_mock("Couldn't determine which hash=>id database to use: "\ "unknown dpkg architecture") self.mocker.result(None) # Go! self.mocker.replay() result = self.handler.use_hash_id_db() return result def test_use_hash_id_db_database_not_found(self): # Clean path, we don't have an appropriate hash=>id database self.config.data_path = self.makeDir() # Fake uuid, codename and arch message_store = self.broker_service.message_store message_store.set_server_uuid("uuid") self.handler.lsb_release_filename = self.makeFile(SAMPLE_LSB_RELEASE) self.facade.set_arch("arch") # Let's try self.mocker.replay() result = self.handler.use_hash_id_db() # We go on without the hash=>id database def callback(ignored): self.assertFalse(self.store.has_hash_id_db()) result.addCallback(callback) return result def test_use_hash_id_with_invalid_database(self): # Let's say the appropriate database is actually garbage self.config.data_path = self.makeDir() os.makedirs(os.path.join(self.config.data_path, "package", "hash-id")) hash_id_db_filename = os.path.join(self.config.data_path, "package", "hash-id", "uuid_codename_arch") open(hash_id_db_filename, "w").write("junk") # Fake uuid, codename and arch message_store = self.broker_service.message_store message_store.set_server_uuid("uuid") self.handler.lsb_release_filename = self.makeFile(SAMPLE_LSB_RELEASE) self.facade.set_arch("arch") # The failure should be properly logged logging_mock = self.mocker.replace("logging.warning") logging_mock("Invalid hash=>id database %s" % hash_id_db_filename) self.mocker.result(None) # Try to attach it self.mocker.replay() result = self.handler.use_hash_id_db() # We remove the broken hash=>id database and go on without it def callback(ignored): self.assertFalse(os.path.exists(hash_id_db_filename)) self.assertFalse(self.store.has_hash_id_db()) result.addCallback(callback) return result def test_run(self): handler_mock = self.mocker.patch(self.handler) handler_mock.handle_tasks() self.mocker.result("WAYO!") self.mocker.replay() self.assertEqual(self.handler.run(), "WAYO!") def test_handle_tasks(self): queue_name = PackageTaskHandler.queue_name self.store.add_task(queue_name, 0) self.store.add_task(queue_name, 1) self.store.add_task(queue_name, 2) results = [Deferred() for i in range(3)] stash = [] def handle_task(task): result = results[task.data] result.addCallback(lambda x: stash.append(task.data)) return result handler_mock = self.mocker.patch(self.handler) handler_mock.handle_task(ANY) 
self.mocker.call(handle_task) self.mocker.count(3) self.mocker.replay() handle_tasks_result = self.handler.handle_tasks() self.assertEqual(stash, []) results[1].callback(None) self.assertEqual(stash, []) self.assertEqual(self.store.get_next_task(queue_name).data, 0) results[0].callback(None) self.assertEqual(stash, [0, 1]) self.assertTrue(handle_tasks_result.called) self.assertEqual(self.store.get_next_task(queue_name).data, 2) results[2].callback(None) self.assertEqual(stash, [0, 1, 2]) self.assertTrue(handle_tasks_result.called) self.assertEqual(self.store.get_next_task(queue_name), None) handle_tasks_result = self.handler.handle_tasks() self.assertTrue(handle_tasks_result.called) def test_handle_tasks_hooks_errback(self): queue_name = PackageTaskHandler.queue_name self.store.add_task(queue_name, 0) class MyException(Exception): pass def handle_task(task): result = Deferred() result.errback(MyException()) return result handler_mock = self.mocker.patch(self.handler) handler_mock.handle_task(ANY) self.mocker.call(handle_task) self.mocker.replay() stash = [] handle_tasks_result = self.handler.handle_tasks() handle_tasks_result.addErrback(stash.append) self.assertEqual(len(stash), 1) self.assertEqual(stash[0].type, MyException) def test_default_handle_task(self): result = self.handler.handle_task(None) self.assertTrue(isinstance(result, Deferred)) self.assertTrue(result.called) def _mock_run_task_handler(self): """ Mock the different parts of run_task_handler(), to ensure it does what it's supposed to do, without actually creating files and starting processes. """ # This is a slightly lengthy one, so bear with me. # Prepare the mock objects. lock_path_mock = self.mocker.replace("landscape.lib.lock.lock_path", passthrough=False) init_logging_mock = self.mocker.replace("landscape.deployment" ".init_logging", passthrough=False) reactor_mock = self.mocker.patch(LandscapeReactor) connector_mock = self.mocker.patch(RemoteBrokerConnector) HandlerMock = self.mocker.proxy(PackageTaskHandler) # The goal of this method is to perform a sequence of tasks # where the ordering is important. self.mocker.order() # First, we must acquire a lock as the same task handler should # never have two instances running in parallel. The 'default' # below comes from the queue_name attribute. lock_path_mock(os.path.join(self.data_path, "package", "default.lock")) # Once locking is done, it's safe to start logging without # corrupting the file. We don't want any output unless it's # breaking badly, so the quiet option should be set. init_logging_mock(ISTYPE(PackageTaskHandlerConfiguration), "package-task-handler") # We also expect the umask to be set appropriately before running the # commands umask = self.mocker.replace("os.umask") umask(022) handler_args = [] HandlerMock(ANY, ANY, ANY, ANY) self.mocker.passthrough() # Let the real constructor run for testing. self.mocker.call(lambda *args: handler_args.extend(args)) call_when_running = [] reactor_mock.call_when_running(ANY) self.mocker.call(lambda f: call_when_running.append(f)) reactor_mock.run() self.mocker.call(lambda: call_when_running[0]()) connector_mock.disconnect() reactor_mock.call_later(0, ANY) self.mocker.result(None) # Okay, the whole playground is set. self.mocker.replay() return HandlerMock, handler_args def test_run_task_handler(self): """ The L{run_task_handler} function creates and runs the given task handler with the proper arguments. 
""" HandlerMock, handler_args = self._mock_run_task_handler() def assert_task_handler(ignored): store, facade, broker, config = handler_args try: # Verify the arguments passed to the reporter constructor. self.assertEqual(type(store), PackageStore) self.assertEqual(type(facade), AptFacade) self.assertEqual(type(broker), LazyRemoteBroker) self.assertEqual(type(config), PackageTaskHandlerConfiguration) # Let's see if the store path is where it should be. filename = os.path.join(self.data_path, "package", "database") store.add_available([1, 2, 3]) other_store = PackageStore(filename) self.assertEqual(other_store.get_available(), [1, 2, 3]) # Check the hash=>id database directory as well self.assertTrue(os.path.exists( os.path.join(self.data_path, "package", "hash-id"))) finally: # Put reactor back in place before returning. self.mocker.reset() result = run_task_handler(HandlerMock, ["-c", self.config_filename]) return result.addCallback(assert_task_handler) def test_run_task_handler_when_already_locked(self): lock_path(os.path.join(self.data_path, "package", "default.lock")) try: run_task_handler(PackageTaskHandler, ["-c", self.config_filename]) except SystemExit, e: self.assertIn("default is already running", str(e)) else: self.fail("SystemExit not raised") def test_run_task_handler_when_already_locked_and_quiet_option(self): lock_path(os.path.join(self.data_path, "package", "default.lock")) try: run_task_handler(PackageTaskHandler, ["-c", self.config_filename, "--quiet"]) except SystemExit, e: self.assertEqual(str(e), "") else: self.fail("SystemExit not raised") def test_errors_in_tasks_are_printed_and_exit_program(self): # Ignore a bunch of crap that we don't care about init_logging_mock = self.mocker.replace("landscape.deployment" ".init_logging", passthrough=False) init_logging_mock(ARGS) class MyException(Exception): pass self.log_helper.ignore_errors(MyException) # Simulate a task handler which errors out. handler_factory_mock = self.mocker.proxy(PackageTaskHandler) handler_mock = handler_factory_mock(ARGS) self.expect(handler_mock.run()).result(fail(MyException("Hey error"))) self.mocker.replay() # Ok now for some real stuff def assert_log(ignored): self.assertIn("MyException", self.logfile.getvalue()) result = run_task_handler(handler_factory_mock, ["-c", self.config_filename], reactor=FakeReactor()) return result.addCallback(assert_log) class LazyRemoteBrokerTest(LandscapeTest): helpers = [BrokerServiceHelper] def test_wb_is_lazy(self): """ The L{LazyRemoteBroker} class doesn't initialize the actual remote broker until one of its attributes gets actually accessed. 
""" reactor = FakeReactor() connector = RemoteBrokerConnector(reactor, self.broker_service.config) self.broker = LazyRemoteBroker(connector) self.assertIs(self.broker._remote, None) def close_connection(result): self.assertTrue(result) connector.disconnect() result = self.broker.ping() return result.addCallback(close_connection) landscape-client-14.01/landscape/package/__init__.py0000644000175000017500000000000012301414317022161 0ustar andreasandreaslandscape-client-14.01/landscape/package/taskhandler.py0000644000175000017500000002441412301414317022741 0ustar andreasandreasimport os import re import logging from twisted.internet.defer import succeed, Deferred from landscape.lib.lock import lock_path, LockError from landscape.lib.log import log_failure from landscape.lib.lsb_release import LSB_RELEASE_FILENAME, parse_lsb_release from landscape.reactor import LandscapeReactor from landscape.deployment import Configuration, init_logging from landscape.package.store import PackageStore, InvalidHashIdDb from landscape.broker.amp import RemoteBrokerConnector class PackageTaskError(Exception): """Raised when a task hasn't been successfully completed.""" class PackageTaskHandlerConfiguration(Configuration): """Specialized configuration for L{PackageTaskHandler}s.""" @property def package_directory(self): """Get the path to the package directory.""" return os.path.join(self.data_path, "package") @property def store_filename(self): """Get the path to the SQlite file for the L{PackageStore}.""" return os.path.join(self.package_directory, "database") @property def hash_id_directory(self): """Get the path to the directory holding the stock hash-id stores.""" return os.path.join(self.package_directory, "hash-id") @property def update_stamp_filename(self): """Get the path to the update-stamp file.""" return os.path.join(self.package_directory, "update-stamp") @property def detect_package_changes_stamp(self): """Get the path to the stamp marking when the last time we checked for changes in the packages was.""" return os.path.join(self.data_path, "detect_package_changes_timestamp") class LazyRemoteBroker(object): """Wrapper class around L{RemoteBroker} providing lazy initialization. This class is a wrapper around a regular L{RemoteBroker}. It connects to the remote broker object only when one of its attributes is first accessed. @param connector: The L{RemoteBrokerConnector} which will be used to connect to the broker. @note: This behaviour is needed in particular by the ReleaseUpgrader and the PackageChanger, because if the they connect early and the landscape-client package gets upgraded while they run, they will lose the connection and will not be able to reconnect for a potentially long window of time (till the new landscape-client package version is fully configured and the service is started again). """ def __init__(self, connector): self._connector = connector self._remote = None def __getattr__(self, method): if self._remote: return getattr(self._remote, method) def wrapper(*args, **kwargs): def got_connection(remote): self._remote = remote return getattr(self._remote, method)(*args, **kwargs) result = self._connector.connect() return result.addCallback(got_connection) return wrapper class PackageTaskHandler(object): config_factory = PackageTaskHandlerConfiguration queue_name = "default" lsb_release_filename = LSB_RELEASE_FILENAME package_store_class = PackageStore # This file is touched after every succesful 'apt-get update' run if the # update-notifier-common package is installed. 
update_notifier_stamp = "/var/lib/apt/periodic/update-success-stamp" def __init__(self, package_store, package_facade, remote_broker, config): self._store = package_store self._facade = package_facade self._broker = remote_broker self._config = config self._count = 0 self._session_id = None def run(self): return self.handle_tasks() def handle_tasks(self): """Handle the tasks in the queue. The tasks will be handed over one by one to L{handle_task} until the queue is empty or a task fails. @see: L{handle_tasks} """ return self._handle_next_task(None) def _handle_next_task(self, result, last_task=None): """Pick the next task from the queue and pass it to C{handle_task}.""" if last_task is not None: # Last task succeeded. We can safely kill it now. last_task.remove() self._count += 1 task = self._store.get_next_task(self.queue_name) if task: # We have another task. Let's handle it. result = self.handle_task(task) result.addCallback(self._handle_next_task, last_task=task) result.addErrback(self._handle_task_failure) return result else: # No more tasks! We're done! return succeed(None) def _handle_task_failure(self, failure): """Gracefully handle a L{PackageTaskError} and stop handling tasks.""" failure.trap(PackageTaskError) def handle_task(self, task): """Handle a single task. Sub-classes must override this method in order to trigger task-specific actions. This method must return a L{Deferred} firing the task result. If the deferred is successful the task will be removed from the queue and the next one will be picked. If the task can't be completed, this method must raise a L{PackageTaskError}, in this case the handler will stop processing tasks and the failed task won't be removed from the queue. """ return succeed(None) @property def handled_tasks_count(self): """ Return the number of tasks that have been successfully handled so far. """ return self._count def use_hash_id_db(self): """ Attach the appropriate pre-canned hash=>id database to our store. """ def use_it(hash_id_db_filename): if hash_id_db_filename is None: # Couldn't determine which hash=>id database to use, # just ignore the failure and go on return if not os.path.exists(hash_id_db_filename): # The appropriate database isn't there, but nevermind # and just go on return try: self._store.add_hash_id_db(hash_id_db_filename) except InvalidHashIdDb: # The appropriate database is there but broken, # let's remove it and go on logging.warning("Invalid hash=>id database %s" % hash_id_db_filename) os.remove(hash_id_db_filename) return result = self._determine_hash_id_db_filename() result.addCallback(use_it) return result def _determine_hash_id_db_filename(self): """Build up the filename of the hash=>id database to use. @return: a deferred resulting in the filename to use or C{None} in case of errors. """ def got_server_uuid(server_uuid): warning = "Couldn't determine which hash=>id database to use: %s" if server_uuid is None: logging.warning(warning % "server UUID not available") return None try: lsb_release_info = parse_lsb_release(self.lsb_release_filename) except IOError, error: logging.warning(warning % str(error)) return None try: codename = lsb_release_info["code-name"] except KeyError: logging.warning(warning % "missing code-name key in %s" % self.lsb_release_filename) return None arch = self._facade.get_arch() if not arch: # The Apt code should always return a non-empty string, # so this branch shouldn't get executed at all. However # this check is kept as an extra paranoia sanity check. 
logging.warning(warning % "unknown dpkg architecture") return None return os.path.join(self._config.hash_id_directory, "%s_%s_%s" % (server_uuid, codename, arch)) result = self._broker.get_server_uuid() result.addCallback(got_server_uuid) return result def get_session_id(self): def got_session_id(session_id): self._session_id = session_id return session_id result = self._broker.get_session_id() result.addCallback(got_session_id) return result def run_task_handler(cls, args, reactor=None): # please only pass reactor when you have totally mangled everything with # mocker. Otherwise bad things will happen. if reactor is None: reactor = LandscapeReactor() config = cls.config_factory() config.load(args) for directory in [config.package_directory, config.hash_id_directory]: if not os.path.isdir(directory): os.mkdir(directory) program_name = cls.queue_name lock_filename = os.path.join(config.package_directory, program_name + ".lock") try: lock_path(lock_filename) except LockError: if config.quiet: raise SystemExit() raise SystemExit("error: package %s is already running" % program_name) words = re.findall("[A-Z][a-z]+", cls.__name__) init_logging(config, "-".join(word.lower() for word in words)) # Setup our umask for Apt to use, this needs to setup file permissions to # 0644 so... os.umask(022) package_store = cls.package_store_class(config.store_filename) # Delay importing of the facades so that we don't # import Apt unless we need to. from landscape.package.facade import AptFacade package_facade = AptFacade() def finish(): connector.disconnect() reactor.call_later(0, reactor.stop) def got_error(failure): log_failure(failure) finish() connector = RemoteBrokerConnector(reactor, config, retry_on_reconnect=True) remote = LazyRemoteBroker(connector) handler = cls(package_store, package_facade, remote, config) result = Deferred() result.addCallback(lambda x: handler.run()) result.addCallback(lambda x: finish()) result.addErrback(got_error) reactor.call_when_running(lambda: result.callback(None)) reactor.run() return result landscape-client-14.01/landscape/message_schemas.py0000644000175000017500000004071512301414317022177 0ustar andreasandreasfrom landscape.schema import ( Message, KeyDict, Dict, List, Tuple, Bool, Int, Float, Bytes, Unicode, Constant, Any) # When adding a new schema, which deprecates an older schema, the recommended # naming convention, is to name it SCHEMA_NAME_ and the last API version that # the schema works with. # # i.e. if I have USERS and I'm deprecating it, in API 2.2, then USERS becomes # USERS_2_1 process_info = KeyDict({"pid": Int(), "name": Unicode(), "state": Bytes(), "sleep-average": Int(), "uid": Int(), "gid": Int(), "vm-size": Int(), "start-time": Int(), "percent-cpu": Float()}, # Optional for backwards compatibility optional=["vm-size", "sleep-average", "percent-cpu"]) ACTIVE_PROCESS_INFO = Message( "active-process-info", {"kill-processes": List(Int()), "kill-all-processes": Bool(), "add-processes": List(process_info), "update-processes": List(process_info)}, # XXX Really we don't want all three of these keys to be optional: # we always want _something_... optional=["add-processes", "update-processes", "kill-processes", "kill-all-processes"]) COMPUTER_UPTIME = Message( "computer-uptime", {"startup-times": List(Int()), "shutdown-times": List(Int())}, # XXX Again, one or the other. 
optional=["startup-times", "shutdown-times"]) CLIENT_UPTIME = Message( "client-uptime", {"period": Tuple(Float(), Float()), "components": List(Int())}, optional=["components"]) # just for backwards compatibility OPERATION_RESULT = Message( "operation-result", {"operation-id": Int(), "status": Int(), "result-code": Int(), "result-text": Unicode()}, optional=["result-code", "result-text"]) #ACTION_INFO is obsolete. ACTION_INFO = Message( "action-info", {"response-id": Int(), "success": Bool(), "kind": Bytes(), "parameters": Bytes()}) COMPUTER_INFO = Message( "computer-info", {"hostname": Unicode(), "total-memory": Int(), "total-swap": Int(), "annotations": Dict(Unicode(), Unicode())}, # Not sure why these are all optional, but it's explicitly tested # in the server optional=["hostname", "total-memory", "total-swap", "annotations"]) DISTRIBUTION_INFO = Message( "distribution-info", {"distributor-id": Unicode(), "description": Unicode(), "release": Unicode(), "code-name": Unicode()}, # all optional because the lsb-release file may not have all data. optional=["distributor-id", "description", "release", "code-name"]) CLOUD_METADATA = Message( "cloud-instance-metadata", {"instance-id": Unicode(), "ami-id": Unicode(), "instance-type": Unicode()}) hal_data = Dict(Unicode(), Any(Unicode(), List(Unicode()), Bool(), Int(), Float())) HARDWARE_INVENTORY = Message("hardware-inventory", { "devices": List(Any(Tuple(Constant("create"), hal_data), Tuple(Constant("update"), Unicode(), # udi, hal_data, # creates, hal_data, # updates, hal_data), # deletes Tuple(Constant("delete"), Unicode()), ), )}) HARDWARE_INFO = Message("hardware-info", { "data": Unicode()}) juju_data = {"environment-uuid": Unicode(), "api-addresses": List(Unicode()), "unit-name": Unicode()} # The copy is needed because Message mutates the dictionary JUJU_INFO = Message("juju-info", juju_data.copy()) LOAD_AVERAGE = Message("load-average", { "load-averages": List(Tuple(Int(), Float())), }) CPU_USAGE = Message("cpu-usage", { "cpu-usages": List(Tuple(Int(), Float())), }) CEPH_USAGE = Message("ceph-usage", { "ceph-usages": List(Tuple(Int(), Float())), "ring-id": Unicode(), }) SWIFT_DEVICE_INFO = Message("swift-device-info", { "swift-device-info": List( KeyDict({"device": Unicode(), "mounted": Bool()})) }) KEYSTONE_TOKEN = Message("keystone-token", { "data": Any(Bytes(), Constant(None)) }) CHANGE_HA_SERVICE = Message( "change-ha-service", {"service-name": Bytes(), # keystone "unit-name": Bytes(), # keystone-9 "state": Bytes()}) # online or standby MEMORY_INFO = Message("memory-info", { "memory-info": List(Tuple(Float(), Int(), Int())), }) RESYNCHRONIZE = Message( "resynchronize", {"operation-id": Int()}, # operation-id is only there if it's a response to a server-initiated # resynchronize. optional=["operation-id"]) MOUNT_ACTIVITY = Message("mount-activity", { "activities": List(Tuple(Float(), Unicode(), Bool()))}) MOUNT_INFO = Message("mount-info", { "mount-info": List(Tuple(Float(), KeyDict({"mount-point": Unicode(), "device": Unicode(), "filesystem": Unicode(), "total-space": Int()}) )), }) FREE_SPACE = Message("free-space", { "free-space": List(Tuple(Float(), Unicode(), Int()))}) REGISTER = Message( "register", # The term used in the UI is actually 'registration_key', but we keep # the message schema field as 'registration_password' in case a new # client contacts an older server. 
{"registration_password": Any(Unicode(), Constant(None)), "computer_title": Unicode(), "hostname": Unicode(), "account_name": Unicode(), "tags": Any(Unicode(), Constant(None)), "vm-info": Bytes(), "container-info": Unicode(), "juju-info": KeyDict(juju_data), "access_group": Unicode()}, optional=["registration_password", "hostname", "tags", "vm-info", "container-info", "juju-info", "unicode", "access_group"]) REGISTER_PROVISIONED_MACHINE = Message( "register-provisioned-machine", {"otp": Bytes()}) REGISTER_CLOUD_VM = Message( "register-cloud-vm", {"hostname": Unicode(), "otp": Any(Bytes(), Constant(None)), "instance_key": Unicode(), "account_name": Any(Unicode(), Constant(None)), "registration_password": Any(Unicode(), Constant(None)), "reservation_key": Unicode(), "public_hostname": Unicode(), "local_hostname": Unicode(), "kernel_key": Any(Unicode(), Constant(None)), "ramdisk_key": Any(Unicode(), Constant(None)), "launch_index": Int(), "image_key": Unicode(), "tags": Any(Unicode(), Constant(None)), "vm-info": Bytes(), "public_ipv4": Unicode(), "local_ipv4": Unicode(), "access_group": Unicode()}, optional=["tags", "vm-info", "public_ipv4", "local_ipv4", "access_group"]) TEMPERATURE = Message("temperature", { "thermal-zone": Unicode(), "temperatures": List(Tuple(Int(), Float())), }) PROCESSOR_INFO = Message( "processor-info", {"processors": List(KeyDict({"processor-id": Int(), "vendor": Unicode(), "model": Unicode(), "cache-size": Int(), }, optional=["vendor", "cache-size"]))}) user_data = KeyDict({ "uid": Int(), "username": Unicode(), "name": Any(Unicode(), Constant(None)), "enabled": Bool(), "location": Any(Unicode(), Constant(None)), "home-phone": Any(Unicode(), Constant(None)), "work-phone": Any(Unicode(), Constant(None)), "primary-gid": Any(Int(), Constant(None)), "primary-groupname": Unicode()}, optional=["primary-groupname", "primary-gid"]) group_data = KeyDict({ "gid": Int(), "name": Unicode()}) USERS = Message( "users", {"operation-id": Int(), "create-users": List(user_data), "update-users": List(user_data), "delete-users": List(Unicode()), "create-groups": List(group_data), "update-groups": List(group_data), "delete-groups": List(Unicode()), "create-group-members": Dict(Unicode(), List(Unicode())), "delete-group-members": Dict(Unicode(), List(Unicode())), }, # operation-id is only there for responses, and all other are # optional as long as one of them is there (no way to say that yet) optional=["operation-id", "create-users", "update-users", "delete-users", "create-groups", "update-groups", "delete-groups", "create-group-members", "delete-group-members"]) USERS_2_1 = Message( "users", {"operation-id": Int(), "create-users": List(user_data), "update-users": List(user_data), "delete-users": List(Int()), "create-groups": List(group_data), "update-groups": List(group_data), "delete-groups": List(Int()), "create-group-members": Dict(Int(), List(Int())), "delete-group-members": Dict(Int(), List(Int())), }, # operation-id is only there for responses, and all other are # optional as long as one of them is there (no way to say that yet) optional=["operation-id", "create-users", "update-users", "delete-users", "create-groups", "update-groups", "delete-groups", "create-group-members", "delete-group-members"]) USERS_2_0 = Message( "users", {"operation-id": Int(), "create-users": List(user_data), "update-users": List(user_data), "delete-users": List(Int()), "create-groups": List(group_data), "update-groups": List(group_data), "delete-groups": List(Int()), "create-group-members": Dict(Int(), 
List(Int())), "delete-group-members": Dict(Int(), List(Int())), }, # operation-id is only there for responses, and all other are # optional as long as one of them is there (no way to say that yet) optional=["operation-id", "create-users", "update-users", "delete-users", "create-groups", "update-groups", "delete-groups", "create-group-members", "delete-group-members"]) opt_str = Any(Unicode(), Constant(None)) OLD_USERS = Message( "users", {"users": List(KeyDict({"username": Unicode(), "uid": Int(), "realname": opt_str, "location": opt_str, "home-phone": opt_str, "work-phone": opt_str, "enabled": Bool()}, optional=["location", "home-phone", "work-phone"])), "groups": List(KeyDict({"gid": Int(), "name": Unicode(), "members": List(Unicode())}))}, optional=["groups"]) package_ids_or_ranges = List(Any(Tuple(Int(), Int()), Int())) PACKAGES = Message( "packages", {"installed": package_ids_or_ranges, "available": package_ids_or_ranges, "available-upgrades": package_ids_or_ranges, "locked": package_ids_or_ranges, "not-installed": package_ids_or_ranges, "not-available": package_ids_or_ranges, "not-available-upgrades": package_ids_or_ranges, "not-locked": package_ids_or_ranges}, optional=["installed", "available", "available-upgrades", "locked", "not-available", "not-installed", "not-available-upgrades", "not-locked"]) package_locks = List(Tuple(Unicode(), Unicode(), Unicode())) PACKAGE_LOCKS = Message( "package-locks", {"created": package_locks, "deleted": package_locks}, optional=["created", "deleted"]) CHANGE_PACKAGE_HOLDS = Message( "change-package-holds", {"created": List(Unicode()), "deleted": List(Unicode())}, optional=["created", "deleted"]) CHANGE_PACKAGES_RESULT = Message( "change-packages-result", {"operation-id": Int(), "must-install": List(Any(Int(), Constant(None))), "must-remove": List(Any(Int(), Constant(None))), "result-code": Int(), "result-text": Unicode()}, optional=["result-text", "must-install", "must-remove"]) UNKNOWN_PACKAGE_HASHES = Message("unknown-package-hashes", { "hashes": List(Bytes()), "request-id": Int(), }) PACKAGE_REPORTER_RESULT = Message("package-reporter-result", { "code": Int(), "err": Unicode()}) ADD_PACKAGES = Message("add-packages", { "packages": List(KeyDict({"name": Unicode(), "description": Unicode(), "section": Unicode(), "relations": List(Tuple(Int(), Unicode())), "summary": Unicode(), "installed-size": Any(Int(), Constant(None)), "size": Any(Int(), Constant(None)), "version": Unicode(), "type": Int(), })), "request-id": Int(), }) TEXT_MESSAGE = Message("text-message", { "message": Unicode()}) TEST = Message( "test", {"greeting": Bytes(), "consistency-error": Bool(), "echo": Bytes(), "sequence": Int()}, optional=["greeting", "consistency-error", "echo", "sequence"]) # The tuples are timestamp, value GRAPH_DATA = KeyDict({"values": List(Tuple(Float(), Float())), "error": Unicode(), "script-hash": Bytes()}) CUSTOM_GRAPH = Message("custom-graph", { "data": Dict(Int(), GRAPH_DATA)}) # XXX This is kept for backward compatibility, it can eventually be removed # when all clients will support REBOOT_REQUIRED_INFO REBOOT_REQUIRED = Message( "reboot-required", {"flag": Bool()}) REBOOT_REQUIRED_INFO = Message( "reboot-required-info", {"flag": Bool(), "packages": List(Unicode())}, optional=["flag", "packages"]) APT_PREFERENCES = Message( "apt-preferences", {"data": Any(Dict(Unicode(), Unicode()), Constant(None))}) EUCALYPTUS_INFO = Message( "eucalyptus-info", {"basic_info": Dict(Bytes(), Any(Bytes(), Constant(None))), "walrus_info": Bytes(), 
"cluster_controller_info": Bytes(), "storage_controller_info": Bytes(), "node_controller_info": Bytes(), "capacity_info": Bytes()}, optional=["capacity_info"]) EUCALYPTUS_INFO_ERROR = Message( "eucalyptus-info-error", {"error": Bytes()}) # The network-device message is split in two top level keys because we don't # support adding sub-keys in a backwards-compatible way (only top-level keys). # New servers will see an optional device-speeds key, and old servers will # simply ignore the extra info.. NETWORK_DEVICE = Message( "network-device", {"devices": List(KeyDict({"interface": Bytes(), "ip_address": Bytes(), "mac_address": Bytes(), "broadcast_address": Bytes(), "netmask": Bytes(), "flags": Int()})), "device-speeds": List(KeyDict({"interface": Bytes(), "speed": Int(), "duplex": Bool()}))}, optional=["device-speeds"]) NETWORK_ACTIVITY = Message( "network-activity", # Dict maps interfaces to their network activity. The network activity of # an interface a is a list of 3-tuples (step, in, out), where 'step' is the # time interval and 'in'/'out' are number of bytes received/sent over the # interval. {"activities": Dict(Bytes(), List(Tuple(Int(), Int(), Int())))}) UPDATE_MANAGER_INFO = Message("update-manager-info", {"prompt": Unicode()}) message_schemas = {} for schema in [ACTIVE_PROCESS_INFO, COMPUTER_UPTIME, CLIENT_UPTIME, OPERATION_RESULT, COMPUTER_INFO, DISTRIBUTION_INFO, HARDWARE_INVENTORY, HARDWARE_INFO, LOAD_AVERAGE, MEMORY_INFO, RESYNCHRONIZE, MOUNT_ACTIVITY, MOUNT_INFO, FREE_SPACE, REGISTER, REGISTER_CLOUD_VM, REGISTER_PROVISIONED_MACHINE, TEMPERATURE, PROCESSOR_INFO, USERS, PACKAGES, PACKAGE_LOCKS, CHANGE_PACKAGES_RESULT, UNKNOWN_PACKAGE_HASHES, ADD_PACKAGES, PACKAGE_REPORTER_RESULT, TEXT_MESSAGE, TEST, CUSTOM_GRAPH, REBOOT_REQUIRED, APT_PREFERENCES, EUCALYPTUS_INFO, EUCALYPTUS_INFO_ERROR, NETWORK_DEVICE, NETWORK_ACTIVITY, REBOOT_REQUIRED_INFO, UPDATE_MANAGER_INFO, CPU_USAGE, CEPH_USAGE, SWIFT_DEVICE_INFO, KEYSTONE_TOKEN, CHANGE_HA_SERVICE, JUJU_INFO, CLOUD_METADATA]: message_schemas[schema.type] = schema landscape-client-14.01/landscape/amp.py0000644000175000017500000001333312301414317017621 0ustar andreasandreas"""Communication between components in different services via twisted AMP. The Landscape client is composed by several processes that need to talk to each other. For example the monitor and manager processes need to talk to the broker in order to ask it to add new messages to the outgoing queue, and the broker needs to talk to them in order to dispatch them incoming messages from the server. This module implements a few conveniences built around L{landscape.lib.amp} to let the various services connect to each other in an easy and idiomatic way, and have them respond to standard requests like "ping" or "exit". """ import os import logging from landscape.lib.amp import ( MethodCallClientFactory, MethodCallServerFactory, RemoteObject) class ComponentPublisher(object): """Publish a Landscape client component using a UNIX socket. Other Landscape client processes can then connect to the socket and invoke methods on the component remotely, using L{MethodCall} commands. @param component: The component to publish. It can be any Python object implementing the methods listed in the C{methods} class variable. @param reactor: The L{LandscapeReactor} used to listen to the socket. @param config: The L{Configuration} object used to build the socket path. 
""" factory = MethodCallServerFactory def __init__(self, component, reactor, config): self._reactor = reactor self._config = config self._component = component self._port = None self.methods = get_remote_methods(type(component)).keys() def start(self): """Start accepting connections.""" factory = MethodCallServerFactory(self._component, self.methods) socket_path = _get_socket_path(self._component, self._config) self._port = self._reactor.listen_unix(socket_path, factory) def stop(self): """Stop accepting connections.""" return self._port.stopListening() def get_remote_methods(klass): """Get all the remote methods declared on a class. @param klass: A class to search for AMP-exposed methods. """ remote_methods = {} for attribute_name in dir(klass): potential_method = getattr(klass, attribute_name) name = getattr(potential_method, "amp_exposed", None) if name is not None: remote_methods[name] = potential_method return remote_methods def remote(method): """ A decorator for marking a method as remotely accessible as a method on a component. """ method.amp_exposed = method.__name__ return method class ComponentConnector(object): """Utility superclass for creating connections with a Landscape component. @cvar component: The class of the component to connect to, it is expected to define a C{name} class attribute, which will be used to find out the socket to use. It must be defined by sub-classes. @cvar factory: The factory class to use for building protocols. @cvar remote: The L{RemoteObject} class or sub-class used for building remote objects. @param reactor: A L{LandscapeReactor} object. @param config: A L{LandscapeConfiguration}. @param retry_on_reconnect: If C{True} the remote object built by this connector will retry L{MethodCall}s that failed due to lost connections. @see: L{MethodCallClientFactory}. """ factory = MethodCallClientFactory component = None # Must be defined by sub-classes remote = RemoteObject def __init__(self, reactor, config, retry_on_reconnect=False): self._reactor = reactor self._config = config self._retry_on_reconnect = retry_on_reconnect self._connector = None def connect(self, max_retries=None, factor=None, quiet=False): """Connect to the remote Landscape component. If the connection is lost after having been established, and then it is established again by the reconnect mechanism, an event will be fired. @param max_retries: If given, the connector will keep trying to connect up to that number of times, if the first connection attempt fails. @param factor: Optionally a float indicating by which factor the delay between subsequent retries should increase. Smaller values result in a faster reconnection attempts pace. @param quiet: A boolean indicating whether to log errors. 
""" factory = self.factory(self._reactor._reactor) factory.initialDelay = factory.delay = 0.05 factory.retryOnReconnect = self._retry_on_reconnect factory.remote = self.remote factory.maxRetries = max_retries if factor: factory.factor = factor def fire_reconnect(ignored): self._reactor.fire("%s-reconnect" % self.component.name) def connected(remote): factory.notifyOnConnect(fire_reconnect) return remote def log_error(failure): logging.error("Error while connecting to %s", self.component.name) return failure socket_path = _get_socket_path(self.component, self._config) deferred = factory.getRemoteObject() self._connector = self._reactor.connect_unix(socket_path, factory) if not quiet: deferred.addErrback(log_error) return deferred.addCallback(connected) def disconnect(self): """Disconnect the L{RemoteObject} that we have created.""" if self._connector is not None: factory = self._connector.factory factory.stopTrying() self._connector.disconnect() self._connector = None def _get_socket_path(component, config): return os.path.join(config.sockets_path, component.name + ".sock") landscape-client-14.01/landscape/configuration.py0000644000175000017500000007035312301414317021720 0ustar andreasandreas"""Interactive configuration support for Landscape. This module, and specifically L{LandscapeSetupScript}, implements the support for the C{landscape-config} script. """ import json import base64 import time import sys import os import getpass import pwd from StringIO import StringIO from landscape.lib.tag import is_valid_tag from landscape.sysvconfig import SysVConfig, ProcessError from landscape.lib.amp import MethodCallError from landscape.lib.twisted_util import gather_results from landscape.lib.fetch import fetch, FetchError, HTTPCodeError from landscape.lib.bootstrap import BootstrapList, BootstrapDirectory from landscape.reactor import LandscapeReactor from landscape.broker.registration import InvalidCredentialsError from landscape.broker.config import BrokerConfiguration from landscape.broker.amp import RemoteBrokerConnector class ConfigurationError(Exception): """Raised when required configuration values are missing.""" class ImportOptionError(ConfigurationError): """Raised when there are issues with handling the --import option.""" def print_text(text, end="\n", error=False): if error: stream = sys.stderr else: stream = sys.stdout stream.write(text + end) stream.flush() def get_invalid_users(users): """ Process a string with a list of comma separated usernames, this returns any usernames not known to the underlying user database. """ if users is not None: user_list = [user.strip() for user in users.split(",")] if "ALL" in user_list: if len(user_list) > 1: raise ConfigurationError( "Extra users specified with ALL users") user_list.remove("ALL") invalid_users = [] for user in user_list: try: pwd.getpwnam(user) except KeyError: invalid_users.append(user) return invalid_users class LandscapeSetupConfiguration(BrokerConfiguration): unsaved_options = ("no_start", "disable", "silent", "ok_no_register", "import_from") def _load_external_options(self): """Handle the --import parameter. Imported options behave as if they were passed in the command line, with precedence being given to real command line options. """ if self.import_from: parser = None try: if "://" in self.import_from: # If it's from a URL, download it now. 
                    if self.http_proxy:
                        os.environ["http_proxy"] = self.http_proxy
                    if self.https_proxy:
                        os.environ["https_proxy"] = self.https_proxy
                    content = self.fetch_import_url(self.import_from)
                    parser = self._get_config_object(
                        alternative_config=StringIO(content))
                elif not os.path.isfile(self.import_from):
                    raise ImportOptionError("File %s doesn't exist." %
                                            self.import_from)
                else:
                    try:
                        parser = self._get_config_object(
                            alternative_config=self.import_from)
                    except:
                        raise ImportOptionError(
                            "Couldn't read configuration from %s." %
                            self.import_from)
            except Exception, error:
                raise ImportOptionError(str(error))

            # But real command line options have precedence.
            options = None
            if parser and self.config_section in parser:
                options = parser[self.config_section]
            if not options:
                raise ImportOptionError("Nothing to import at %s." %
                                        self.import_from)
            options.update(self._command_line_options)
            self._command_line_options = options

    def fetch_import_url(self, url):
        """Handle fetching of URLs passed to --import."""
        print_text("Fetching configuration from %s..." % url)
        error_message = None
        try:
            content = fetch(url)
        except FetchError, error:
            error_message = str(error)
        if error_message is not None:
            raise ImportOptionError(
                "Couldn't download configuration from %s: %s" %
                (url, error_message))
        return content

    def make_parser(self):
        """
        Specialize the parser, adding configure-specific options.
        """
        parser = super(LandscapeSetupConfiguration, self).make_parser()

        parser.add_option("--import", dest="import_from",
                          metavar="FILENAME_OR_URL",
                          help="Filename or URL to import configuration from. "
                               "Imported options behave as if they were "
                               "passed in the command line, with precedence "
                               "being given to real command line options.")
        parser.add_option("--script-users", metavar="USERS",
                          help="A comma-separated list of users to allow "
                               "scripts to run. To allow scripts to be run "
                               "by any user, enter: ALL")
        parser.add_option("--include-manager-plugins", metavar="PLUGINS",
                          default="",
                          help="A comma-separated list of manager plugins to "
                               "load.")
        parser.add_option("-n", "--no-start", action="store_true",
                          help="Don't start the client automatically.")
        parser.add_option("--ok-no-register", action="store_true",
                          help="Return exit code 0 instead of 2 if the client "
                               "can't be registered.")
        parser.add_option("--silent", action="store_true", default=False,
                          help="Run without manual interaction.")
        parser.add_option("--disable", action="store_true", default=False,
                          help="Stop running clients and disable start at "
                               "boot.")
        parser.add_option("--init", action="store_true", default=False,
                          help="Set up the client directory structure "
                               "and exit.")
        return parser


class LandscapeSetupScript(object):
    """
    An interactive procedure which manages the prompting and temporary storage
    of configuration parameters.

    Various attributes on this object will be set on C{config} after L{run} is
    called.

    @ivar config: The L{BrokerConfiguration} object to read and set values
        from and to.
    """

    def __init__(self, config):
        self.config = config

    def show_help(self, text):
        lines = text.strip().splitlines()
        print_text("\n" + "".join([line.strip() + "\n" for line in lines]))

    def prompt_get_input(self, msg, required):
        """Prompt the user on the terminal for a value.

        @param msg: Message to prompt user with
        @param required: True if value must be entered
        """
        while True:
            value = raw_input(msg)
            if value:
                return value
            elif not required:
                break
            self.show_help("This option is required to configure Landscape.")

    def prompt(self, option, msg, required=False):
        """Prompt the user on the terminal for a value.
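
        For example (mirroring how the query methods below use it),
        C{prompt("account_name", "Account name", True)} displays
        "Account name [<default>]: " when the config already has a value,
        and stores any non-empty reply back on C{self.config}.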
@param option: The attribute of C{self.config} that contains the default and which the value will be assigned to. @param msg: The message to prompt the user with (via C{raw_input}). @param required: If True, the user will be required to enter a value before continuing. """ default = getattr(self.config, option, None) if default: msg += " [%s]: " % default else: msg += ": " required = required and not (bool(default)) result = self.prompt_get_input(msg, required) if result: setattr(self.config, option, result) def password_prompt(self, option, msg, required=False): """Prompt the user on the terminal for a password and mask the value. This also prompts the user twice and errors if both values don't match. @param option: The attribute of C{self.config} that contains the default and which the value will be assigned to. @param msg: The message to prompt the user with (via C{raw_input}). @param required: If True, the user will be required to enter a value before continuing. """ default = getattr(self.config, option, None) msg += ": " while True: value = getpass.getpass(msg) if value: value2 = getpass.getpass("Please confirm: ") if value: if value != value2: self.show_help("Keys must match.") else: setattr(self.config, option, value) break elif default or not required: break else: self.show_help("This option is required to configure " "Landscape.") def prompt_yes_no(self, message, default=True): if default: default_msg = " [Y/n]" else: default_msg = " [y/N]" while True: value = raw_input(message + default_msg).lower() if value: if value.startswith("n"): return False if value.startswith("y"): return True self.show_help("Invalid input.") else: return default def query_computer_title(self): if "computer_title" in self.config.get_command_line_options(): return self.show_help( """ The computer title you provide will be used to represent this computer in the Landscape user interface. It's important to use a title that will allow the system to be easily recognized when it appears on the pending computers page. """) self.prompt("computer_title", "This computer's title", True) def query_account_name(self): if "account_name" in self.config.get_command_line_options(): return self.show_help( """ You must now specify the name of the Landscape account you want to register this computer with. You can verify the names of the accounts you manage on your dashboard at https://landscape.canonical.com/dashboard """) self.prompt("account_name", "Account name", True) def query_registration_key(self): command_line_options = self.config.get_command_line_options() if "registration_key" in command_line_options: return self.show_help( """ A registration key may be associated with your Landscape account to prevent unauthorized registration attempts. This is not your personal login password. It is optional, and unless explicitly set on the server, it may be skipped here. If you don't remember the registration key you can find it at https://landscape.canonical.com/account/%s """ % self.config.account_name) self.password_prompt("registration_key", "Account registration key") def query_proxies(self): options = self.config.get_command_line_options() if "http_proxy" in options and "https_proxy" in options: return self.show_help( """ The Landscape client communicates with the server over HTTP and HTTPS. If your network requires you to use a proxy to access HTTP and/or HTTPS web sites, please provide the address of these proxies now. If you don't use a proxy, leave these fields empty. 
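
            A proxy is normally given as a URL, for example
            http://proxy.example.com:8080.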
""") if not "http_proxy" in options: self.prompt("http_proxy", "HTTP proxy URL") if not "https_proxy" in options: self.prompt("https_proxy", "HTTPS proxy URL") def query_script_plugin(self): options = self.config.get_command_line_options() if "include_manager_plugins" in options and "script_users" in options: invalid_users = get_invalid_users(options["script_users"]) if invalid_users: raise ConfigurationError("Unknown system users: %s" % ", ".join(invalid_users)) return self.show_help( """ Landscape has a feature which enables administrators to run arbitrary scripts on machines under their control. By default this feature is disabled in the client, disallowing any arbitrary script execution. If enabled, the set of users that scripts may run as is also configurable. """) msg = "Enable script execution?" included_plugins = [ p.strip() for p in self.config.include_manager_plugins.split(",")] if included_plugins == [""]: included_plugins = [] default = "ScriptExecution" in included_plugins if self.prompt_yes_no(msg, default=default): if "ScriptExecution" not in included_plugins: included_plugins.append("ScriptExecution") self.show_help( """ By default, scripts are restricted to the 'landscape' and 'nobody' users. Please enter a comma-delimited list of users that scripts will be restricted to. To allow scripts to be run by any user, enter "ALL". """) while True: self.prompt("script_users", "Script users") invalid_users = get_invalid_users( self.config.script_users) if not invalid_users: break else: self.show_help("Unknown system users: %s" % ",".join(invalid_users)) self.config.script_users = None else: if "ScriptExecution" in included_plugins: included_plugins.remove("ScriptExecution") self.config.include_manager_plugins = ", ".join(included_plugins) def query_access_group(self): """Query access group from the user.""" self.show_help("You may provide an access group for this computer " "e.g. webservers.") options = self.config.get_command_line_options() if "access_group" in options: return # an access group is already provided, don't ask for one self.prompt("access_group", "Access group", False) def _get_invalid_tags(self, tagnames): """ Splits a string on , and checks the validity of each tag, returns any invalid tags. """ invalid_tags = [] if tagnames: tags = [tag.strip() for tag in tagnames.split(",")] invalid_tags = [tag for tag in tags if not is_valid_tag(tag)] return invalid_tags def query_tags(self): """Query tags from the user.""" options = self.config.get_command_line_options() if "tags" in options: invalid_tags = self._get_invalid_tags(options["tags"]) if invalid_tags: raise ConfigurationError("Invalid tags: %s" % ", ".join(invalid_tags)) return self.show_help("You may provide tags for this computer e.g. " "server,precise.") while True: self.prompt("tags", "Tags", False) if self._get_invalid_tags(self.config.tags): self.show_help("Tag names may only contain alphanumeric " "characters.") self.config.tags = None # Reset for the next prompt else: break def show_header(self): self.show_help( """ This script will interactively set up the Landscape client. It will ask you a few questions about this computer and your Landscape account, and will submit that information to the Landscape server. After this computer is registered it will need to be approved by an account administrator on the pending computers page. Please see https://landscape.canonical.com for more information. """) def run(self): """Kick off the interactive process which prompts the user for data. 
        Data will be saved to C{self.config}.
        """
        self.show_header()
        self.query_computer_title()
        self.query_account_name()
        self.query_registration_key()
        self.query_proxies()
        self.query_script_plugin()
        self.query_access_group()
        self.query_tags()


def setup_init_script_and_start_client():
    "Configure the init script to start the client on boot."
    # XXX This function is misnamed; it doesn't start the client.
    sysvconfig = SysVConfig()
    sysvconfig.set_start_on_boot(True)


def stop_client_and_disable_init_script():
    """
    Stop landscape-client and change configuration to prevent starting
    landscape-client on boot.
    """
    sysvconfig = SysVConfig()
    sysvconfig.stop_landscape()
    sysvconfig.set_start_on_boot(False)


def setup_http_proxy(config):
    """
    If the http_proxy and https_proxy values are not set, copy the values,
    if any, from the environment variables L{http_proxy} and L{https_proxy}.
    """
    if config.http_proxy is None and os.environ.get("http_proxy"):
        config.http_proxy = os.environ["http_proxy"]
    if config.https_proxy is None and os.environ.get("https_proxy"):
        config.https_proxy = os.environ["https_proxy"]


def check_account_name_and_password(config):
    """
    Ensure that silent configurations which plan to start landscape-client
    are either configured for OTP or have both an account_name and computer
    title.
    """
    if config.silent and not config.no_start:
        if not (config.get("otp") or config.provisioning_otp or
                (config.get("account_name") and
                 config.get("computer_title"))):
            raise ConfigurationError("An account name and computer title are "
                                     "required.")


def check_script_users(config):
    """
    If the configuration allows for script execution ensure that the
    configured users are valid for that purpose.
    """
    if config.get("script_users"):
        invalid_users = get_invalid_users(config.get("script_users"))
        if invalid_users:
            raise ConfigurationError("Unknown system users: %s" %
                                     ", ".join(invalid_users))
        if not config.include_manager_plugins:
            config.include_manager_plugins = "ScriptExecution"


def decode_base64_ssl_public_certificate(config):
    """
    Decode a base64-encoded SSL certificate and push that back into place
    in the config object.
    """
    # WARNING: ssl_public_key is misnamed, it's not the key of the
    # certificate, but the actual certificate itself.
    if config.ssl_public_key and config.ssl_public_key.startswith("base64:"):
        decoded_cert = base64.decodestring(config.ssl_public_key[7:])
        config.ssl_public_key = store_public_key_data(
            config, decoded_cert)


def fetch_base64_ssl_public_certificate(hostname, on_info=print_text,
                                        on_error=print_text):
    """
    Fetch the base64-encoded SSL CA certificate from the discovered landscape
    server and return it, still encoded.
    """
    on_info("Fetching CA certificate from %s if available..." % hostname)
    content = ""
    encoded_cert = ""
    ca_url = "http://%s/get-ca-cert" % hostname
    try:
        content = fetch(ca_url, insecure=True)
    except HTTPCodeError, error:
        on_error("Unable to fetch CA certificate from discovered server %s: "
                 "Server does not support client auto-registration." %
                 hostname)
        return encoded_cert
    except FetchError, error:
        on_error("Unable to fetch CA certificate from %s: %s" %
                 (hostname, str(error)))
        return encoded_cert

    if content:
        ca_dict = json.loads(content)
        try:
            if ca_dict["custom_ca_cert"].startswith("base64:"):
                encoded_cert = ca_dict["custom_ca_cert"]
            else:
                on_error("Auto-registration URL %s returns invalid CA JSON: "
                         "%s." % (ca_url, ca_dict))
        except KeyError:
            # No custom CA certificate needed to talk with this server
            on_info("No custom CA certificate available for %s."
                    % hostname)
    else:
        on_error("Unable to fetch CA certificate from discovered server "
                 "%s. Proceeding without custom CA certificate." % hostname)
    return encoded_cert


def setup(config):
    """
    Perform steps to ensure that landscape-client is correctly configured
    before we attempt to register it with a landscape server.

    If we are not configured to be silent then interrogate the user to provide
    necessary details for registration.
    """
    bootstrap_tree(config)
    sysvconfig = SysVConfig()
    if not config.no_start:
        if config.silent:
            setup_init_script_and_start_client()
        elif not sysvconfig.is_configured_to_run():
            answer = raw_input("\nThe Landscape client must be started "
                               "on boot to operate correctly.\n\n"
                               "Start Landscape client on boot? (Y/n): ")
            if not answer.upper().startswith("N"):
                setup_init_script_and_start_client()
            else:
                sys.exit("Aborting Landscape configuration")

    setup_http_proxy(config)
    check_account_name_and_password(config)
    if config.silent:
        check_script_users(config)
    else:
        script = LandscapeSetupScript(config)
        script.run()
    decode_base64_ssl_public_certificate(config)
    config.write()
    # Restart the client to ensure that it's using the new configuration.
    if not config.no_start and not config.otp:
        try:
            sysvconfig.restart_landscape()
        except ProcessError:
            print_text("Couldn't restart the Landscape client.", error=True)
            print_text("This machine will be registered with the provided "
                       "details when the client runs.", error=True)
            exit_code = 2
            if config.ok_no_register:
                exit_code = 0
            sys.exit(exit_code)


def bootstrap_tree(config):
    """Create the client directory tree."""
    bootstrap_list = [
        BootstrapDirectory("$data_path", "landscape", "root", 0755),
        BootstrapDirectory("$annotations_path", "landscape", "landscape",
                           0755)]
    BootstrapList(bootstrap_list).bootstrap(
        data_path=config.data_path, annotations_path=config.annotations_path)


def store_public_key_data(config, certificate_data):
    """
    Write out the data from the SSL certificate provided to us, either from a
    bootstrap.conf file, or from EC2-style user-data.

    @param config: The L{BrokerConfiguration} object in use.
    @param certificate_data: a string of data that represents the contents of
        the file to be written.
    @return: The path of the ssl_public_key file that the certificate data
        was written to.
    """
    key_filename = os.path.join(
        config.data_path,
        os.path.basename(config.get_config_filename() + ".ssl_public_key"))
    print_text("Writing SSL CA certificate to %s..." % key_filename)
    key_file = open(key_filename, "w")
    key_file.write(certificate_data)
    key_file.close()
    return key_filename


def register(config, on_message=print_text, on_error=sys.exit, reactor=None,
             max_retries=14):
    """Instruct the Landscape Broker to register the client.

    The broker will be instructed to reload its configuration and then to
    attempt a registration.

    @param reactor: The reactor to use. Please only pass reactor when you
        have totally mangled everything with mocker. Otherwise bad things
        will happen.
    @param max_retries: The number of times to retry connecting to the
        landscape client service. The delay between retries is calculated by
        Twisted and increases geometrically. The default of 14 results in a
        total wait time of about 70 seconds.
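
        That estimate is the sum of the geometric series of reconnection
        delays used by the Twisted factory, i.e.: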
initialDelay = 0.05 factor = 1.62 maxDelay = 30 max_retries = 14 0.05 * (1 - 1.62 ** 14) / (1 - 1.62) = 69 seconds """ if reactor is None: reactor = LandscapeReactor() exit_with_error = [] def stop(errors): if not config.ok_no_register: for error in errors: if error is not None: exit_with_error.append(error) connector.disconnect() reactor.stop() def failure(): on_message("Invalid account name or " "registration key.", error=True) return 2 def success(): on_message("System successfully registered.") def exchange_failure(): on_message("We were unable to contact the server. " "Your internet connection may be down. " "The landscape client will continue to try and contact " "the server periodically.", error=True) return 2 def handle_registration_errors(failure): # We'll get invalid credentials through the signal. failure.trap(InvalidCredentialsError, MethodCallError) connector.disconnect() def catch_all(failure): on_message(failure.getTraceback(), error=True) on_message("Unknown error occurred.", error=True) return [2] on_message("Please wait... ", "") time.sleep(2) def got_connection(remote): handlers = {"registration-done": success, "registration-failed": failure, "exchange-failed": exchange_failure} deferreds = [ remote.reload_configuration(), remote.call_on_event(handlers), remote.register().addErrback(handle_registration_errors)] # We consume errors here to ignore errors after the first one. # catch_all will be called for the very first deferred that fails. results = gather_results(deferreds, consume_errors=True) results.addErrback(catch_all) results.addCallback(stop) def got_error(failure): on_message("There was an error communicating with the Landscape" " client.", error=True) on_message("This machine will be registered with the provided " "details when the client runs.", error=True) stop([2]) connector = RemoteBrokerConnector(reactor, config) result = connector.connect(max_retries=max_retries, quiet=True) result.addCallback(got_connection) result.addErrback(got_error) reactor.run() if exit_with_error: on_error(exit_with_error[0]) return result def main(args): config = LandscapeSetupConfiguration() try: config.load(args) except ImportOptionError, error: print_text(str(error), error=True) sys.exit(1) if os.getuid() != 0: sys.exit("landscape-config must be run as root.") if config.init: bootstrap_tree(config) sys.exit(0) # Disable startup on boot and stop the client, if one is running. if config.disable: stop_client_and_disable_init_script() return # Setup client configuration. try: setup(config) except Exception, e: print_text(str(e)) sys.exit("Aborting Landscape configuration") # Attempt to register the client. if config.silent: register(config) else: answer = raw_input("\nRequest a new registration for " "this computer now? (Y/n): ") if not answer.upper().startswith("N"): register(config) landscape-client-14.01/landscape/sysinfo/0000755000175000017500000000000012301414317020161 5ustar andreasandreaslandscape-client-14.01/landscape/sysinfo/network.py0000644000175000017500000000222212301414317022222 0ustar andreasandreasfrom twisted.internet.defer import succeed from landscape.lib.network import get_active_device_info class Network(object): """Show information about active network interfaces. @param get_device_info: Optionally, a function that returns information about network interfaces. Defaults to L{get_active_device_info}. 
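
    A minimal sketch of how a sysinfo registry drives this plugin (the
    C{sysinfo} name is an illustrative placeholder):

        network = Network()
        network.register(sysinfo)
        network.run()  # adds one "IP address for <interface>" header per
                       # active network device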
""" def __init__(self, get_device_info=None): if get_device_info is None: get_device_info = get_active_device_info self._get_device_info = get_device_info def register(self, sysinfo): """Register this plugin with the sysinfo system. @param sysinfo: The sysinfo registry. """ self._sysinfo = sysinfo def run(self): """ Gather information about network interfaces and write it to the sysinfo output. @return: A succeeded C{Deferred}. """ for info in self._get_device_info(): interface = info["interface"] ip_address = info["ip_address"] self._sysinfo.add_header("IP address for %s" % interface, ip_address) return succeed(None) landscape-client-14.01/landscape/sysinfo/loggedinusers.py0000644000175000017500000000076112301414317023411 0ustar andreasandreasfrom landscape.lib.sysstats import get_logged_in_users class LoggedInUsers(object): def register(self, sysinfo): self._sysinfo = sysinfo def run(self): self._sysinfo.add_header("Users logged in", None) def add_header(logged_users): self._sysinfo.add_header("Users logged in", str(len(logged_users))) result = get_logged_in_users() result.addCallback(add_header) result.addErrback(lambda failure: None) return result landscape-client-14.01/landscape/sysinfo/processes.py0000644000175000017500000000157712301414317022553 0ustar andreasandreasfrom twisted.internet.defer import succeed from landscape.lib.process import ProcessInformation class Processes(object): def __init__(self, proc_dir="/proc"): self._proc_dir = proc_dir def register(self, sysinfo): self._sysinfo = sysinfo def run(self): num_processes = 0 num_zombies = 0 info = ProcessInformation(proc_dir=self._proc_dir) for process_info in info.get_all_process_info(): num_processes += 1 if process_info["state"] == "Z": num_zombies += 1 if num_zombies: if num_zombies == 1: msg = "There is 1 zombie process." else: msg = "There are %d zombie processes." % (num_zombies,) self._sysinfo.add_note(msg) self._sysinfo.add_header("Processes", str(num_processes)) return succeed(None) landscape-client-14.01/landscape/sysinfo/deployment.py0000644000175000017500000001077712301414317022727 0ustar andreasandreas"""Deployment code for the sysinfo tool.""" import os import sys from logging import getLogger, Formatter from logging.handlers import RotatingFileHandler from twisted.python.reflect import namedClass from twisted.internet.defer import Deferred, maybeDeferred from landscape.deployment import BaseConfiguration from landscape.sysinfo.sysinfo import SysInfoPluginRegistry, format_sysinfo ALL_PLUGINS = ["Load", "Disk", "Memory", "Temperature", "Processes", "LoggedInUsers", "LandscapeLink", "Network"] class SysInfoConfiguration(BaseConfiguration): """Specialized configuration for the Landscape sysinfo tool.""" default_config_filenames = ("/etc/landscape/client.conf",) if os.getuid() != 0: default_config_filenames += ( os.path.expanduser("~/.landscape/sysinfo.conf"),) config_section = "sysinfo" def make_parser(self): """ Specialize L{Configuration.make_parser}, adding any sysinfo-specific options. """ parser = super(SysInfoConfiguration, self).make_parser() parser.add_option("--sysinfo-plugins", metavar="PLUGIN_LIST", help="Comma-delimited list of sysinfo plugins to " "use. Default is to use all plugins.") parser.add_option("--exclude-sysinfo-plugins", metavar="PLUGIN_LIST", help="Comma-delimited list of sysinfo plugins to " "NOT use. 
This always takes precedence over "
                               "plugins to include.")
        parser.epilog = "Default plugins: %s" % (", ".join(ALL_PLUGINS))
        return parser

    def get_plugin_names(self, plugin_spec):
        return [x.strip() for x in plugin_spec.split(",")]

    def get_plugins(self):
        if self.sysinfo_plugins is None:
            include = ALL_PLUGINS
        else:
            include = self.get_plugin_names(self.sysinfo_plugins)
        if self.exclude_sysinfo_plugins is None:
            exclude = []
        else:
            exclude = self.get_plugin_names(self.exclude_sysinfo_plugins)
        plugins = [x for x in include if x not in exclude]
        return [namedClass("landscape.sysinfo.%s.%s"
                           % (plugin_name.lower(), plugin_name))()
                for plugin_name in plugins]


def get_landscape_log_directory(landscape_dir=None):
    """
    Work out the correct path to store logs in, depending on the effective
    user id of the current process.
    """
    if landscape_dir is None:
        if os.getuid() == 0:
            landscape_dir = "/var/log/landscape"
        else:
            landscape_dir = os.path.expanduser("~/.landscape")
    return landscape_dir


def setup_logging(landscape_dir=None):
    landscape_dir = get_landscape_log_directory(landscape_dir)
    logger = getLogger("landscape-sysinfo")
    logger.propagate = False
    if not os.path.isdir(landscape_dir):
        os.mkdir(landscape_dir)
    log_filename = os.path.join(landscape_dir, "sysinfo.log")
    handler = RotatingFileHandler(log_filename,
                                  maxBytes=500 * 1024, backupCount=1)
    logger.addHandler(handler)
    handler.setFormatter(Formatter("%(asctime)s %(levelname)-8s %(message)s"))


def run(args, reactor=None, sysinfo=None):
    """
    @param reactor: The reactor to (optionally) run the sysinfo plugins in.
    """
    try:
        setup_logging()
    except IOError, e:
        sys.exit("Unable to setup logging. %s" % e)

    if sysinfo is None:
        sysinfo = SysInfoPluginRegistry()

    config = SysInfoConfiguration()
    config.load(args)

    for plugin in config.get_plugins():
        sysinfo.add(plugin)

    def show_output(result):
        print format_sysinfo(sysinfo.get_headers(), sysinfo.get_notes(),
                             sysinfo.get_footnotes(), indent=" ")

    def run_sysinfo():
        return sysinfo.run().addCallback(show_output)

    if reactor is not None:
        # In case any plugins run processes or do other things that require
        # the reactor to already be started, we delay them until the reactor
        # is running.
        done = Deferred()
        reactor.callWhenRunning(
            lambda: maybeDeferred(run_sysinfo).chainDeferred(done))

        def stop_reactor(result):
            # We won't need to use callLater here once we use Twisted >8.
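            # (As the tests note, because of the Twisted bug referenced
            # below, reactor.stop() must happen in a scheduled call rather
            # than synchronously from this callback.)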
# tm:3011 reactor.callLater(0, reactor.stop) return result done.addBoth(stop_reactor) reactor.run() else: done = run_sysinfo() return done landscape-client-14.01/landscape/sysinfo/testplugin.py0000644000175000017500000000103512301414317022730 0ustar andreasandreasfrom twisted.internet.defer import succeed current_instance = None class TestPlugin(object): def __init__(self): self.sysinfo = None self.has_run = False global current_instance current_instance = self def register(self, sysinfo): self.sysinfo = sysinfo def run(self): self.has_run = True self.sysinfo.add_header("Test header", "Test value") self.sysinfo.add_note("Test note") self.sysinfo.add_footnote("Test footnote") return succeed(None) landscape-client-14.01/landscape/sysinfo/tests/0000755000175000017500000000000012301414317021323 5ustar andreasandreaslandscape-client-14.01/landscape/sysinfo/tests/test_sysinfo.py0000644000175000017500000003524612301414317024440 0ustar andreasandreasfrom cStringIO import StringIO from logging import getLogger, StreamHandler import os from twisted.internet.defer import Deferred, succeed, fail from landscape.sysinfo.sysinfo import SysInfoPluginRegistry, format_sysinfo from landscape.plugin import PluginRegistry from landscape.tests.helpers import LandscapeTest class SysInfoPluginRegistryTest(LandscapeTest): def setUp(self): super(SysInfoPluginRegistryTest, self).setUp() self.sysinfo = SysInfoPluginRegistry() self.sysinfo_logfile = StringIO() self.handler = StreamHandler(self.sysinfo_logfile) self.logger = getLogger("landscape-sysinfo") self.logger.addHandler(self.handler) def tearDown(self): super(SysInfoPluginRegistryTest, self).tearDown() self.logger.removeHandler(self.handler) def test_is_plugin_registry(self): self.assertTrue(isinstance(self.sysinfo, PluginRegistry)) def test_add_and_get_headers(self): self.sysinfo.add_header("Memory usage", "65%") self.sysinfo.add_header("Swap usage", "None") self.assertEqual( self.sysinfo.get_headers(), [("Memory usage", "65%"), ("Swap usage", "None")]) self.assertEqual(self.sysinfo.get_notes(), []) self.assertEqual(self.sysinfo.get_footnotes(), []) def test_add_same_header_twice(self): self.sysinfo.add_header("Header1", "Value1") self.sysinfo.add_header("Header2", "Value2") self.sysinfo.add_header("Header3", "Value3") self.sysinfo.add_header("Header2", "Value4") self.assertEqual(self.sysinfo.get_headers(), [("Header1", "Value1"), ("Header2", "Value4"), ("Header3", "Value3")]) def test_add_header_with_none_value(self): self.sysinfo.add_header("Header1", "Value1") self.sysinfo.add_header("Header2", None) self.sysinfo.add_header("Header3", "Value3") self.assertEqual(self.sysinfo.get_headers(), [("Header1", "Value1"), ("Header3", "Value3")]) self.sysinfo.add_header("Header2", "Value2") self.assertEqual(self.sysinfo.get_headers(), [("Header1", "Value1"), ("Header2", "Value2"), ("Header3", "Value3")]) def test_add_and_get_notes(self): self.sysinfo.add_note("Your laptop is burning!") self.sysinfo.add_note("Oh, your house too, btw.") self.assertEqual( self.sysinfo.get_notes(), ["Your laptop is burning!", "Oh, your house too, btw."]) self.assertEqual(self.sysinfo.get_headers(), []) self.assertEqual(self.sysinfo.get_footnotes(), []) def test_add_and_get_footnotes(self): self.sysinfo.add_footnote("Graphs available at http://graph") self.sysinfo.add_footnote("Go! Go!") self.assertEqual( self.sysinfo.get_footnotes(), ["Graphs available at http://graph", "Go! 
Go!"]) self.assertEqual(self.sysinfo.get_headers(), []) self.assertEqual(self.sysinfo.get_notes(), []) def test_run(self): class Plugin(object): def __init__(self, deferred): self._deferred = deferred def register(self, registry): pass def run(self): return self._deferred plugin_deferred1 = Deferred() plugin_deferred2 = Deferred() plugin1 = Plugin(plugin_deferred1) plugin2 = Plugin(plugin_deferred2) self.sysinfo.add(plugin1) self.sysinfo.add(plugin2) def check_result(result): self.assertEqual(result, [123, 456]) deferred = self.sysinfo.run() deferred.addBoth(check_result) self.assertEqual(deferred.called, False) plugin_deferred1.callback(123) self.assertEqual(deferred.called, False) plugin_deferred2.callback(456) self.assertEqual(deferred.called, True) plugin_exception_message = ( "There were exceptions while processing one or more plugins. " "See %s/sysinfo.log for more information.") def test_plugins_run_after_synchronous_error(self): """ Even when a plugin raises a synchronous error, other plugins will continue to be run. """ self.log_helper.ignore_errors(ZeroDivisionError) plugins_what_run = [] class BadPlugin(object): def register(self, registry): pass def run(self): plugins_what_run.append(self) 1 / 0 class GoodPlugin(object): def register(self, registry): pass def run(self): plugins_what_run.append(self) return succeed(None) plugin1 = BadPlugin() plugin2 = GoodPlugin() self.sysinfo.add(plugin1) self.sysinfo.add(plugin2) self.sysinfo.run() self.assertEqual(plugins_what_run, [plugin1, plugin2]) log = self.sysinfo_logfile.getvalue() message = "BadPlugin plugin raised an exception." self.assertIn(message, log) self.assertIn("1 / 0", log) self.assertIn("ZeroDivisionError", log) path = os.path.expanduser("~/.landscape") self.assertEqual( self.sysinfo.get_notes(), [self.plugin_exception_message % path]) def test_asynchronous_errors_logged(self): self.log_helper.ignore_errors(ZeroDivisionError) class BadPlugin(object): def register(self, registry): pass def run(self): return fail(ZeroDivisionError("yay")) plugin = BadPlugin() self.sysinfo.add(plugin) self.sysinfo.run() log = self.sysinfo_logfile.getvalue() message = "BadPlugin plugin raised an exception." self.assertIn(message, log) self.assertIn("ZeroDivisionError: yay", log) path = os.path.expanduser("~/.landscape") self.assertEqual( self.sysinfo.get_notes(), [self.plugin_exception_message % path]) def test_multiple_exceptions_get_one_note(self): self.log_helper.ignore_errors(ZeroDivisionError) class RegularBadPlugin(object): def register(self, registry): pass def run(self): 1 / 0 class AsyncBadPlugin(object): def register(self, registry): pass def run(self): return fail(ZeroDivisionError("Hi")) plugin1 = RegularBadPlugin() plugin2 = AsyncBadPlugin() self.sysinfo.add(plugin1) self.sysinfo.add(plugin2) self.sysinfo.run() path = os.path.expanduser("~/.landscape") self.assertEqual( self.sysinfo.get_notes(), [self.plugin_exception_message % path]) def test_exception_running_as_privileged_user(self): """ If a Plugin fails while running and the sysinfo binary is running with a uid of 0, Landscape sysinfo should write to the system logs directory. 
""" uid_mock = self.mocker.replace("os.getuid") uid_mock() self.mocker.count(1, max=None) self.mocker.result(0) self.mocker.replay() self.log_helper.ignore_errors(ZeroDivisionError) class AsyncBadPlugin(object): def register(self, registry): pass def run(self): return fail(ZeroDivisionError("Hi")) plugin = AsyncBadPlugin() self.sysinfo.add(plugin) self.sysinfo.run() path = "/var/log/landscape" self.assertEqual( self.sysinfo.get_notes(), [self.plugin_exception_message % path]) class FormatTest(LandscapeTest): def test_no_headers(self): output = format_sysinfo([]) self.assertEqual(output, "") def test_one_header(self): output = format_sysinfo([("Header", "Value")]) self.assertEqual(output, "Header: Value") def test_parallel_headers_with_just_enough_space(self): output = format_sysinfo([("Header1", "Value1"), ("Header2", "Value2")], width=34) self.assertEqual(output, "Header1: Value1 Header2: Value2") def test_stacked_headers_which_barely_doesnt_fit(self): output = format_sysinfo([("Header1", "Value1"), ("Header2", "Value2")], width=33) self.assertEqual(output, "Header1: Value1\nHeader2: Value2") def test_stacked_headers_with_clearly_insufficient_space(self): output = format_sysinfo([("Header1", "Value1"), ("Header2", "Value2")], width=1) self.assertEqual(output, "Header1: Value1\n" "Header2: Value2") def test_indent_headers_in_parallel_with_just_enough_space(self): output = format_sysinfo([("Header1", "Value1"), ("Header2", "Value2")], indent=">>", width=36) self.assertEqual(output, ">>Header1: Value1 Header2: Value2") def test_indent_headers_stacked_which_barely_doesnt_fit(self): output = format_sysinfo([("Header1", "Value1"), ("Header2", "Value2")], indent=">>", width=35) self.assertEqual(output, ">>Header1: Value1\n" ">>Header2: Value2") def test_parallel_and_stacked_headers(self): headers = [("Header%d" % i, "Value%d" % i) for i in range(1, 6)] output = format_sysinfo(headers) self.assertEqual( output, "Header1: Value1 Header3: Value3 Header5: Value5\n" "Header2: Value2 Header4: Value4") def test_value_alignment(self): output = format_sysinfo([("Header one", "Value one"), ("Header2", "Value2"), ("Header3", "Value3"), ("Header4", "Value4"), ("Header5", "Value five")], width=45) # These headers and values were crafted to cover several cases: # # - Header padding (Header2 and Header3) # - Value padding (Value2) # - Lack of value padding due to a missing last column (Value3) # - Lack of value padding due to being a last column (Value4) # self.assertEqual(output, "Header one: Value one Header4: Value4\n" "Header2: Value2 Header5: Value five\n" "Header3: Value3") def test_one_note(self): self.assertEqual(format_sysinfo(notes=["Something's wrong!"]), "=> Something's wrong!") def test_more_notes(self): self.assertEqual(format_sysinfo(notes=["Something's wrong", "You should look at it", "Really"]), "=> Something's wrong\n" "=> You should look at it\n" "=> Really") def test_indented_notes(self): self.assertEqual(format_sysinfo(notes=["Something's wrong", "You should look at it", "Really"], indent=">>"), ">>=> Something's wrong\n" ">>=> You should look at it\n" ">>=> Really") def test_header_and_note(self): self.assertEqual(format_sysinfo(headers=[("Header", "Value")], notes=["Note"]), "Header: Value\n" "\n" "=> Note") def test_one_footnote(self): # Pretty dumb. self.assertEqual(format_sysinfo(footnotes=["Graphs at http://..."]), "Graphs at http://...") def test_more_footnotes(self): # Still dumb. 
self.assertEqual(format_sysinfo(footnotes=["Graphs at http://...", "Lunch at ..."]), "Graphs at http://...\n" "Lunch at ...") def test_indented_footnotes(self): # Barely more interesting. self.assertEqual(format_sysinfo(footnotes=["Graphs at http://...", "Lunch at ..."], indent=">>"), ">>Graphs at http://...\n" ">>Lunch at ...") def test_header_and_footnote(self): # Warming up. self.assertEqual(format_sysinfo(headers=[("Header", "Value")], footnotes=["Footnote"]), "Header: Value\n" "\n" "Footnote" ) def test_header_note_and_footnote(self): # Nice. self.assertEqual(format_sysinfo(headers=[("Header", "Value")], notes=["Note"], footnotes=["Footnote"]), "Header: Value\n" "\n" "=> Note\n" "\n" "Footnote" ) def test_indented_headers_notes_and_footnotes(self): # Hot! self.assertEqual(format_sysinfo(headers=[("Header1", "Value1"), ("Header2", "Value2"), ("Header3", "Value3")], notes=["Note1", "Note2"], footnotes=["Footnote1", "Footnote2"], indent=" ", width=36), " Header1: Value1 Header3: Value3\n" " Header2: Value2\n" "\n" " => Note1\n" " => Note2\n" "\n" " Footnote1\n" " Footnote2" ) def test_wrap_long_notes(self): self.assertEqual( format_sysinfo(notes=[ "I do believe that a very long note, such as one that is " "longer than about 50 characters, should wrap at the " "specified width."], width=50, indent="Z"), """\ Z=> I do believe that a very long note, such as one that is longer than about 50 characters, should wrap at the specified width.""") landscape-client-14.01/landscape/sysinfo/tests/test_loggedinusers.py0000644000175000017500000000334512301414317025613 0ustar andreasandreasfrom landscape.sysinfo.sysinfo import SysInfoPluginRegistry from landscape.sysinfo.loggedinusers import LoggedInUsers from landscape.lib.tests.test_sysstats import FakeWhoQTest class LoggedInUsersTest(FakeWhoQTest): def setUp(self): super(LoggedInUsersTest, self).setUp() self.logged_users = LoggedInUsers() self.sysinfo = SysInfoPluginRegistry() self.sysinfo.add(self.logged_users) def test_run_adds_header(self): self.fake_who("one two three") result = self.logged_users.run() def check_headers(result): self.assertEqual(self.sysinfo.get_headers(), [("Users logged in", "3")]) return result.addCallback(check_headers) def test_order_is_preserved_even_if_asynchronous(self): self.fake_who("one two three") self.sysinfo.add_header("Before", "1") result = self.logged_users.run() self.sysinfo.add_header("After", "2") def check_headers(result): self.assertEqual(self.sysinfo.get_headers(), [("Before", "1"), ("Users logged in", "3"), ("After", "2")]) return result.addCallback(check_headers) def test_ignore_errors_on_command(self): self.fake_who("") who = open(self.who_path, "w") who.write("#!/bin/sh\necho ERROR >&2\nexit 1\n") who.close() # Nothing bad should happen if who isn't installed, or # if anything else happens with the command execution. 
result = self.logged_users.run() def check_headers(result): self.assertEqual(self.sysinfo.get_headers(), []) return result.addCallback(check_headers) landscape-client-14.01/landscape/sysinfo/tests/test_load.py0000644000175000017500000000140712301414317023655 0ustar andreasandreasfrom landscape.sysinfo.sysinfo import SysInfoPluginRegistry from landscape.sysinfo.load import Load from landscape.tests.helpers import LandscapeTest class LoadTest(LandscapeTest): def setUp(self): super(LoadTest, self).setUp() self.load = Load() self.sysinfo = SysInfoPluginRegistry() self.sysinfo.add(self.load) def test_run_returns_succeeded_deferred(self): self.assertIs(None, self.successResultOf(self.load.run())) def test_run_adds_header(self): mock = self.mocker.replace("os.getloadavg") mock() self.mocker.result((1.5, 0, 0)) self.mocker.replay() self.load.run() self.assertEqual(self.sysinfo.get_headers(), [("System load", "1.5")]) landscape-client-14.01/landscape/sysinfo/tests/test_deployment.py0000644000175000017500000002441612301414317025123 0ustar andreasandreasimport os from logging.handlers import RotatingFileHandler from logging import getLogger from twisted.internet.defer import Deferred from landscape.sysinfo.deployment import ( SysInfoConfiguration, ALL_PLUGINS, run, setup_logging, get_landscape_log_directory) from landscape.sysinfo.testplugin import TestPlugin from landscape.sysinfo.sysinfo import SysInfoPluginRegistry from landscape.sysinfo.load import Load from landscape.tests.helpers import LandscapeTest, StandardIOHelper from landscape.tests.mocker import ARGS, KWARGS class DeploymentTest(LandscapeTest): def setUp(self): super(DeploymentTest, self).setUp() class TestConfiguration(SysInfoConfiguration): default_config_filenames = [self.makeFile("")] self.configuration = TestConfiguration() def test_get_plugins(self): self.configuration.load(["--sysinfo-plugins", "Load,TestPlugin", "-d", self.makeDir()]) plugins = self.configuration.get_plugins() self.assertEqual(len(plugins), 2) self.assertTrue(isinstance(plugins[0], Load)) self.assertTrue(isinstance(plugins[1], TestPlugin)) def test_get_all_plugins(self): self.configuration.load(["-d", self.makeFile()]) plugins = self.configuration.get_plugins() self.assertEqual(len(plugins), len(ALL_PLUGINS)) def test_exclude_plugins(self): exclude = ",".join(x for x in ALL_PLUGINS if x != "Load") self.configuration.load(["--exclude-sysinfo-plugins", exclude, "-d", self.makeDir()]) plugins = self.configuration.get_plugins() self.assertEqual(len(plugins), 1) self.assertTrue(isinstance(plugins[0], Load)) def test_config_file(self): filename = self.makeFile() f = open(filename, "w") f.write("[sysinfo]\nsysinfo_plugins = TestPlugin\n") f.close() self.configuration.load(["--config", filename, "-d", self.makeDir()]) plugins = self.configuration.get_plugins() self.assertEqual(len(plugins), 1) self.assertTrue(isinstance(plugins[0], TestPlugin)) class FakeReactor(object): """ Something that's easier to understand and more reusable than a bunch of mocker """ def __init__(self): self.queued_calls = [] self.scheduled_calls = [] self.running = False def callWhenRunning(self, callable): self.queued_calls.append(callable) def run(self): self.running = True def callLater(self, seconds, callable, *args, **kwargs): self.scheduled_calls.append((seconds, callable, args, kwargs)) def stop(self): self.running = False class RunTest(LandscapeTest): helpers = [StandardIOHelper] def setUp(self): super(RunTest, self).setUp() self._old_filenames = 
SysInfoConfiguration.default_config_filenames SysInfoConfiguration.default_config_filenames = [self.makeFile("")] def tearDown(self): super(RunTest, self).tearDown() SysInfoConfiguration.default_config_filenames = self._old_filenames logger = getLogger("landscape-sysinfo") for handler in logger.handlers[:]: logger.removeHandler(handler) def test_registry_runs_plugin_and_gets_correct_information(self): run(["--sysinfo-plugins", "TestPlugin"]) from landscape.sysinfo.testplugin import current_instance self.assertEqual(current_instance.has_run, True) sysinfo = current_instance.sysinfo self.assertEqual(sysinfo.get_headers(), [("Test header", "Test value")]) self.assertEqual(sysinfo.get_notes(), ["Test note"]) self.assertEqual(sysinfo.get_footnotes(), ["Test footnote"]) def test_format_sysinfo_gets_correct_information(self): format_sysinfo = self.mocker.replace("landscape.sysinfo.sysinfo." "format_sysinfo") format_sysinfo([("Test header", "Test value")], ["Test note"], ["Test footnote"], indent=" ") format_sysinfo(ARGS, KWARGS) self.mocker.count(0) self.mocker.replay() run(["--sysinfo-plugins", "TestPlugin"]) def test_format_sysinfo_output_is_printed(self): format_sysinfo = self.mocker.replace("landscape.sysinfo.sysinfo." "format_sysinfo") format_sysinfo(ARGS, KWARGS) self.mocker.result("Hello there!") self.mocker.replay() run(["--sysinfo-plugins", "TestPlugin"]) self.assertEqual(self.stdout.getvalue(), "Hello there!\n") def test_output_is_only_displayed_once_deferred_fires(self): deferred = Deferred() sysinfo = self.mocker.patch(SysInfoPluginRegistry) sysinfo.run() self.mocker.passthrough() self.mocker.result(deferred) self.mocker.replay() run(["--sysinfo-plugins", "TestPlugin"]) self.assertNotIn("Test note", self.stdout.getvalue()) deferred.callback(None) self.assertIn("Test note", self.stdout.getvalue()) def test_default_arguments_load_default_plugins(self): result = run([]) def check_result(result): self.assertIn("System load", self.stdout.getvalue()) self.assertNotIn("Test note", self.stdout.getvalue()) return result.addCallback(check_result) def test_plugins_called_after_reactor_starts(self): """ Plugins are invoked after the reactor has started, so that they can spawn processes without concern for race conditions. """ reactor = FakeReactor() d = run(["--sysinfo-plugins", "TestPlugin"], reactor=reactor) self.assertEqual(self.stdout.getvalue(), "") self.assertTrue(reactor.running) for x in reactor.queued_calls: x() self.assertEqual( self.stdout.getvalue(), " Test header: Test value\n\n => Test note\n\n Test footnote\n") return d def test_stop_scheduled_in_callback(self): """ Because of tm:3011, reactor.stop() must be called in a scheduled call. """ reactor = FakeReactor() d = run(["--sysinfo-plugins", "TestPlugin"], reactor=reactor) for x in reactor.queued_calls: x() self.assertEqual(reactor.scheduled_calls, [(0, reactor.stop, (), {})]) return d def test_stop_reactor_even_when_sync_exception_from_sysinfo_run(self): """ Even when there's a synchronous exception from run_sysinfo, the reactor should be stopped. 
""" self.log_helper.ignore_errors(ZeroDivisionError) reactor = FakeReactor() sysinfo = SysInfoPluginRegistry() sysinfo.run = lambda: 1 / 0 d = run(["--sysinfo-plugins", "TestPlugin"], reactor=reactor, sysinfo=sysinfo) for x in reactor.queued_calls: x() self.assertEqual(reactor.scheduled_calls, [(0, reactor.stop, (), {})]) return self.assertFailure(d, ZeroDivisionError) def test_get_landscape_log_directory_unprivileged(self): """ If landscape-sysinfo is running as a non-privileged user the log directory is stored in their home directory. """ self.assertEqual(get_landscape_log_directory(), os.path.expanduser("~/.landscape")) def test_get_landscape_log_directory_privileged(self): """ If landscape-sysinfo is running as a privileged user, then the logs should be stored in the system-wide log directory. """ uid_mock = self.mocker.replace("os.getuid") uid_mock() self.mocker.result(0) self.mocker.replay() self.assertEqual(get_landscape_log_directory(), "/var/log/landscape") def test_wb_logging_setup(self): """ setup_logging sets up a "landscape-sysinfo" logger which rotates every week and does not propagate logs to higher-level handlers. """ # This hecka whiteboxes but there aren't any underscores! logger = getLogger("landscape-sysinfo") self.assertEqual(logger.handlers, []) setup_logging(landscape_dir=self.makeDir()) logger = getLogger("landscape-sysinfo") self.assertEqual(len(logger.handlers), 1) handler = logger.handlers[0] self.assertTrue(isinstance(handler, RotatingFileHandler)) self.assertEqual(handler.maxBytes, 500 * 1024) self.assertEqual(handler.backupCount, 1) self.assertFalse(logger.propagate) def test_setup_logging_logs_to_var_log_if_run_as_root(self): mock_os = self.mocker.replace("os") mock_os.getuid() self.mocker.result(0) # Ugh, sorry mock_os.path.isdir("/var/log/landscape") self.mocker.result(False) mock_os.mkdir("/var/log/landscape") self.mocker.replace("__builtin__.open", passthrough=False)( "/var/log/landscape/sysinfo.log", "a") self.mocker.replay() logger = getLogger("landscape-sysinfo") self.assertEqual(logger.handlers, []) setup_logging() handler = logger.handlers[0] self.assertTrue(isinstance(handler, RotatingFileHandler)) self.assertEqual(handler.baseFilename, "/var/log/landscape/sysinfo.log") def test_create_log_dir(self): log_dir = self.makeFile() self.assertFalse(os.path.exists(log_dir)) setup_logging(landscape_dir=log_dir) self.assertTrue(os.path.exists(log_dir)) def test_run_sets_up_logging(self): setup_logging_mock = self.mocker.replace( "landscape.sysinfo.deployment.setup_logging") setup_logging_mock() self.mocker.replay() run(["--sysinfo-plugins", "TestPlugin"]) def test_run_setup_logging_exits_gracefully(self): setup_logging_mock = self.mocker.replace( "landscape.sysinfo.deployment.setup_logging") setup_logging_mock() self.mocker.throw(IOError("Read-only filesystem.")) self.mocker.replay() error = self.assertRaises(SystemExit, run, ["--sysinfo-plugins", "TestPlugin"]) self.assertEqual(error.message, "Unable to setup logging. 
Read-only filesystem.") landscape-client-14.01/landscape/sysinfo/tests/test_processes.py0000644000175000017500000000414012301414317024741 0ustar andreasandreasfrom twisted.internet.defer import Deferred from landscape.sysinfo.sysinfo import SysInfoPluginRegistry from landscape.sysinfo.processes import Processes from landscape.tests.helpers import LandscapeTest, ProcessDataBuilder class ProcessesTest(LandscapeTest): def setUp(self): super(ProcessesTest, self).setUp() self.fake_proc = self.makeDir() self.processes = Processes(proc_dir=self.fake_proc) self.sysinfo = SysInfoPluginRegistry() self.sysinfo.add(self.processes) self.builder = ProcessDataBuilder(self.fake_proc) def test_run_returns_succeeded_deferred(self): result = self.processes.run() self.assertTrue(isinstance(result, Deferred)) called = [] def callback(result): called.append(True) result.addCallback(callback) self.assertTrue(called) def test_number_of_processes(self): """The number of processes is added as a header.""" for i in range(3): self.builder.create_data(i, self.builder.RUNNING, uid=0, gid=0, process_name="foo%d" % (i,)) self.processes.run() self.assertEqual(self.sysinfo.get_headers(), [("Processes", "3")]) def test_no_zombies(self): self.processes.run() self.assertEqual(self.sysinfo.get_notes(), []) def test_number_of_zombies(self): """The number of zombies is added as a note.""" self.builder.create_data(99, self.builder.ZOMBIE, uid=0, gid=0, process_name="ZOMBERS") self.processes.run() self.assertEqual(self.sysinfo.get_notes(), ["There is 1 zombie process."]) def test_multiple_zombies(self): """Stupid English, and its plurality""" for i in range(2): self.builder.create_data(i, self.builder.ZOMBIE, uid=0, gid=0, process_name="ZOMBERS%d" % (i,)) self.processes.run() self.assertEqual(self.sysinfo.get_notes(), ["There are 2 zombie processes."]) landscape-client-14.01/landscape/sysinfo/tests/test_disk.py0000644000175000017500000002300312301414317023664 0ustar andreasandreasfrom twisted.internet.defer import Deferred from landscape.sysinfo.sysinfo import SysInfoPluginRegistry from landscape.sysinfo.disk import Disk, format_megabytes from landscape.tests.helpers import LandscapeTest class DiskTest(LandscapeTest): def setUp(self): super(DiskTest, self).setUp() self.mount_file = self.makeFile("") self.stat_results = {} self.disk = Disk(mounts_file=self.mount_file, statvfs=self.stat_results.get) self.sysinfo = SysInfoPluginRegistry() self.sysinfo.add(self.disk) def add_mount(self, point, block_size=4096, capacity=1000, unused=1000, fs="ext3", device=None): if device is None: device = "/dev/" + point.replace("/", "_") self.stat_results[point] = (block_size, 0, capacity, unused, 0, 0, 0, 0, 0) f = open(self.mount_file, "a") f.write("/dev/%s %s %s rw 0 0\n" % (device, point, fs)) f.close() def test_run_returns_succeeded_deferred(self): self.add_mount("/") result = self.disk.run() self.assertTrue(isinstance(result, Deferred)) called = [] def callback(result): called.append(True) result.addCallback(callback) self.assertTrue(called) def test_everything_is_cool(self): self.add_mount("/") self.disk.run() self.assertEqual(self.sysinfo.get_notes(), []) def test_zero_total_space(self): """ When the total space for a mount is 0, the plugin shouldn't flip out and kill everybody. This is a regression test for a ZeroDivisionError! 
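(The usage() helper in landscape.sysinfo.disk divides used space by total space, so a mount reporting zero total space would trigger exactly that division by zero unless the plugin skips it.)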
""" self.add_mount("/sys", capacity=0, unused=0) self.add_mount("/") self.disk.run() self.assertEqual(self.sysinfo.get_notes(), []) def test_zero_total_space_for_home(self): """ When the total space for /home is 0, we'll fall back to /. """ self.add_mount("/home", capacity=0, unused=0) self.add_mount("/", capacity=1000, unused=1000) self.disk.run() self.assertEqual(self.sysinfo.get_headers(), [("Usage of /", "0.0% of 3MB")]) def test_zero_total_space_for_home_and_root(self): """ In a very strange situation, when both /home and / have a capacity of 0, we'll show 'unknown' for the usage of /. """ self.add_mount("/home", capacity=0, unused=0) self.add_mount("/", capacity=0, unused=0) self.disk.run() self.assertEqual(self.sysinfo.get_headers(), [("Usage of /", "unknown")]) def test_over_85_percent(self): """ When a filesystem is using more than 85% capacity, a note will be displayed. """ self.add_mount("/", capacity=1000000, unused=150000) self.disk.run() self.assertEqual(self.sysinfo.get_notes(), ["/ is using 85.0% of 3.81GB"]) def test_under_85_percent(self): """No note is displayed for a filesystem using less than 85% capacity. """ self.add_mount("/", block_size=1024, capacity=1000000, unused=151000) self.disk.run() self.assertEqual(self.sysinfo.get_notes(), []) def test_multiple_notes(self): """ A note will be displayed for each filesystem using 85% or more capacity. """ self.add_mount("/", block_size=1024, capacity=1000000, unused=150000) self.add_mount( "/use", block_size=2048, capacity=2000000, unused=200000) self.add_mount( "/emp", block_size=4096, capacity=3000000, unused=460000) self.disk.run() self.assertEqual(self.sysinfo.get_notes(), ["/ is using 85.0% of 976MB", "/use is using 90.0% of 3.81GB"]) def test_format_megabytes(self): self.assertEqual(format_megabytes(100), "100MB") self.assertEqual(format_megabytes(1023), "1023MB") self.assertEqual(format_megabytes(1024), "1.00GB") self.assertEqual(format_megabytes(1024 * 1024 - 1), "1024.00GB") self.assertEqual(format_megabytes(1024 * 1024), "1.00TB") def test_header(self): """ A header is printed with usage for the 'primary' filesystem, where 'primary' means 'filesystem that has /home on it'. """ self.add_mount("/") self.add_mount("/home", capacity=1024, unused=512) self.disk.run() self.assertEqual(self.sysinfo.get_headers(), [("Usage of /home", "50.0% of 4MB")]) def test_header_shows_actual_filesystem(self): """ If /home isn't on its own filesystem, the header will show whatever filesystem it's a part of. """ self.add_mount("/", capacity=1024, unused=512) self.disk.run() self.assertEqual(self.sysinfo.get_headers(), [("Usage of /", "50.0% of 4MB")]) def test_ignore_boring_filesystem_types(self): """ Optical drives (those with filesystems of udf or iso9660) should be ignored. Also, gvfs mounts should be ignored, because they actually reflect the size of /. 
""" self.add_mount("/", capacity=1000, unused=1000, fs="ext3") self.add_mount("/media/dvdrom", capacity=1000, unused=0, fs="udf") self.add_mount("/media/cdrom", capacity=1000, unused=0, fs="iso9660") self.add_mount("/home/radix/.gvfs", capacity=1000, unused=0, fs="fuse.gvfs-fuse-daemon") self.add_mount("/mnt/livecd", capacity=1000, unused=0, fs="squashfs") self.add_mount("/home/mg/.Private", capacity=1000, unused=0, fs="ecryptfs") self.disk.run() self.assertEqual(self.sysinfo.get_notes(), []) def test_no_duplicate_roots(self): self.add_mount("/", capacity=0, unused=0, fs="ext4") self.add_mount("/", capacity=1000, unused=1, fs="ext3") self.disk.run() self.assertEqual(self.sysinfo.get_notes(), ["/ is using 100.0% of 3MB"]) def test_no_duplicate_devices(self): self.add_mount("/", capacity=1000, unused=1, device="/dev/horgle") self.add_mount("/dev/.static/dev", capacity=1000, unused=1, device="/dev/horgle") self.disk.run() self.assertEqual(self.sysinfo.get_notes(), ["/ is using 100.0% of 3MB"]) def test_shorter_mount_point_in_case_of_duplicate_devices(self): self.add_mount("/dev/.static/dev", capacity=1000, unused=1, device="/dev/horgle") self.add_mount("/", capacity=1000, unused=1, device="/dev/horgle") self.disk.run() self.assertEqual(self.sysinfo.get_notes(), ["/ is using 100.0% of 3MB"]) def test_shorter_not_lexical(self): """ This is a test for a fix for a regression, because I accidentally took the lexically "smallest" mount point instead of the shortest one. """ self.add_mount("/") self.add_mount("/abc", capacity=1000, unused=1, device="/dev/horgle") self.add_mount("/b", capacity=1000, unused=1, device="/dev/horgle") self.disk.run() self.assertEqual(self.sysinfo.get_notes(), ["/b is using 100.0% of 3MB"]) def test_duplicate_device_and_duplicate_mountpoint_horribleness(self): """ Consider the following: rootfs / rootfs rw 0 0 /dev/disk/by-uuid/c4144... / ext3 rw,... 0 0 /dev/disk/by-uuid/c4144... /dev/.static/dev ext3 rw,... 0 0 (taken from an actual /proc/mounts in Hardy) "/", the mount point, is duplicate-mounted *and* (one of) the devices mounted to "/" is also duplicate-mounted. Only "/" should be warned about in this case. """ self.add_mount("/", capacity=0, unused=0, device="rootfs") self.add_mount("/", capacity=1000, unused=1, device="/dev/horgle") self.add_mount("/dev/.static/dev", capacity=1000, unused=1, device="/dev/horgle") self.disk.run() self.assertEqual(self.sysinfo.get_notes(), ["/ is using 100.0% of 3MB"]) def test_ignore_filesystems(self): """ Network filesystems like nfs are ignored, because they can stall randomly in stat. """ self.add_mount("/", capacity=1000, unused=1000, fs="ext3") self.add_mount("/mnt/disk1", capacity=1000, unused=0, fs="nfs") self.disk.run() self.assertEqual(self.sysinfo.get_notes(), []) def test_nfs_as_root(self): """ If / is not a whitelist filesystem, we don't report the usage of /home. """ self.add_mount("/", capacity=1000, unused=1000, fs="nfs") self.disk.run() self.assertEqual(self.sysinfo.get_notes(), []) self.assertEqual(self.sysinfo.get_headers(), [("Usage of /home", "unknown")]) def test_nfs_as_root_but_not_home(self): """ If / is not a whitelist filesystem, but that /home is with a weird stat value, we don't report the usage of /home. 
""" self.add_mount("/", capacity=1000, unused=1000, fs="nfs") self.add_mount("/home", capacity=0, unused=0, fs="ext3") self.disk.run() self.assertEqual(self.sysinfo.get_notes(), []) self.assertEqual(self.sysinfo.get_headers(), [("Usage of /home", "unknown")]) landscape-client-14.01/landscape/sysinfo/tests/test_temperature.py0000644000175000017500000000324612301414317025276 0ustar andreasandreasimport os from landscape.sysinfo.sysinfo import SysInfoPluginRegistry from landscape.sysinfo.temperature import Temperature from landscape.lib.tests.test_sysstats import ThermalZoneTest class TemperatureTest(ThermalZoneTest): def setUp(self): super(TemperatureTest, self).setUp() self.temperature = Temperature(self.thermal_zone_path) self.sysinfo = SysInfoPluginRegistry() self.sysinfo.add(self.temperature) def test_run_returns_succeeded_deferred(self): self.assertIs(None, self.successResultOf(self.temperature.run())) def test_run_adds_header(self): self.write_thermal_zone("THM0", "51 C") self.temperature.run() self.assertEqual(self.sysinfo.get_headers(), [("Temperature", "51 C")]) def test_ignores_bad_files(self): self.write_thermal_zone("THM0", "") temperature_path = os.path.join(self.thermal_zone_path, "THM0/temperature") file = open(temperature_path, "w") file.write("bad-label: 51 C") file.close() self.temperature.run() self.assertEqual(self.sysinfo.get_headers(), []) def test_ignores_unknown_formats(self): self.write_thermal_zone("THM0", "FOO C") self.temperature.run() self.assertEqual(self.sysinfo.get_headers(), []) def test_picks_highest_temperature(self): self.write_thermal_zone("THM0", "51 C") self.write_thermal_zone("THM1", "53 C") self.write_thermal_zone("THM2", "52 C") self.temperature.run() self.assertEqual(self.sysinfo.get_headers(), [("Temperature", "53 C")]) landscape-client-14.01/landscape/sysinfo/tests/test_landscapelink.py0000644000175000017500000000144212301414317025545 0ustar andreasandreasfrom landscape.sysinfo.sysinfo import SysInfoPluginRegistry from landscape.sysinfo.landscapelink import LandscapeLink from landscape.tests.helpers import LandscapeTest class LandscapeLinkTest(LandscapeTest): def setUp(self): super(LandscapeLinkTest, self).setUp() self.landscape_link = LandscapeLink() self.sysinfo = SysInfoPluginRegistry() self.sysinfo.add(self.landscape_link) def test_run_returns_succeeded_deferred(self): self.assertIs(None, self.successResultOf(self.landscape_link.run())) def test_run_adds_footnote(self): self.landscape_link.run() self.assertEqual( self.sysinfo.get_footnotes(), ["Graph this data and manage this system at:\n" " https://landscape.canonical.com/"]) landscape-client-14.01/landscape/sysinfo/tests/__init__.py0000644000175000017500000000000012301414317023422 0ustar andreasandreaslandscape-client-14.01/landscape/sysinfo/tests/test_memory.py0000644000175000017500000000272112301414317024246 0ustar andreasandreasfrom landscape.sysinfo.sysinfo import SysInfoPluginRegistry from landscape.sysinfo.memory import Memory from landscape.tests.helpers import LandscapeTest MEMINFO_SAMPLE = """ MemTotal: 2074536 kB MemFree: 436468 kB Buffers: 385596 kB Cached: 672856 kB SwapCached: 0 kB Active: 708424 kB Inactive: 705292 kB HighTotal: 1178432 kB HighFree: 137220 kB LowTotal: 896104 kB LowFree: 299248 kB SwapTotal: 2562356 kB SwapFree: 1562356 kB Dirty: 300 kB Writeback: 0 kB AnonPages: 355388 kB Mapped: 105028 kB Slab: 152664 kB SReclaimable: 136372 kB SUnreclaim: 16292 kB PageTables: 3124 kB NFS_Unstable: 0 kB Bounce: 0 kB CommitLimit: 3599624 kB Committed_AS: 1136296 kB 
VmallocTotal: 114680 kB VmallocUsed: 27796 kB VmallocChunk: 86764 kB """ class MemoryTest(LandscapeTest): def setUp(self): super(MemoryTest, self).setUp() self.memory = Memory(self.makeFile(MEMINFO_SAMPLE)) self.sysinfo = SysInfoPluginRegistry() self.sysinfo.add(self.memory) def test_run_returns_succeeded_deferred(self): self.assertIs(None, self.successResultOf(self.memory.run())) def test_run_adds_header(self): self.memory.run() self.assertEqual(self.sysinfo.get_headers(), [("Memory usage", "27%"), ("Swap usage", "39%")]) landscape-client-14.01/landscape/sysinfo/tests/test_network.py0000644000175000017500000000237412301414317024433 0ustar andreasandreasfrom landscape.sysinfo.sysinfo import SysInfoPluginRegistry from landscape.sysinfo.network import Network from landscape.tests.helpers import LandscapeTest class NetworkTest(LandscapeTest): def setUp(self): super(NetworkTest, self).setUp() self.result = [] self.network = Network(lambda: self.result) self.sysinfo = SysInfoPluginRegistry() self.sysinfo.add(self.network) def test_run_returns_succeeded_deferred(self): """L{Network.run} always returns a succeeded C{Deferred}.""" self.assertIs(None, self.successResultOf(self.network.run())) def test_run_adds_header(self): """ A header is written to sysinfo output for each network device reported by L{get_active_device_info}. """ self.result = [{"interface": "eth0", "ip_address": "192.168.0.50"}] self.network.run() self.assertEqual([("IP address for eth0", "192.168.0.50")], self.sysinfo.get_headers()) def test_run_without_network_devices(self): """ If no network device information is available, no headers are added to the sysinfo output. """ self.network.run() self.assertEqual([], self.sysinfo.get_headers()) landscape-client-14.01/landscape/sysinfo/landscapelink.py0000644000175000017500000000052012301414317023340 0ustar andreasandreasfrom twisted.internet.defer import succeed class LandscapeLink(object): def register(self, sysinfo): self._sysinfo = sysinfo def run(self): self._sysinfo.add_footnote( "Graph this data and manage this system at:\n" " https://landscape.canonical.com/") return succeed(None) landscape-client-14.01/landscape/sysinfo/memory.py0000644000175000017500000000113612301414317022044 0ustar andreasandreasfrom twisted.internet.defer import succeed from landscape.lib.sysstats import MemoryStats class Memory(object): def __init__(self, filename="/proc/meminfo"): self._filename = filename def register(self, sysinfo): self._sysinfo = sysinfo def run(self): memstats = MemoryStats(self._filename) self._sysinfo.add_header("Memory usage", "%d%%" % memstats.used_memory_percentage) self._sysinfo.add_header("Swap usage", "%d%%" % memstats.used_swap_percentage) return succeed(None) landscape-client-14.01/landscape/sysinfo/__init__.py0000644000175000017500000000000012301414317022260 0ustar andreasandreaslandscape-client-14.01/landscape/sysinfo/sysinfo.py0000644000175000017500000002333012301414317022226 0ustar andreasandreasimport textwrap from logging import getLogger import math import os from twisted.python.failure import Failure from landscape.lib.twisted_util import gather_results from landscape.lib.log import log_failure from landscape.plugin import PluginRegistry class SysInfoPluginRegistry(PluginRegistry): """ When the sysinfo plugin registry is run, it will run each of the registered plugins so that they get a chance to feed information into the registry. There are three kinds of details collected: headers, notes, and footnotes. 
They are presented to the user in a way similar to the following: Header1: Value1 Header3: Value3 Header2: Value2 Header4: Value4 => This is first note => This is the second note The first footnote. The second footnote. Headers are supposed to display information which is regularly available, such as the load and temperature of the system. Notes contain occasional information, such as warnings about high temperatures and low disk space. Finally, footnotes contain pointers to further information such as URLs. """ def __init__(self): super(SysInfoPluginRegistry, self).__init__() self._header_index = {} self._headers = [] self._notes = [] self._footnotes = [] self._plugin_error = False def add_header(self, name, value): """Add a new information header to be displayed to the user. Each header name is only present once. If a header is added multiple times, the last value added will be returned in the get_headers() call. Headers with value None are not returned by get_headers(), but they still allocate a position in the list. This fact may be exploited to create a deterministic ordering even when dealing with values obtained asynchronously. """ index = self._header_index.get(name) if index is None: self._header_index[name] = len(self._headers) self._headers.append((name, value)) else: self._headers[index] = (name, value) def get_headers(self): """Get all information headers to be displayed to the user. Headers which were added with value None are not included in the result. """ return [pair for pair in self._headers if pair[1] is not None] def add_note(self, note): """Add a new note to be shown to the administrator.""" self._notes.append(note) def get_notes(self): """Get all notes to be shown to the administrator.""" return self._notes def add_footnote(self, note): """Add a new footnote to be shown to the administrator.""" self._footnotes.append(note) def get_footnotes(self): """Get all footnotes to be shown to the administrator.""" return self._footnotes def run(self): """Run all plugins, and return a deferred aggregating their results. This will call the run() method on each of the registered plugins, and return a deferred which aggregates each resulting deferred. """ deferreds = [] for plugin in self.get_plugins(): try: result = plugin.run() except: self._log_plugin_error(Failure(), plugin) else: result.addErrback(self._log_plugin_error, plugin) deferreds.append(result) return gather_results(deferreds).addCallback(self._report_error_note) def _log_plugin_error(self, failure, plugin): self._plugin_error = True message = "%s plugin raised an exception." % plugin.__class__.__name__ logger = getLogger("landscape-sysinfo") log_failure(failure, message, logger=logger) def _report_error_note(self, result): from landscape.sysinfo.deployment import get_landscape_log_directory if self._plugin_error: path = os.path.join(get_landscape_log_directory(), "sysinfo.log") self.add_note( "There were exceptions while processing one or more plugins. " "See %s for more information." % path) return result def format_sysinfo(headers=(), notes=(), footnotes=(), width=80, indent="", column_separator=" ", note_prefix="=> "): """Format sysinfo headers, notes and footnotes to be displayed. This function will format headers, notes and footnotes in a way that looks similar to the following: Header1: Value1 Header3: Value3 Header2: Value2 Header4: Value4 => This is first note => This is the second note The first footnote. The second footnote.
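For instance, a call along the following lines (values illustrative) would produce a layout like the one above: format_sysinfo(headers=[("Header1", "Value1"), ("Header2", "Value2"), ("Header3", "Value3"), ("Header4", "Value4")], notes=["This is first note", "This is the second note"], footnotes=["The first footnote.", "The second footnote."])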
Header columns will be dynamically adjusted to conform to the size of header labels and values. """ # Indentation spacing is easier to handle if we just take it off the width. width -= len(indent) headers_len = len(headers) value_separator = ": " # Compute the number of columns in the header. To do that, we first # make a rough estimate of the maximum feasible number of columns, # and then we go back from there until everything fits. min_length = width for header, value in headers: min_length = min(min_length, len(header)+len(value)+2) # 2 for ": " columns = int(math.ceil(float(width) / (min_length + len(column_separator)))) # Okay, we've got a base for the number of columns. Now, since # columns may have different lengths, and the length of each column # will change as we compress headers into fewer and fewer columns, we # have to perform some backtracking to compute a good feasible number # of columns. while True: # Check if the current number of columns would fit in the screen. # Note that headers are indented like this: # # Header: First value # Another header: Value # # So the column length is the sum of the widest header, plus the # widest value, plus the value separator. headers_per_column = int(math.ceil(headers_len / float(columns))) header_lengths = [] total_length = 0 for column in range(columns): # We must find the widest header and value, both to compute the # column length, and also to compute per-column padding when # outputting it. widest_header_len = 0 widest_value_len = 0 for row in range(headers_per_column): header_index = column * headers_per_column + row # There are potentially fewer headers in the last column, # so let's watch out for these here. if header_index < headers_len: header, value = headers[header_index] widest_header_len = max(widest_header_len, len(header)) widest_value_len = max(widest_value_len, len(value)) if column > 0: # Account for the spacing between each column. total_length += len(column_separator) total_length += (widest_header_len + widest_value_len + len(value_separator)) # Keep track of these lengths for building the output later. header_lengths.append((widest_header_len, widest_value_len)) if columns == 1 or total_length < width: # If there's just one column, or if we're within the requested # length, we're good to go. break # Otherwise, do the whole thing again with one less column. columns -= 1 # Alright! Show time! Let's build the headers line by line. lines = [] for row in range(headers_per_column): line = indent # Pick all columns for this line. Note that this means that # for 4 headers with 2 columns, we pick header 0 and 2 for # the first line, since we show headers 0 and 1 in the first # column, and headers 2 and 3 in the second one. for column in range(columns): header_index = column * headers_per_column + row # There are potentially fewer headers in the last column, so # let's watch out for these here. if header_index < headers_len: header, value = headers[header_index] # Get the widest header/value on this column, for padding. widest_header_len, widest_value_len = header_lengths[column] if column > 0: # Add inter-column spacing. line += column_separator # And append the column to the current line. line += (header + value_separator + " " * (widest_header_len - len(header)) + value) # If there are more columns in this line, pad it up so # that the next column's header is correctly aligned.
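# (In other words: only pad with the value column's width when a later # column still has a header on this row; the last populated column is # left unpadded to avoid trailing whitespace.)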
if headers_len > (column+1) * headers_per_column + row: line += " " * (widest_value_len - len(value)) lines.append(line) if notes: if lines: # Some spacing between headers and notes. lines.append("") initial_indent = indent + note_prefix for note in notes: lines.extend( textwrap.wrap(note, initial_indent=initial_indent, subsequent_indent=" "*len(initial_indent), width=width)) if footnotes: if lines: lines.append("") lines.extend(indent + footnote for footnote in footnotes) return "\n".join(lines) landscape-client-14.01/landscape/sysinfo/disk.py0000644000175000017500000000502212301414317021464 0ustar andreasandreasfrom __future__ import division import os from twisted.internet.defer import succeed from landscape.lib.disk import (get_mount_info, get_filesystem_for_path) def format_megabytes(megabytes): if megabytes >= 1024 * 1024: return "%.2fTB" % (megabytes / (1024 * 1024)) elif megabytes >= 1024: return "%.2fGB" % (megabytes / 1024) else: return "%dMB" % (megabytes) def usage(info): total = info["total-space"] used = total - info["free-space"] return "%0.1f%% of %s" % ((used / total) * 100, format_megabytes(total)) class Disk(object): def __init__(self, mounts_file="/proc/mounts", statvfs=os.statvfs): self._mounts_file = mounts_file self._statvfs = statvfs def register(self, sysinfo): self._sysinfo = sysinfo def run(self): main_info = get_filesystem_for_path("/home", self._mounts_file, self._statvfs) if main_info is not None: total = main_info["total-space"] if total <= 0: root_main_info = get_filesystem_for_path( "/", self._mounts_file, self._statvfs) if root_main_info is not None: total = root_main_info["total-space"] main_info = root_main_info if total <= 0: main_usage = "unknown" else: main_usage = usage(main_info) self._sysinfo.add_header("Usage of " + main_info["mount-point"], main_usage) else: self._sysinfo.add_header("Usage of /home", "unknown") seen_mounts = set() seen_devices = set() infos = list(get_mount_info(self._mounts_file, self._statvfs)) infos.sort(key=lambda i: len(i["mount-point"])) for info in infos: total = info["total-space"] mount_seen = info["mount-point"] in seen_mounts device_seen = info["device"] in seen_devices seen_mounts.add(info["mount-point"]) seen_devices.add(info["device"]) if mount_seen or device_seen: continue if total <= 0: # Some "virtual" filesystems have 0 total space. ignore them. 
continue used = ((total - info["free-space"]) / total) * 100 if used >= 85: self._sysinfo.add_note("%s is using %s" % (info["mount-point"], usage(info))) return succeed(None) landscape-client-14.01/landscape/sysinfo/temperature.py0000644000175000017500000000142412301414317023071 0ustar andreasandreasfrom twisted.internet.defer import succeed from landscape.lib.sysstats import get_thermal_zones class Temperature(object): def __init__(self, thermal_zone_path=None): self._thermal_zone_path = thermal_zone_path def register(self, sysinfo): self._sysinfo = sysinfo def run(self): temperature = None max_value = None for zone in get_thermal_zones(self._thermal_zone_path): if (zone.temperature_value is not None and (max_value is None or zone.temperature_value > max_value)): temperature = zone.temperature max_value = zone.temperature_value if temperature is not None: self._sysinfo.add_header("Temperature", temperature) return succeed(None) landscape-client-14.01/landscape/sysinfo/load.py0000644000175000017500000000041012301414317021445 0ustar andreasandreasimport os from twisted.internet.defer import succeed class Load(object): def register(self, sysinfo): self._sysinfo = sysinfo def run(self): self._sysinfo.add_header("System load", str(os.getloadavg()[0])) return succeed(None) landscape-client-14.01/landscape/ui/0000755000175000017500000000000012301414317017104 5ustar andreasandreaslandscape-client-14.01/landscape/ui/model/0000755000175000017500000000000012301414317020204 5ustar andreasandreaslandscape-client-14.01/landscape/ui/model/configuration/0000755000175000017500000000000012301414317023053 5ustar andreasandreaslandscape-client-14.01/landscape/ui/model/configuration/state.py0000644000175000017500000004736612301414317024565 0ustar andreasandreasimport copy from landscape.lib.network import get_fqdn from landscape.ui.constants import CANONICAL_MANAGED, NOT_MANAGED from landscape.ui.model.configuration.proxy import ConfigurationProxy HOSTED_LANDSCAPE_HOST = "landscape.canonical.com" LOCAL_LANDSCAPE_HOST = "" HOSTED_ACCOUNT_NAME = "" LOCAL_ACCOUNT_NAME = "standalone" HOSTED_PASSWORD = "" LOCAL_PASSWORD = "" HOSTED = "hosted" LOCAL = "local" MANAGEMENT_TYPE = "management-type" COMPUTER_TITLE = "computer-title" LANDSCAPE_HOST = "landscape-host" ACCOUNT_NAME = "account-name" PASSWORD = "password" DEFAULT_DATA = { MANAGEMENT_TYPE: NOT_MANAGED, COMPUTER_TITLE: get_fqdn(), HOSTED: { LANDSCAPE_HOST: HOSTED_LANDSCAPE_HOST, ACCOUNT_NAME: HOSTED_ACCOUNT_NAME, PASSWORD: HOSTED_PASSWORD}, LOCAL: { LANDSCAPE_HOST: LOCAL_LANDSCAPE_HOST, ACCOUNT_NAME: LOCAL_ACCOUNT_NAME, PASSWORD: LOCAL_PASSWORD}} def derive_server_host_name_from_url(url): """ Extract the hostname part from a URL. """ try: without_protocol = url[url.index("://") + 3:] except ValueError: without_protocol = url try: return without_protocol[:without_protocol.index("/")] except ValueError: return without_protocol def derive_url_from_host_name(host_name): """ Extrapolate a url from a host name. """ #Reuse this code to make sure it's a proper host name host_name = derive_server_host_name_from_url(host_name) return "https://" + host_name + "/message-system" def derive_ping_url_from_host_name(host_name): """ Extrapolate a ping_url from a host name. """ #Reuse this code to make sure it's a proper host name host_name = derive_server_host_name_from_url(host_name) return "http://" + host_name + "/ping" class StateError(Exception): """ An exception that is raised when there is an error relating to the current state. 
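For example, calling modify() on a model that is still in L{VirginState}, or persist() on one with no modifications, raises this error via the corresponding helper defined below.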
""" class TransitionError(Exception): """ An L{Exception} that is raised when a valid transition between states fails for some non state related reason. For example, this error is raised when the user does not have the privilege of reading the configuration file, this causes the transition from L{VirginState} to L{InitialisedState} to fail but not because that transition from one state to another was not permitted, but rather the transition encountered an error. """ class ConfigurationState(object): """ Base class for states used in the L{ConfigurationModel}. """ def __init__(self, data, proxy, uisettings): self._data = copy.deepcopy(data) self._proxy = proxy self._uisettings = uisettings def get_config_filename(self): return self._proxy.get_config_filename() def get(self, *args): """ Retrieve only valid values from two level dictionary based tree. This mainly served to pick up programming errors and could easily be replaced with a simpler scheme. """ arglen = len(args) if arglen > 2 or arglen == 0: raise TypeError( "get() takes either 1 or 2 keys (%d given)" % arglen) if arglen == 2: # We're looking for a leaf on a branch sub_dict = None if args[0] in [HOSTED, LOCAL]: sub_dict = self._data.get(args[0], {}) sub_dict = self._data[args[0]] if not isinstance(sub_dict, dict): raise KeyError( "Compound key [%s][%s] is invalid. The data type " + "returned from the first index was %s." % sub_dict.__class__.__name__) return sub_dict.get(args[1], None) else: if args[0] in (MANAGEMENT_TYPE, COMPUTER_TITLE): return self._data.get(args[0], None) else: raise KeyError("Key [%s] is invalid. " % args[0]) def set(self, *args): """ Set only valid values from two level dictionary based tree. This mainly served to pick up programming errors and could easily be replaced with a simpler scheme. """ arglen = len(args) if arglen < 2 or arglen > 3: raise TypeError("set() takes either 1 or 2 keys and exactly 1 " + "value (%d arguments given)" % arglen) if arglen == 2: # We're setting a leaf attached to the root self._data[args[0]] = args[1] else: # We're setting a leaf on a branch sub_dict = None if args[0] in [HOSTED, LOCAL]: sub_dict = self._data.get(args[0], {}) if not isinstance(sub_dict, dict): raise KeyError("Compound key [%s][%s] is invalid. The data " + "type returned from the first index was %s." % sub_dict.__class__.__name__) sub_dict[args[1]] = args[2] self._data[args[0]] = sub_dict def load_data(self, asynchronous=True, exit_method=None): raise NotImplementedError def modify(self): raise NotImplementedError def revert(self): raise NotImplementedError def persist(self): raise NotImplementedError def exit(self, asynchronous=True, exit_method=None): return ExitedState(self._data, self._proxy, self._uisettings, asynchronous=asynchronous, exit_method=exit_method) class Helper(object): """ Base class for all state transition helpers. It is assumed that the Helper classes are "friends" of the L{ConfigurationState} classes and can have some knowledge of their internals. They shouldn't be visible to users of the L{ConfigurationState}s and in general we should avoid seeing the L{ConfigurationState}'s _data attribute outside this module. """ def __init__(self, state): self._state = state class ModifiableHelper(Helper): """ Allow a L{ConfigurationState}s to be modified. """ def modify(self): return ModifiedState(self._state._data, self._state._proxy, self._state._uisettings) class UnloadableHelper(Helper): """ Disallow loading of data into a L{ConfigurationModel}. 
""" def load_data(self, asynchronous=True, exit_method=None): raise StateError("A ConfiguratiomModel in a " + self.__class__.__name__ + " cannot be transitioned via load_data()") class UnmodifiableHelper(Helper): """ Disallow modification of a L{ConfigurationState}. """ def modify(self): raise StateError("A ConfigurationModel in " + self.__class__.__name__ + " cannot transition via modify()") class RevertableHelper(Helper): """ Allow reverting of a L{ConfigurationModel}. """ def revert(self): return InitialisedState(self._state._data, self._state._proxy, self._state._uisettings) class UnrevertableHelper(Helper): """ Disallow reverting of a L{ConfigurationModel}. """ def revert(self): raise StateError("A ConfigurationModel in " + self.__class__.__name__ + " cannot transition via revert()") class PersistableHelper(Helper): """ Allow a L{ConfigurationModel} to persist. """ def _save_to_uisettings(self): """ Persist full content to the L{UISettings} object. """ self._state._uisettings.set_management_type( self._state.get(MANAGEMENT_TYPE)) self._state._uisettings.set_computer_title( self._state.get(COMPUTER_TITLE)) self._state._uisettings.set_hosted_account_name( self._state.get(HOSTED, ACCOUNT_NAME)) self._state._uisettings.set_hosted_password( self._state.get(HOSTED, PASSWORD)) self._state._uisettings.set_local_landscape_host( self._state.get(LOCAL, LANDSCAPE_HOST)) self._state._uisettings.set_local_account_name( self._state.get(LOCAL, ACCOUNT_NAME)) self._state._uisettings.set_local_password( self._state.get(LOCAL, PASSWORD)) def _save_to_config(self): """ Persist the subset of the data we want to make live to the actual configuration file. """ hosted = self._state.get(MANAGEMENT_TYPE) if hosted is NOT_MANAGED: pass else: if hosted == CANONICAL_MANAGED: first_key = HOSTED else: first_key = LOCAL self._state._proxy.url = derive_url_from_host_name( self._state.get(first_key, LANDSCAPE_HOST)) self._state._proxy.ping_url = derive_ping_url_from_host_name( self._state.get(first_key, LANDSCAPE_HOST)) self._state._proxy.account_name = self._state.get( first_key, ACCOUNT_NAME) self._state._proxy.registration_key = self._state.get( first_key, PASSWORD) self._state._proxy.computer_title = self._state.get(COMPUTER_TITLE) self._state._proxy.write() def persist(self): self._save_to_uisettings() self._save_to_config() return InitialisedState(self._state._data, self._state._proxy, self._state._uisettings) class UnpersistableHelper(Helper): """ Disallow persistence of a L{ConfigurationModel}. """ def persist(self): raise StateError("A ConfiguratonModel in " + self.__class__.__name__ + " cannot be transitioned via persist().") class ExitedState(ConfigurationState): """ The terminal state of L{ConfigurationModel}, you can't do anything further once this state is reached. 
""" def __init__(self, data, proxy, uisettings, exit_method=None, asynchronous=True): super(ExitedState, self).__init__(None, None, None) if callable(exit_method): exit_method() else: proxy.exit(asynchronous=asynchronous) self._unloadable_helper = UnloadableHelper(self) self._unmodifiable_helper = UnmodifiableHelper(self) self._unrevertable_helper = UnrevertableHelper(self) self._unpersistable_helper = UnpersistableHelper(self) def load_data(self, asynchronous=True, exit_method=None): return self._unloadable_helper.load_data(asynchronous=asynchronous, exit_method=exit_method) def modify(self): return self._unmodifiable_helper.modify() def revert(self): return self._unrevertable_helper.revert() def persist(self): return self._unpersistable_helper.persist() def exit(self, asynchronous=True): return self class ModifiedState(ConfigurationState): """ The state of a L{ConfigurationModel} whenever the user has modified some data but hasn't yet L{persist}ed or L{revert}ed. """ def __init__(self, data, proxy, uisettings): super(ModifiedState, self).__init__(data, proxy, uisettings) self._modifiable_helper = ModifiableHelper(self) self._revertable_helper = RevertableHelper(self) self._persistable_helper = PersistableHelper(self) def modify(self): return self._modifiable_helper.modify() def revert(self): return self._revertable_helper.revert() def persist(self): return self._persistable_helper.persist() class InitialisedState(ConfigurationState): """ The state of the L{ConfigurationModel} as initially presented to the user. Baseline data should have been loaded from the real configuration data, any persisted user data should be loaded into blank values and finally defaults should be applied where necessary. """ def __init__(self, data, proxy, uisettings): super(InitialisedState, self).__init__(data, proxy, uisettings) self._modifiable_helper = ModifiableHelper(self) self._unrevertable_helper = UnrevertableHelper(self) self._unpersistable_helper = UnpersistableHelper(self) self._load_uisettings_data() if not self._load_live_data(): raise TransitionError("Authentication Failure") def _load_uisettings_data(self): """ Load the complete set of dialog data from L{UISettings}. """ hosted = self._uisettings.get_management_type() self.set(MANAGEMENT_TYPE, hosted) computer_title = self._uisettings.get_computer_title() if computer_title: self.set(COMPUTER_TITLE, computer_title) self.set(HOSTED, ACCOUNT_NAME, self._uisettings.get_hosted_account_name()) self.set(HOSTED, PASSWORD, self._uisettings.get_hosted_password()) self.set(LOCAL, LANDSCAPE_HOST, self._uisettings.get_local_landscape_host()) local_account_name = self._uisettings.get_local_account_name() if local_account_name: self.set(LOCAL, ACCOUNT_NAME, local_account_name) self.set(LOCAL, PASSWORD, self._uisettings.get_local_password()) def _load_live_data(self): """ Load the current live subset of data from the configuration file. 
""" if self._proxy.load(None): computer_title = self._proxy.computer_title if computer_title: self.set(COMPUTER_TITLE, computer_title) url = self._proxy.url if url.find(HOSTED_LANDSCAPE_HOST) > -1: self.set(HOSTED, ACCOUNT_NAME, self._proxy.account_name) self.set(HOSTED, PASSWORD, self._proxy.registration_key) else: self.set(LOCAL, LANDSCAPE_HOST, derive_server_host_name_from_url(url)) if self._proxy.account_name != "": self.set(LOCAL, ACCOUNT_NAME, self._proxy.account_name) return True else: return False def load_data(self, asynchronous=True, exit_method=None): return self def modify(self): return self._modifiable_helper.modify() def revert(self): return self._unrevertable_helper.revert() def persist(self): return self._unpersistable_helper.persist() class VirginState(ConfigurationState): """ The state of the L{ConfigurationModel} before any actions have been taken upon it. """ def __init__(self, proxy, uisettings): super(VirginState, self).__init__(DEFAULT_DATA, proxy, uisettings) self._unmodifiable_helper = UnmodifiableHelper(self) self._unrevertable_helper = UnrevertableHelper(self) self._unpersistable_helper = UnpersistableHelper(self) def load_data(self, asynchronous=True, exit_method=None): try: return InitialisedState(self._data, self._proxy, self._uisettings) except TransitionError: return ExitedState(self._data, self._proxy, self._uisettings, asynchronous=asynchronous, exit_method=exit_method) def modify(self): return self._unmodifiable_helper.modify() def revert(self): return self._unrevertable_helper.revert() def persist(self): return self._unpersistable_helper.persist() class ConfigurationModel(object): """ L{ConfigurationModel} presents a model of configuration as the UI requirements describe it (separate values for the Hosted and Local configurations) as opposed to the real structure of the configuration file. This is intended to achieve the following: 1. Allow the expected behaviour in the UI without changing the live config file. 2. Supersede the overly complex logic in the controller layer with a cleaner state pattern. The allowable state transitions are: VirginState --(load_data)--> InitialisedState VirginState --(load_data)--> ExitedState VirginState --(exit)-------> ExitedState InitialisedState --(modify)-----> ModifiedState InitialisedState --(exit)-------> ExitedState ModifiedState --(revert)-----> InitialisedState ModifiedState --(modify)-----> ModifiedState ModifiedState --(persist)----> InitialisedState ModifiedState --(exit)-------> ExitedState """ def __init__(self, proxy=None, proxy_loadargs=[], uisettings=None): if not proxy: proxy = ConfigurationProxy(loadargs=proxy_loadargs) self._current_state = VirginState(proxy, uisettings) def get_state(self): """ Expose the underlying L{ConfigurationState}, for testing purposes. 
""" return self._current_state def load_data(self, asynchronous=True, exit_method=None): self._current_state = self._current_state.load_data( asynchronous=asynchronous, exit_method=exit_method) return isinstance(self._current_state, InitialisedState) def modify(self): self._current_state = self._current_state.modify() def revert(self): self._current_state = self._current_state.revert() def persist(self): self._current_state = self._current_state.persist() def _get_management_type(self): return self._current_state.get(MANAGEMENT_TYPE) def _set_management_type(self, value): self._current_state.set(MANAGEMENT_TYPE, value) management_type = property(_get_management_type, _set_management_type) def _get_computer_title(self): return self._current_state.get(COMPUTER_TITLE) def _set_computer_title(self, value): self._current_state.set(COMPUTER_TITLE, value) computer_title = property(_get_computer_title, _set_computer_title) def _get_hosted_landscape_host(self): return self._current_state.get(HOSTED, LANDSCAPE_HOST) hosted_landscape_host = property(_get_hosted_landscape_host) def _get_local_landscape_host(self): return self._current_state.get(LOCAL, LANDSCAPE_HOST) def _set_local_landscape_host(self, value): self._current_state.set(LOCAL, LANDSCAPE_HOST, value) local_landscape_host = property(_get_local_landscape_host, _set_local_landscape_host) def _get_hosted_account_name(self): return self._current_state.get(HOSTED, ACCOUNT_NAME) def _set_hosted_account_name(self, value): self._current_state.set(HOSTED, ACCOUNT_NAME, value) hosted_account_name = property(_get_hosted_account_name, _set_hosted_account_name) def _get_local_account_name(self): return self._current_state.get(LOCAL, ACCOUNT_NAME) def _set_local_account_name(self, value): self._current_state.set(LOCAL, ACCOUNT_NAME, value) local_account_name = property(_get_local_account_name, _set_local_account_name) def _get_hosted_password(self): return self._current_state.get(HOSTED, PASSWORD) def _set_hosted_password(self, value): self._current_state.set(HOSTED, PASSWORD, value) hosted_password = property(_get_hosted_password, _set_hosted_password) def _get_local_password(self): return self._current_state.get(LOCAL, PASSWORD) def _set_local_password(self, value): self._current_state.set(LOCAL, PASSWORD, value) local_password = property(_get_local_password, _set_local_password) def _get_is_modified(self): return isinstance(self.get_state(), ModifiedState) is_modified = property(_get_is_modified) def get_config_filename(self): return self._current_state.get_config_filename() def exit(self, asynchronous=True): self._current_state.exit(asynchronous=asynchronous) landscape-client-14.01/landscape/ui/model/configuration/proxy.py0000644000175000017500000000742612301414317024617 0ustar andreasandreas""" This module contains a class, L{ConfigurationProxy} which pretends to be a L{landscape.configuration.LandscapeSetupConfiguration} but actually directs its calls via DBus to the L{ConfigurationMechanism}. """ import dbus from landscape.ui.model.configuration.mechanism import ( SERVICE_NAME, INTERFACE_NAME, OBJECT_PATH) from landscape.configuration import LandscapeSetupConfiguration class ConfigurationProxy(object): """ L{ConfigurationProxy} attempts to be a drop-in replacement for L{LandscapeSetupConfiguration} allowing applications run by user accounts with the correct rights (as defined by a PolicyKit policy file) to interact with the landscape client configuration via a DBus service. This is the RightThing(TM) for PolicyKit and therefore for GNOME/Unity. 
The canonical case for this is L{landscape-client-settings-ui}. """ def __init__(self, bus=None, interface=None, loadargs=[]): self._loadargs = loadargs if bus is None: self._bus = dbus.SystemBus() else: self._bus = bus if interface is None: remote_object = self._bus.get_object(SERVICE_NAME, OBJECT_PATH) self._interface = dbus.Interface(remote_object, INTERFACE_NAME) else: self._interface = interface def load(self, arglist): if arglist is None: arglist = self._loadargs if len(arglist) == 0: al = "" else: al = chr(0x1e).join(arglist) try: self._interface.load(al) except dbus.DBusException, e: error_name = e.get_dbus_name() if error_name not in ("com.canonical.LandscapeClientSettings." "PermissionDeniedByPolicy", "org.freedesktop.DBus.Error.NoReply"): raise return False return True load.__doc__ = LandscapeSetupConfiguration.load.__doc__ def reload(self): self._interface.reload() reload.__doc__ = LandscapeSetupConfiguration.reload.__doc__ def write(self): self._interface.write() write.__doc__ = LandscapeSetupConfiguration.write.__doc__ def get_config_filename(self): return self._interface.get_config_filename() get_config_filename.__doc__ = \ LandscapeSetupConfiguration.get_config_filename.__doc__ def exit(self, asynchronous=True): """ Cause the mechanism to exit. """ def on_reply(): """ This will never get called because we call L{sys.exit} inside the mechanism. """ def on_error(): """ This will always be called; it allows us to suppress the L{NoReply} error from DBus when we terminate the mechanism. """ if asynchronous: self._interface.exit(reply_handler=on_reply, error_handler=on_error) else: self._interface.exit() def _delegate_to_interface(field): def get(self): return self._interface.get(field) def set(self, value): self._interface.set(field, value) return get, set account_name = property(*_delegate_to_interface("account_name")) computer_title = property(*_delegate_to_interface("computer_title")) data_path = property(*_delegate_to_interface("data_path")) http_proxy = property(*_delegate_to_interface("http_proxy")) https_proxy = property(*_delegate_to_interface("https_proxy")) ping_url = property(*_delegate_to_interface("ping_url")) registration_key = property( *_delegate_to_interface("registration_key")) tags = property(*_delegate_to_interface("tags")) url = property(*_delegate_to_interface("url")) landscape-client-14.01/landscape/ui/model/configuration/tests/0000755000175000017500000000000012301414317024215 5ustar andreasandreaslandscape-client-14.01/landscape/ui/model/configuration/tests/test_uisettings.py0000644000175000017500000001502512301414317030027 0ustar andreasandreas from landscape.tests.helpers import LandscapeTest from landscape.ui.model.configuration.uisettings import UISettings from landscape.ui.tests.helpers import ( FakeGSettings, dbus_test_should_skip, dbus_skip_message) class UISettingsTest(LandscapeTest): default_data = {"management-type": "LDS", "computer-title": "bound.to.lose", "hosted-landscape-host": "landscape.canonical.com", "hosted-account-name": "Sparklehorse", "hosted-password": "Vivadixiesubmarinetransmissionplot", "local-landscape-host": "the.local.machine", "local-account-name": "CrazyHorse", "local-password": "RustNeverSleeps"} def setUp(self): super(UISettingsTest, self).setUp() self.settings = FakeGSettings(data=self.default_data) self.uisettings = UISettings(self.settings) def test_setup(self): """ Test that the L{GSettings.Client} is correctly initialised.
""" self.assertTrue(self.settings.was_called_with_args( "new", UISettings.BASE_KEY)) def test_get_management_type(self): """ Test that the L{get_management_type} value is correctly fetched from the L{GSettings.Client}. """ self.assertEqual("LDS", self.uisettings.get_management_type()) def test_set_management_type(self): """ Test that we can correctly use L{set_management_type} to write the L{management_type} value to the L{GSettings.Client}. """ self.assertEqual("LDS", self.uisettings.get_management_type()) self.uisettings.set_management_type("canonical") self.assertEqual("canonical", self.uisettings.get_management_type()) self.assertTrue(self.settings.was_called_with_args( "set_string", "management-type", "canonical")) def test_get_computer_title(self): """ Test that the L{get_computer_title} value is correctly fetched from the L{GSettings.Client}. """ self.assertEqual("bound.to.lose", self.uisettings.get_computer_title()) def test_set_computer_title(self): """ Test that L{set_computer_title} correctly sets the value of L{computer_title} in the L{GSettings.Client}. """ self.assertEqual("bound.to.lose", self.uisettings.get_computer_title()) self.uisettings.set_computer_title("Bang") self.assertEqual("Bang", self.uisettings.get_computer_title()) def test_get_hosted_landscape_host(self): """ Test that the L{get_hosted_landscape_host} value is correctly fetched from the L{GSettings.Client}. """ self.assertEqual("landscape.canonical.com", self.uisettings.get_hosted_landscape_host()) # NOTE: There is no facility to set the hosted-landscape-host def test_get_hosted_account_name(self): """ Test that the L{get_hosted_account_name} value is correctly fetched from the L{GSettings.Client}. """ self.assertEqual("Sparklehorse", self.uisettings.get_hosted_account_name()) def test_set_hosted_account_name(self): """ Test that L{set_hosted_account_name} correctly sets the value of L{hosted_account_name} in the L{GSettings.Client}. """ self.assertEqual("Sparklehorse", self.uisettings.get_hosted_account_name()) self.uisettings.set_hosted_account_name("Bang") self.assertEqual("Bang", self.uisettings.get_hosted_account_name()) def test_get_hosted_password(self): """ Test that the L{get_hosted_password} value is correctly fetched from the L{GSettings.Client}. """ self.assertEqual("Vivadixiesubmarinetransmissionplot", self.uisettings.get_hosted_password()) def test_set_hosted_password(self): """ Test that L{set_hosted_password} correctly sets the value of L{hosted_password} in the L{GSettings.Client}. """ self.assertEqual("Vivadixiesubmarinetransmissionplot", self.uisettings.get_hosted_password()) self.uisettings.set_hosted_password("Bang") self.assertEqual("Bang", self.uisettings.get_hosted_password()) def test_get_local_landscape_host(self): """ Test that the L{get_local_landscape_host} value is correctly fetched from the L{GSettings.Client}. """ self.assertEqual("the.local.machine", self.uisettings.get_local_landscape_host()) def test_set_local_landscape_host(self): """ Test that L{set_local_landscape_host} correctly sets the value of L{local_landscape_host} in the L{GSettings.Client}. """ self.assertEqual("the.local.machine", self.uisettings.get_local_landscape_host()) self.uisettings.set_local_landscape_host("Bang") self.assertEqual("Bang", self.uisettings.get_local_landscape_host()) def test_get_local_account_name(self): """ Test that the L{get_local_account_name} value is correctly fetched from the L{GSettings.Client}. 
""" self.assertEqual("CrazyHorse", self.uisettings.get_local_account_name()) def test_set_local_account_name(self): """ Test that L{set_local_account_name} correctly sets the value of L{local_account_name} in the L{GSettings.Client}. """ self.assertEqual("CrazyHorse", self.uisettings.get_local_account_name()) self.uisettings.set_local_account_name("Bang") self.assertEqual("Bang", self.uisettings.get_local_account_name()) def test_get_local_password(self): """ Test that the L{get_local_password} value is correctly fetched from the L{GSettings.Client}. """ self.assertEqual("RustNeverSleeps", self.uisettings.get_local_password()) def test_set_local_password(self): """ Test that L{set_local_password} correctly sets the value of L{local_password} in the L{GSettings.Client}. """ self.assertEqual("RustNeverSleeps", self.uisettings.get_local_password()) self.uisettings.set_local_password("Bang") self.assertEqual("Bang", self.uisettings.get_local_password()) if dbus_test_should_skip: skip = dbus_skip_message landscape-client-14.01/landscape/ui/model/configuration/tests/test_mechanism.py0000644000175000017500000001607612301414317027604 0ustar andreasandreasfrom landscape.configuration import LandscapeSetupConfiguration from landscape.tests.helpers import LandscapeTest from landscape.ui.tests.helpers import ( dbus_test_should_skip, dbus_skip_message) if not dbus_test_should_skip: import dbus from landscape.ui.model.configuration.mechanism import ( ConfigurationMechanism, INTERFACE_NAME) class MechanismTest(LandscapeTest): """ Test that we can use mechanism calls successfully from within a secure context (the easiest to achieve is in-process calls. """ def setUp(self): super(MechanismTest, self).setUp() config = "[client]\n" config += "data_path = /var/lib/landscape/client/\n" config += "http_proxy = http://proxy.localdomain:3192\n" config += "tags = a_tag\n" config += "url = https://landscape.canonical.com/message-system\n" config += "account_name = foo\n" config += "registration_key = boink\n" config += "computer_title = baz\n" config += "https_proxy = https://proxy.localdomain:6192\n" config += "ping_url = http://landscape.canonical.com/ping\n" self.config_filename = self.makeFile(config) self.config = LandscapeSetupConfiguration() dbus.mainloop.glib.DBusGMainLoop(set_as_default=True) bus = dbus.SessionBus(private=True) bus_name = dbus.service.BusName(INTERFACE_NAME, bus) self.mechanism = ConfigurationMechanism(self.config, bus_name) self.config.load(["-c", self.config_filename]) def tearDown(self): self.mechanism.remove_from_connection() super(MechanismTest, self).tearDown() def test_is_local_call(self): """ Test simple mechanism for checking if a call is local does the right thing. Anything passed to this function that is not L{None} will result in is returning False - this in turn means that bypassing security will not happen, which is the right thing in failure cases too. """ self.assertTrue(self.mechanism._is_local_call(None, None)) self.assertFalse(self.mechanism._is_local_call(True, True)) def test_get_account_name(self): """ Test we can get account name from the mechanism. """ self.assertEqual("foo", self.mechanism.get("account_name")) def test_set_account_name(self): """ Test we can set the account name via the mechanism. """ self.mechanism.set("account_name", "bar") self.assertEqual("bar", self.mechanism.get("account_name")) def test_set_account_name_unicode(self): """ Non-ascii characters are replaced before passing to underlying config. 
""" self.mechanism.set("account_name", u"unicode\u00a3unicode") self.assertEqual("unicode?unicode", self.mechanism.get("account_name")) def test_no_unicode_to_underlying_config(self): """ Non-ascii characters are replaced before passing to underlying config. """ class FakeConfig(object): def __init__(self): self.account_name = None fake_config = FakeConfig() self.mechanism._config = fake_config self.mechanism.set("account_name", u"unicode\u00a3unicode") self.assertEqual("unicode?unicode", fake_config.account_name) def test_get_data_path(self): """ Test we can get the data path from the mechanism. """ self.assertEqual("/var/lib/landscape/client/", self.mechanism.get("data_path")) def set_data_path(self): """ Test we can set the data path via the mechanism. """ self.mechanism.set("data_path", "bar") self.assertEqual("bar", self.mechanism.get("data_path")) def test_get_http_proxy(self): """ Test that we can get the HTTP proxy from the mechanism. """ self.assertEqual("http://proxy.localdomain:3192", self.mechanism.get("http_proxy")) def test_set_http_proxy(self): """ Test that we can set the HTTP proxy via the mechanism. """ self.mechanism.set("http_proxy", "bar") self.assertEqual("bar", self.mechanism.get("http_proxy")) def test_get_tags(self): """ Test that we can get Tags from the mechanism. """ self.assertEqual("a_tag", self.mechanism.get("tags")) def test_set_tags(self): """ Test that we can set Tags via the mechanism. """ self.mechanism.set("tags", "bar") self.assertEqual("bar", self.mechanism.get("tags")) def test_get_url(self): """ Test that we can get URL from the mechanism. """ self.assertEqual("https://landscape.canonical.com/message-system", self.mechanism.get("url")) def test_set_url(self): """ Test that we can set the URL via the mechanisms. """ self.mechanism.set("url", "bar") self.assertEqual(self.mechanism.get("url"), "bar") def test_get_ping_url(self): """ Test that we can get the Ping URL from the mechanism. """ self.assertEqual("http://landscape.canonical.com/ping", self.mechanism.get("ping_url")) def test_set_ping_url(self): """ Test that we can set the Ping URL via the mechanism. """ self.mechanism.set("ping_url", "bar") self.assertEqual("bar", self.mechanism.get("ping_url")) def test_get_registration_key(self): """ Test that we can get the registration key from the mechanism. """ self.assertEqual("boink", self.mechanism.get("registration_key")) def test_set_registration_key(self): """ Test that we can set the registration key via the mechanism. """ self.mechanism.set("registration_key", "bar") self.assertEqual("bar", self.mechanism.get("registration_key")) def test_get_computer_title(self): """ Test that we can get the computer title from the mechanism. """ self.assertEqual("baz", self.mechanism.get("computer_title")) def test_set_computer_title(self): """ Test that we can set the computer title via the mechanism. """ self.mechanism.set("computer_title", "bar") self.assertEqual("bar", self.mechanism.get("computer_title")) def test_get_https_proxy(self): """ Test that we can get the HTTPS Proxy from the mechanism. """ self.assertEqual("https://proxy.localdomain:6192", self.mechanism.get("https_proxy")) def test_set_https_proxy(self): """ Test that we can set the HTTPS Proxy via the mechanism. """ self.mechanism.set("https_proxy", "bar") self.assertEqual("bar", self.mechanism.get("https_proxy")) def test_exit(self): """ Test that we cause the mechanism to exit. 
""" self.assertRaises(SystemExit, self.mechanism.exit) if dbus_test_should_skip: skip = dbus_skip_message landscape-client-14.01/landscape/ui/model/configuration/tests/test_state.py0000644000175000017500000006536512301414317026765 0ustar andreasandreasfrom landscape.ui.tests.helpers import ( ConfigurationProxyHelper, FakeGSettings, dbus_test_should_skip, dbus_skip_message) if not dbus_test_should_skip: from landscape.ui.model.configuration.uisettings import UISettings import landscape.ui.model.configuration.state from landscape.ui.model.configuration.state import ( ConfigurationModel, StateError, VirginState, InitialisedState, ModifiedState, MANAGEMENT_TYPE, HOSTED, LOCAL, HOSTED_LANDSCAPE_HOST, LANDSCAPE_HOST, COMPUTER_TITLE, ExitedState) from landscape.ui.model.configuration.mechanism import ( PermissionDeniedByPolicy) from landscape.ui.constants import ( CANONICAL_MANAGED, LOCAL_MANAGED, NOT_MANAGED) from landscape.tests.helpers import LandscapeTest class AuthenticationFailureTest(LandscapeTest): """ Test that an authentication failure is handled correctly in the L{ConfigurationModel}. """ helpers = [ConfigurationProxyHelper] def setUp(self): self.config_string = "" self.default_data = {"management-type": "canonical", "computer-title": "", "hosted-landscape-host": "", "hosted-account-name": "", "hosted-password": "", "local-landscape-host": "", "local-account-name": "", "local-password": ""} landscape.ui.model.configuration.state.DEFAULT_DATA[COMPUTER_TITLE] \ = "bound.to.lose" super(AuthenticationFailureTest, self).setUp() def test_failed_authentication(self): """ Test that load returns False when authentication fails. """ def fake_faily_load(arglist): """ This simulates what you see if you click "Cancel" or give the wrong credentials 3 times when L{PolicyKit} challenges you. """ raise PermissionDeniedByPolicy() def fake_exit_method(): """ Avoid raising a L{SystemExit} exception. """ self.mechanism.load = fake_faily_load settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) self.assertFalse(model.load_data(asynchronous=False, exit_method=fake_exit_method)) self.assertTrue(isinstance(model.get_state(), ExitedState)) if dbus_test_should_skip: skip = dbus_skip_message class ConfigurationModelTest(LandscapeTest): """ Test the internal data handling of the L{ConfigurationModel} without loading external data. """ helpers = [ConfigurationProxyHelper] def setUp(self): self.config_string = "" self.default_data = {"management-type": "canonical", "computer-title": "", "hosted-landscape-host": "", "hosted-account-name": "", "hosted-password": "", "local-landscape-host": "", "local-account-name": "", "local-password": ""} landscape.ui.model.configuration.state.DEFAULT_DATA[COMPUTER_TITLE] \ = "bound.to.lose" super(ConfigurationModelTest, self).setUp() def test_get(self): """ Test that L{get} correctly extracts data from the internal data storage of the L{ConfigurationState}s associated with a L{ConfigurationModel}. 
""" settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) state = model.get_state() self.assertEqual(NOT_MANAGED, state.get(MANAGEMENT_TYPE)) self.assertEqual(HOSTED_LANDSCAPE_HOST, state.get(HOSTED, LANDSCAPE_HOST)) self.assertRaises(TypeError, state.get, MANAGEMENT_TYPE, HOSTED, LANDSCAPE_HOST) self.assertRaises(KeyError, state.get, LANDSCAPE_HOST) self.assertRaises(KeyError, state.get, MANAGEMENT_TYPE, LANDSCAPE_HOST) def test_set(self): """ Test that L{set} correctly sets data in the internal data storage of the L{ConfigurationState}s associated with a L{ConfigurationModel}. """ settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) state = model.get_state() state.set(MANAGEMENT_TYPE, NOT_MANAGED) self.assertEqual(NOT_MANAGED, state.get(MANAGEMENT_TYPE)) state.set(MANAGEMENT_TYPE, CANONICAL_MANAGED) self.assertEqual(CANONICAL_MANAGED, state.get(MANAGEMENT_TYPE)) state.set(MANAGEMENT_TYPE, LOCAL_MANAGED) self.assertEqual(LOCAL_MANAGED, state.get(MANAGEMENT_TYPE)) self.assertEqual("", state.get(LOCAL, LANDSCAPE_HOST)) state.set(LOCAL, LANDSCAPE_HOST, "goodison.park") self.assertEqual("goodison.park", state.get(LOCAL, LANDSCAPE_HOST)) def test_virginal(self): """ Test that the L{ConfigurationModel} is created with default data. This should be managed via L{VirginState} (hence the name), but this should not be exposed and is not explicitly tested here (see L{StateTransitionTest}). """ settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) self.assertEqual(NOT_MANAGED, model.management_type) self.assertEqual(HOSTED_LANDSCAPE_HOST, model.hosted_landscape_host) self.assertEqual("bound.to.lose", model.computer_title) self.assertEqual("", model.local_landscape_host) self.assertEqual("", model.hosted_account_name) self.assertEqual("standalone", model.local_account_name) self.assertEqual("", model.hosted_password) def test_is_hosted_property(self): """ Test we can use the L{is_hosted} property to set and get that data on the current L{ConfigurationState}. """ settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) model.load_data() self.assertEqual(CANONICAL_MANAGED, model.management_type) model.management_type = LOCAL_MANAGED self.assertEqual(LOCAL_MANAGED, model.management_type) model.management_type = NOT_MANAGED self.assertEqual(NOT_MANAGED, model.management_type) def test_computer_title_property(self): """ Test that we can use the L{computer_title} property to set and get that data on the current L{ConfigurationState}. """ settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) model.load_data() self.assertEqual("bound.to.lose", model.computer_title) model.computer_title = "bound.to.win" self.assertEqual("bound.to.win", model.computer_title) def test_hosted_landscape_host_property(self): """ Test we can use the L{hosted_landscape_host} property to set and get that data on the current L{ConfigurationState}. 
""" settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) self.assertEqual(HOSTED_LANDSCAPE_HOST, model.hosted_landscape_host) self.assertRaises(AttributeError, setattr, model, "hosted_landscape_host", "foo") def test_hosted_account_name_property(self): """ Test we can use the L{hosted_account_name} property to set and get that data on the current L{ConfigurationState}. """ settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) self.assertEqual("", model.hosted_account_name) model.hosted_account_name = "foo" self.assertEqual("foo", model.hosted_account_name) def test_hosted_password_property(self): """ Test we can use the L{hosted_password} property to set and get that data on the current L{ConfigurationState}. """ settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) self.assertEqual("", model.hosted_password) model.hosted_password = "foo" self.assertEqual("foo", model.hosted_password) def test_local_landscape_host_property(self): """ Test we can use the L{local_landscape_host} property to set and get that data on the current L{ConfigurationState}. """ settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) self.assertEqual("", model.local_landscape_host) model.local_landscape_host = "foo" self.assertEqual("foo", model.local_landscape_host) def test_local_account_name_property(self): """ Test we can use the L{local_account_name} property to set and get that data on the current L{ConfigurationState}. """ settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) self.assertEqual("standalone", model.local_account_name) model.local_account_name = "foo" self.assertEqual("foo", model.local_account_name) def test_local_password_property(self): """ Test we can use the L{local_password} property to set and get that data on the current L{ConfigurationState}. """ settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) self.assertEqual("", model.local_password) model.local_password = "foo" self.assertEqual("foo", model.local_password) def test_exit(self): """ Test that we can cause the mechanism to exit. """ settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) self.assertRaises(SystemExit, model.exit, asynchronous=False) if dbus_test_should_skip: skip = dbus_skip_message class ConfigurationModelHostedTest(LandscapeTest): """ Test the L{ConfigurationModel} is correctly initialised when the live configuration is set for a hosted account. Note the multilayer data loading: 1. Internal state is defaulted. 2. UISettings data is loaded. 3. Live configuration is loaded. 
""" helpers = [ConfigurationProxyHelper] default_data = {"management-type": "canonical", "computer-title": "bound.to.lose", "hosted-landscape-host": "landscape.canonical.com", "hosted-account-name": "Sparklehorse", "hosted-password": "Vivadixiesubmarinetransmissionplot", "local-landscape-host": "the.local.machine", "local-account-name": "CrazyHorse", "local-password": "RustNeverSleeps"} def setUp(self): self.config_string = "[client]\n" \ "data_path = /var/lib/landscape/client/\n" \ "http_proxy = http://proxy.localdomain:3192\n" \ "tags = a_tag\n" \ "url = https://landscape.canonical.com/message-system\n" \ "account_name = foo\n" \ "registration_key = boink\n" \ "computer_title = baz\n" \ "https_proxy = https://proxy.localdomain:6192\n" \ "ping_url = http://landscape.canonical.com/ping\n" super(ConfigurationModelHostedTest, self).setUp() def test_initialised_hosted(self): """ Test the L{ConfigurationModel} is correctly initialised from a proxy and defaults with hosted data. """ settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) model.load_data() self.assertEqual(CANONICAL_MANAGED, model.management_type) self.assertEqual("landscape.canonical.com", model.hosted_landscape_host) self.assertEqual("the.local.machine", model.local_landscape_host) self.assertEqual("foo", model.hosted_account_name) self.assertEqual("CrazyHorse", model.local_account_name) self.assertEqual("boink", model.hosted_password) if dbus_test_should_skip: skip = dbus_skip_message class ConfigurationModelLocalTest(LandscapeTest): helpers = [ConfigurationProxyHelper] default_data = {"management-type": "LDS", "computer-title": "bound.to.lose", "hosted-landscape-host": "landscape.canonical.com", "hosted-account-name": "Sparklehorse", "hosted-password": "Vivadixiesubmarinetransmissionplot", "local-landscape-host": "the.local.machine", "local-account-name": "CrazyHorse", "local-password": "RustNeverSleeps"} def setUp(self): self.config_string = "[client]\n" \ "data_path = /var/lib/landscape/client/\n" \ "http_proxy = http://proxy.localdomain:3192\n" \ "tags = a_tag\n" \ "url = https://landscape.localdomain/message-system\n" \ "account_name = foo\n" \ "registration_key = boink\n" \ "computer_title = baz\n" \ "https_proxy = \n" \ "ping_url = http://landscape.localdomain/ping\n" super(ConfigurationModelLocalTest, self).setUp() def test_initialised_local(self): """ Test the L{ConfigurationModel} is correctly initialised from a proxy and defaults with local data. """ settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) model.load_data() self.assertEqual(LOCAL_MANAGED, model.management_type) self.assertEqual("landscape.canonical.com", model.hosted_landscape_host) self.assertEqual("landscape.localdomain", model.local_landscape_host) self.assertEqual("Sparklehorse", model.hosted_account_name) self.assertEqual("foo", model.local_account_name) self.assertEqual("Vivadixiesubmarinetransmissionplot", model.hosted_password) if dbus_test_should_skip: skip = dbus_skip_message class StateTransitionTest(LandscapeTest): """ Test that we make the correct state transitions when taking actions on the L{ConfigurationModel}. 
""" helpers = [ConfigurationProxyHelper] def setUp(self): self.config_string = "" self.default_data = { "management-type": "canonical", "computer-title": "bound.to.lose", "hosted-landscape-host": "landscape.canonical.com", "hosted-account-name": "Sparklehorse", "hosted-password": "Vivadixiesubmarinetransmissionplot", "local-landscape-host": "the.local.machine", "local-account-name": "CrazyHorse", "local-password": "RustNeverSleeps"} super(StateTransitionTest, self).setUp() def test_load_data_transitions(self): """ Test that the L{ConfigurationModel} correctly changes state as we call L{load_data}. """ settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) self.assertTrue(isinstance(model.get_state(), VirginState)) model.load_data() self.assertTrue(isinstance(model.get_state(), InitialisedState)) initialised = model.get_state() model.load_data() self.assertTrue(isinstance(model.get_state(), InitialisedState)) self.assertIs(initialised, model.get_state()) def test_modifying_a_virgin_raises(self): """ Test that attempting a L{modify} a L{ConfigurationModel} in L{VirginState} raises a L{StateError}. """ settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) self.assertRaises(StateError, model.modify) def test_initialised_state_is_modifiable(self): """ Test that the L{ConfigurationModel} transitions to L{ModifiedState} whenever L{modify} is called on it in L{InitialisedState}. """ settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) model.load_data() self.assertEqual(CANONICAL_MANAGED, model.management_type) model.management_type = LOCAL_MANAGED self.assertEqual(LOCAL_MANAGED, model.management_type) model.modify() self.assertTrue(isinstance(model.get_state(), ModifiedState)) self.assertEqual(LOCAL_MANAGED, model.management_type) def test_modified_state_is_modifiable(self): """ Test that the L{ConfigurationModel} transitions to L{ModifiedState} whenever L{modify} is called on it in L{ModifiedState}. """ settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) model.load_data() model.modify() self.assertTrue(isinstance(model.get_state(), ModifiedState)) model.modify() self.assertTrue(isinstance(model.get_state(), ModifiedState)) def test_reverting_a_virgin_raises(self): """ Test that calling L{revert} on a L{ConfigurationModel} in L{VirginState} raises a L{StateError}. """ settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) self.assertRaises(StateError, model.revert) def test_initialiased_state_is_unrevertable(self): """ Test that calling L{revert} on a L{ConfigurationModel} in L{InitialisedState} raises a L{StateError}. """ settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) model.load_data() self.assertRaises(StateError, model.revert) def test_modified_state_is_revertable(self): """ Test that a L{ConfigurationModel} in L{ModifiedState} can be transitioned via L{revert} to L{InitialisedState}. 
""" settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) model.load_data() model.modify() model.revert() self.assertTrue(isinstance(model.get_state(), InitialisedState)) def test_reverting_reverts_data(self): """ Test that transitioning via L{revert} causes the original L{InitialisedState} to be restored. """ settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) model.load_data() self.assertEqual(HOSTED_LANDSCAPE_HOST, model.hosted_landscape_host) self.assertEqual("CrazyHorse", model.local_account_name) model.local_account_name = "bar" model.modify() self.assertEqual("bar", model.local_account_name) model.revert() self.assertTrue(isinstance(model.get_state(), InitialisedState)) self.assertEqual("CrazyHorse", model.local_account_name) def test_persisting_a_virgin_raises(self): """ Test that a L{ConfigurationModel} in L{VirginState} will raise a L{StateError} when you attempt to transition it with L{persist}. """ settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) self.assertRaises(StateError, model.persist) def test_persisting_initialised_state_raises(self): """ Test that a L{ConfigurationModel} in L{IntialisedState} will raise a L{StateError} when you attempt to transition it with L{persist}. """ settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) model.load_data() self.assertRaises(StateError, model.persist) def test_persisting_modified_is_allowed(self): """ Test that a L{ConfigurationModel} in L{ModifiedState} will allow itself to be transitioned with L{persist}. 
""" settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) model.load_data() model.modify() model.persist() self.assertTrue(isinstance(model.get_state(), InitialisedState)) def test_persisting_saves_data_to_uisettings(self): settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) model.load_data() self.assertEqual(CANONICAL_MANAGED, uisettings.get_management_type()) self.assertEqual("Sparklehorse", uisettings.get_hosted_account_name()) self.assertEqual("Vivadixiesubmarinetransmissionplot", uisettings.get_hosted_password()) self.assertEqual("the.local.machine", uisettings.get_local_landscape_host()) self.assertEqual("CrazyHorse", uisettings.get_local_account_name()) self.assertEqual("RustNeverSleeps", uisettings.get_local_password()) model.management_type = LOCAL_MANAGED model.hosted_account_name = "ThomasPaine" model.hosted_password = "TheAgeOfReason" model.local_landscape_host = "another.local.machine" model.local_account_name = "ThomasHobbes" model.local_password = "TheLeviathan" model.modify() self.assertTrue(isinstance(model.get_state(), ModifiedState)) model.persist() self.assertTrue(isinstance(model.get_state(), InitialisedState)) self.assertEqual(LOCAL_MANAGED, uisettings.get_management_type()) self.assertEqual("ThomasPaine", uisettings.get_hosted_account_name()) self.assertEqual("TheAgeOfReason", uisettings.get_hosted_password()) self.assertEqual("another.local.machine", uisettings.get_local_landscape_host()) self.assertEqual("ThomasHobbes", uisettings.get_local_account_name()) self.assertEqual("TheLeviathan", uisettings.get_local_password()) def test_any_transition_on_exited_state_raises(self): """ Test that we cannot transition the L{ExitedState} at all. """ def fake_exit(): """ This just avoids raising L{exceptions.SysExit} during __init__. """ state = ExitedState(None, None, None, exit_method=fake_exit) self.assertRaises(StateError, state.load_data) self.assertRaises(StateError, state.modify) self.assertRaises(StateError, state.persist) self.assertRaises(StateError, state.revert) if dbus_test_should_skip: skip = dbus_skip_message class StateTransitionWithExistingConfigTest(LandscapeTest): """ Test that we handle existing configuration data correctly when transitioning through L{ConfigurationModel} states. 
""" helpers = [ConfigurationProxyHelper] def setUp(self): self.config_string = ( "[client]\n" "data_path = /var/lib/landscape/client/\n" "http_proxy = http://proxy.localdomain:3192\n" "tags = a_tag\n" "url = https://landscape.canonical.com/message-system\n" "account_name = Sparklehorse\n" "registration_key = Vivadixiesubmarinetransmissionplot\n" "computer_title = baz\n" "https_proxy = https://proxy.localdomain:6192\n" "ping_url = http://landscape.canonical.com/ping\n") self.default_data = { "management-type": "canonical", "computer-title": "bound.to.lose", "hosted-landscape-host": "landscape.canonical.com", "hosted-account-name": "Sparklehorse", "hosted-password": "Vivadixiesubmarinetransmissionplot", "local-landscape-host": "the.local.machine", "local-account-name": "CrazyHorse", "local-password": "RustNeverSleeps"} super(StateTransitionWithExistingConfigTest, self).setUp() def test_persisting_saves_data_to_proxy(self): settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) model.load_data() self.assertEqual("Sparklehorse", self.proxy.account_name) self.assertEqual("Vivadixiesubmarinetransmissionplot", self.proxy.registration_key) model.management_type = LOCAL_MANAGED model.local_account_name = "ThomasPaine" model.local_password = "TheAgeOfReason" model.modify() self.assertTrue(isinstance(model.get_state(), ModifiedState)) model.persist() self.assertTrue(isinstance(model.get_state(), InitialisedState)) self.assertEqual(LOCAL_MANAGED, model.management_type) self.assertEqual("https://the.local.machine/message-system", self.proxy.url) self.assertEqual("http://the.local.machine/ping", self.proxy.ping_url) self.assertEqual("ThomasPaine", self.proxy.account_name) self.assertEqual("TheAgeOfReason", self.proxy.registration_key) if dbus_test_should_skip: skip = dbus_skip_message landscape-client-14.01/landscape/ui/model/configuration/tests/test_proxy.py0000644000175000017500000001500312301414317027006 0ustar andreasandreasfrom landscape.configuration import LandscapeSetupConfiguration from landscape.ui.tests.helpers import ( ConfigurationProxyHelper, dbus_test_should_skip, dbus_skip_message) if not dbus_test_should_skip: from landscape.ui.model.configuration.mechanism import ( PermissionDeniedByPolicy) import dbus from landscape.tests.helpers import LandscapeTest class AuthenticationFailureTest(LandscapeTest): """ Test that an authentication failure is handled correctly. """ helpers = [ConfigurationProxyHelper] def setUp(self): self.config_string = "" super(AuthenticationFailureTest, self).setUp() def test_failed_authentication(self): """ Test that load returns False when authentication fails. """ def fake_policy_failure_load(arglist): """ This simulates what you see if you click "Cancel" or give the wrong credentials 3 times when L{PolicyKit} challenges you. """ raise PermissionDeniedByPolicy() def fake_timeout_failure_load(arglist): """ This simulates what you see if you take no action when L{PolicyKit} challenges you. """ class FakeNoReply(dbus.DBusException): """ Simulate a L{DBus} L{NoReply} exception. 
""" _dbus_error_name = "org.freedesktop.DBus.Error.NoReply" raise FakeNoReply() self.mechanism.load = fake_policy_failure_load self.assertFalse(self.proxy.load([])) self.mechanism.load = fake_timeout_failure_load self.assertFalse(self.proxy.load([])) if dbus_test_should_skip: skip = dbus_skip_message class ConfigurationProxyInterfaceTest(LandscapeTest): """ Test that we define the correct interface to a L{LandscapeSetupConfiguration} by really using one as the interface. """ helpers = [ConfigurationProxyHelper] def setUp(self): self.config_string = "[client]\n" \ "data_path = /var/lib/landscape/client/\n" \ "http_proxy = http://proxy.localdomain:3192\n" \ "tags = a_tag\n" \ "url = https://landscape.canonical.com/message-system\n" \ "account_name = foo\n" \ "registration_key = boink\n" \ "computer_title = baz\n" \ "https_proxy = https://proxy.localdomain:6192\n" \ "ping_url = http://landscape.canonical.com/ping\n" super(ConfigurationProxyInterfaceTest, self).setUp() def test_method_docstrings(self): """ Test that we pass through the docstrings for methods. """ self.assertEqual(self.proxy.load.__doc__, LandscapeSetupConfiguration.load.__doc__) self.assertEqual(self.proxy.reload.__doc__, LandscapeSetupConfiguration.reload.__doc__) self.assertEqual(self.proxy.write.__doc__, LandscapeSetupConfiguration.write.__doc__) def test_account_name(self): """ Test that we can get and set an account name via the configuration proxy. """ self.assertEqual("foo", self.proxy.account_name) self.proxy.account_name = "bar" self.assertEqual("bar", self.proxy.account_name) self.assertEqual("bar", self.config.account_name) def test_computer_title(self): """ Test that we can get and set a computer title via the configuration proxy. """ self.assertEqual("baz", self.proxy.computer_title) self.proxy.computer_title = "bar" self.assertEqual("bar", self.proxy.computer_title) self.assertEqual("bar", self.config.computer_title) def test_data_path(self): """ Test that we can get and set the data path via the configuration proxy. """ self.assertEqual("/var/lib/landscape/client/", self.proxy.data_path) self.proxy.data_path = "bar" self.assertEqual("bar", self.proxy.data_path) self.assertEqual("bar", self.config.data_path) def test_http_proxy(self): """ Test that we can get and set the HTTP proxy via the configuration proxy. """ self.assertEqual("http://proxy.localdomain:3192", self.proxy.http_proxy) self.proxy.http_proxy = "bar" self.assertEqual("bar", self.proxy.http_proxy) self.assertEqual("bar", self.config.http_proxy) def test_https_proxy(self): """ Test that we can get and set the HTTPS proxy via the configuration proxy. """ self.assertEqual("https://proxy.localdomain:6192", self.proxy.https_proxy) self.proxy.https_proxy = "bar" self.assertEqual("bar", self.proxy.https_proxy) self.assertEqual("bar", self.config.https_proxy) def test_ping_url(self): """ Test that we can get and set the ping URL via the configuration proxy. """ self.assertEqual("http://landscape.canonical.com/ping", self.proxy.ping_url) self.proxy.ping_url = "bar" self.assertEqual("bar", self.proxy.ping_url) self.assertEqual("bar", self.config.ping_url) def test_registration_key(self): """ Test that we can get and set the registration key via the configuration proxy. """ self.assertEqual("boink", self.proxy.registration_key) self.proxy.registration_key = "bar" self.assertEqual("bar", self.proxy.registration_key) self.assertEqual("bar", self.config.registration_key) def test_tags(self): """ Test that we can get and set the tags via the configuration proxy. 
""" self.assertEqual("a_tag", self.proxy.tags) self.proxy.tags = "bar" self.assertEqual("bar", self.proxy.tags) self.assertEqual("bar", self.config.tags) def test_url(self): """ Test that we can get and set the URL via the configuration proxy. """ self.assertEqual("https://landscape.canonical.com/message-system", self.proxy.url) self.proxy.url = "bar" self.assertEqual("bar", self.proxy.url) self.assertEqual("bar", self.config.url) def test_exit(self): """ Test that we can cause the mechanism to exit. """ self.assertRaises(SystemExit, self.proxy.exit, asynchronous=False) if dbus_test_should_skip: skip = dbus_skip_message landscape-client-14.01/landscape/ui/model/configuration/tests/__init__.py0000644000175000017500000000000012301414317026314 0ustar andreasandreaslandscape-client-14.01/landscape/ui/model/configuration/uisettings.py0000644000175000017500000000371412301414317025630 0ustar andreasandreasclass UISettings(object): """ A very thin wrapper around L{GSettings} to avoid having to know the L{BaseKey} and type information elsewhere. In some future version it would be right to bind to change events here so we can react to people changing the settings in dconf, for now that is overkill. """ BASE_KEY = "com.canonical.landscape-client-settings" def __init__(self, settings): self.settings = settings.new(self.BASE_KEY) def get_management_type(self): return self.settings.get_string("management-type") def set_management_type(self, value): self.settings.set_string("management-type", value) def get_computer_title(self): return self.settings.get_string("computer-title") def set_computer_title(self, value): self.settings.set_string("computer-title", value) def get_hosted_landscape_host(self): return self.settings.get_string("hosted-landscape-host") def get_hosted_account_name(self): return self.settings.get_string("hosted-account-name") def set_hosted_account_name(self, value): self.settings.set_string("hosted-account-name", value) def get_hosted_password(self): return self.settings.get_string("hosted-password") def set_hosted_password(self, value): self.settings.set_string("hosted-password", value) def get_local_landscape_host(self): return self.settings.get_string("local-landscape-host") def set_local_landscape_host(self, value): self.settings.set_string("local-landscape-host", value) def get_local_account_name(self): return self.settings.get_string("local-account-name") def set_local_account_name(self, value): self.settings.set_string("local-account-name", value) def get_local_password(self): return self.settings.get_string("local-password") def set_local_password(self, value): self.settings.set_string("local-password", value) landscape-client-14.01/landscape/ui/model/configuration/__init__.py0000644000175000017500000000000012301414317025152 0ustar andreasandreaslandscape-client-14.01/landscape/ui/model/configuration/mechanism.py0000644000175000017500000001047212301414317025375 0ustar andreasandreasimport sys import dbus import dbus.service from landscape.ui.lib.polkit import PolicyKitMechanism, POLICY_NAME SERVICE_NAME = "com.canonical.LandscapeClientSettings" INTERFACE_NAME = "com.canonical.LandscapeClientSettings.ConfigurationInterface" OBJECT_PATH = "/com/canonical/LandscapeClientSettings/ConfigurationInterface" class PermissionDeniedByPolicy(dbus.DBusException): _dbus_error_name = \ "com.canonical.LandscapeClientSettings.PermissionDeniedByPolicy" class ConfigurationMechanism(PolicyKitMechanism): """ L{ConfigurationMechanism} provides access to the L{LandscapeSetupConfiguration} object via 
DBus with access control implemented via PolicyKit policy. The use of DBus results from the use of PolicyKit, not the other way around, and is done that way because that is considered to be the Right Thing for Ubuntu Desktop circa January 2012. """ def __init__(self, config, bus_name, bypass=False, conn=None): super(ConfigurationMechanism, self).__init__( OBJECT_PATH, bus_name, PermissionDeniedByPolicy, bypass=bypass, conn=conn) self._config = config @dbus.service.method(INTERFACE_NAME, in_signature="as", out_signature="", sender_keyword="sender", connection_keyword="conn") def load(self, arglist, sender=None, conn=None): if self._is_allowed_by_policy(sender, conn, POLICY_NAME): if len(arglist) > 0: self._config.load(arglist.split(chr(0x1e))) else: self._config.load([]) return @dbus.service.method(INTERFACE_NAME, in_signature="", out_signature="", sender_keyword="sender", connection_keyword="conn") def reload(self, sender=None, conn=None): if self._is_allowed_by_policy(sender, conn, POLICY_NAME): self._config.reload() @dbus.service.method(INTERFACE_NAME, in_signature="", out_signature="", sender_keyword="sender", connection_keyword="conn") def write(self, sender=None, conn=None): if self._is_allowed_by_policy(sender, conn, POLICY_NAME): self._config.write() @dbus.service.method(INTERFACE_NAME, in_signature="", out_signature="s", sender_keyword="sender", connection_keyword="conn") def get_config_filename(self, sender=None, conn=None): return self._config.get_config_filename() @dbus.service.method(INTERFACE_NAME, in_signature="s", out_signature="s", sender_keyword="sender", connection_keyword="conn") def get(self, name, sender=None, conn=None): """ Return the configuration option value associated with L{name} from the L{LandscapeSetupConfiguration}. """ if self._is_allowed_by_policy(sender, conn, POLICY_NAME): try: value = self._config.get(name) except AttributeError: return "" if value is None: return "" return str(value) return "" @dbus.service.method(INTERFACE_NAME, in_signature="ss", out_signature="", sender_keyword="sender", connection_keyword="conn") def set(self, name, value, sender=None, conn=None): """ Set the configuration option associated with L{name} to L{value} in the L{LandscapeSetupConfiguration}. """ if self._is_allowed_by_policy(sender, conn, POLICY_NAME): # Underlying _config does not support unicode so convert to ascii value = unicode(value).encode("ascii", errors="replace") setattr(self._config, name, value) @dbus.service.method(INTERFACE_NAME, in_signature="", out_signature="", sender_keyword="sender", connection_keyword="conn") def exit(self, sender=None, conn=None): """ Exit this process. """ sys.exit(0) landscape-client-14.01/landscape/ui/model/tests/0000755000175000017500000000000012301414317021346 5ustar andreasandreaslandscape-client-14.01/landscape/ui/model/tests/__init__.py0000644000175000017500000000000012301414317023445 0ustar andreasandreaslandscape-client-14.01/landscape/ui/model/registration/0000755000175000017500000000000012301414317022716 5ustar andreasandreaslandscape-client-14.01/landscape/ui/model/registration/proxy.py0000644000175000017500000001223512301414317024454 0ustar andreasandreasimport dbus import landscape.ui.model.registration.mechanism as mechanism class RegistrationProxy(object): """ L{RegistrationProxy} allows the use of the L{RegistrationMechanism} via DBus without having to know about DBus. This in turn allows controller code to remain agnostic to the implementation of registration. 
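Typical construction wires plain callables to the registration signals, e.g. (an illustrative sketch; the callbacks and the config path are hypothetical):

    def handle_success():
        print "registered"

    def handle_error(message):
        print message

    proxy = RegistrationProxy(on_register_succeed=handle_success,
                              on_register_error=handle_error)
    proxy.register("/etc/landscape/client.conf")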
""" def __init__(self, on_register_notify=None, on_register_error=None, on_register_succeed=None, on_register_fail=None, on_disable_succeed=None, on_disable_fail=None, bus=None): self._bus = None self._interface = None self._on_register_notify = on_register_notify self._on_register_error = on_register_error self._on_register_succeed = on_register_succeed self._on_register_fail = on_register_fail self._on_disable_succeed = on_disable_succeed self._on_disable_fail = on_disable_fail self._setup_interface(bus) def _setup_interface(self, bus): """ Redefining L{_setup_interface} allows us to bypass DBus for more convenient testing in some instances. """ if bus is None: self._bus = dbus.SystemBus() else: self._bus = bus self._remote_object = self._bus.get_object(mechanism.SERVICE_NAME, mechanism.OBJECT_PATH) self._interface = dbus.Interface(self._remote_object, mechanism.INTERFACE_NAME) def _exit_handler_wrapper(self, exit_handler): def wrapped_exit_handler(message): self._remove_handlers() exit_handler(message) return wrapped_exit_handler def _register_handlers(self): self._handlers = [] if self._on_register_notify: self._handlers.append( self._bus.add_signal_receiver( self._on_register_notify, signal_name="register_notify", dbus_interface=mechanism.INTERFACE_NAME, bus_name=None, path=mechanism.OBJECT_PATH)) if self._on_register_error: self._handlers.append( self._bus.add_signal_receiver( self._on_register_error, signal_name="register_error", dbus_interface=mechanism.INTERFACE_NAME, bus_name=None, path=mechanism.OBJECT_PATH)) if self._on_register_succeed: self._handlers.append( self._bus.add_signal_receiver( self._exit_handler_wrapper(self._on_register_succeed), signal_name="register_succeed", dbus_interface=mechanism.INTERFACE_NAME, bus_name=None, path=mechanism.OBJECT_PATH)) if self._on_register_fail: self._handlers.append( self._bus.add_signal_receiver( self._exit_handler_wrapper(self._on_register_fail), signal_name="register_fail", dbus_interface=mechanism.INTERFACE_NAME, bus_name=None, path=mechanism.OBJECT_PATH)) if self._on_disable_succeed: self._handlers.append( self._bus.add_signal_receiver( self._exit_handler_wrapper(self._on_disable_succeed), signal_name="disable_succeed", dbus_interface=mechanism.INTERFACE_NAME, bus_name=None, path=mechanism.OBJECT_PATH)) if self._on_disable_fail: self._handlers.append( self._bus.add_signal_receiver( self._exit_handler_wrapper(self._on_disable_fail), signal_name="disable_fail", dbus_interface=mechanism.INTERFACE_NAME, bus_name=None, path=mechanism.OBJECT_PATH)) def _remove_handlers(self): for handler in self._handlers: self._bus.remove_signal_receiver(handler) def challenge(self): return self._interface.challenge() def register(self, config_path): self._register_handlers() try: result, message = self._interface.register(config_path) except dbus.DBusException, e: if e.get_dbus_name() != "org.freedesktop.DBus.Error.NoReply": raise else: result = False message = "Registration timed out." if result: self._on_register_succeed() else: self._on_register_error(message) return result def disable(self): self._register_handlers() result = self._interface.disable() if result: self._on_disable_succeed() else: self._on_disable_fail() return result def exit(self): """ Cause the mechanism to exit. 
""" try: self._interface.exit() except dbus.DBusException, e: if e.get_dbus_name() != "org.freedesktop.DBus.Error.NoReply": raise landscape-client-14.01/landscape/ui/model/registration/tests/0000755000175000017500000000000012301414317024060 5ustar andreasandreaslandscape-client-14.01/landscape/ui/model/registration/tests/test_mechanism.py0000644000175000017500000000654412301414317027446 0ustar andreasandreasimport dbus from landscape.tests.helpers import LandscapeTest from landscape.ui.tests.helpers import dbus_test_should_skip, dbus_skip_message if not dbus_test_should_skip: from landscape.ui.model.registration.mechanism import ( RegistrationMechanism, INTERFACE_NAME) class MechanismTest(LandscapeTest): """ L{MechanismTest} mocks out the actual registration process and allows us to simply and quickly check the outputs of registration that are relied on elsewhere. """ def setUp(self): super(MechanismTest, self).setUp() dbus.mainloop.glib.DBusGMainLoop(set_as_default=True) bus = dbus.SessionBus(private=True) self.bus_name = dbus.service.BusName(INTERFACE_NAME, bus) self.mechanism = None def tearDown(self): if not self.mechanism is None: self.mechanism.remove_from_connection() super(MechanismTest, self).tearDown() def make_fake_registration(self, succeed, message=""): """ Return a fake registration method that will fail or succeed by returning L{succeed} (a boolean). """ def _do_registration(this, config_path): return succeed, message return _do_registration def make_fake_disabling(self, succeed): """ Return a fake disabling method that will fail or succeed by returning L{succeed} (a boolean). """ def _do_disabling(this): return succeed return _do_disabling def test_registration_succeed(self): """ Test we get appropriate feedback from a successful connection when we call L{register} synchronously. """ RegistrationMechanism._do_registration = self.make_fake_registration( True) self.mechanism = RegistrationMechanism(self.bus_name) self.assertEqual( (True, "Registration message sent to Landscape server.\n"), self.mechanism.register("foo")) def test_registration_fail(self): """ Test we get appropriate feedback from a failed connection when we call L{register} synchronously. """ RegistrationMechanism._do_registration = self.make_fake_registration( False, "boom") self.mechanism = RegistrationMechanism(self.bus_name) self.assertEqual((False, "boom"), self.mechanism.register("foo")) def test_disabling_succeed(self): """ Test we get True from a failed disabling when we call L{disable} synchronously. """ RegistrationMechanism._do_disabling = self.make_fake_disabling(True) self.mechanism = RegistrationMechanism(self.bus_name) self.assertTrue(self.mechanism.disable()) def test_disabling_fail(self): """ Test we get False from a failed disabling when we call L{disable} synchronously. """ RegistrationMechanism._do_disabling = self.make_fake_disabling(False) self.mechanism = RegistrationMechanism(self.bus_name) self.assertFalse(self.mechanism.disable()) def test_exit(self): """ Test that we cause the mechanism to exit. 
""" self.mechanism = RegistrationMechanism(self.bus_name) self.assertRaises(SystemExit, self.mechanism.exit) if dbus_test_should_skip: skip = dbus_skip_message landscape-client-14.01/landscape/ui/model/registration/tests/test_proxy.py0000644000175000017500000001105312301414317026652 0ustar andreasandreasimport dbus from landscape.tests.helpers import LandscapeTest from landscape.ui.tests.helpers import dbus_test_should_skip, dbus_skip_message if not dbus_test_should_skip: from landscape.ui.model.registration.mechanism import ( RegistrationMechanism, INTERFACE_NAME) from landscape.ui.model.registration.proxy import RegistrationProxy class TimeoutTest(LandscapeTest): """ L{TimeoutTest} bypasses DBus and tests with a faked method that raises a timeout exception. """ def setUp(self): super(TimeoutTest, self).setUp() self.error_handler_messages = [] class FakeBus(object): """ Hello, I will be your fake DBus for this flight. """ class FakeTimeoutException(dbus.DBusException): _dbus_error_name = "org.freedesktop.DBus.Error.NoReply" class FakeFailyMechanism(object): def register(this, config_path, reply_handler=None, error_handler=None): raise FakeTimeoutException() def fake_setup_interface(this, bus): this._interface = FakeFailyMechanism() this._bus = bus def fake_register_handlers(this): pass def fake_remove_handlers(this): pass def fake_error_handler(message): self.error_handler_messages.append(message) RegistrationProxy._setup_interface = fake_setup_interface RegistrationProxy._register_handlers = fake_register_handlers RegistrationProxy._remove_handlers = fake_remove_handlers self.proxy = RegistrationProxy(bus=FakeBus(), on_register_error=fake_error_handler) def tearDown(self): self.error_handler_messages = [] super(TimeoutTest, self).tearDown() def test_register(self): """ Test that the proxy calls through to the underlying interface and correctly performs registration. """ self.proxy.register("foo") self.assertEqual(1, len(self.error_handler_messages)) [message] = self.error_handler_messages self.assertEqual("Registration timed out.", message) if dbus_test_should_skip: skip = dbus_skip_message class RegistrationProxyTest(LandscapeTest): """ L{RegistrationProxyTest} bypasses DBus to simply check the interface between the proxy and the mechanism it would usually contact via DBus. """ def setUp(self): super(RegistrationProxyTest, self).setUp() dbus.mainloop.glib.DBusGMainLoop(set_as_default=True) bus = dbus.SessionBus(private=True) bus_name = dbus.service.BusName(INTERFACE_NAME, bus) def fake_do__registration(this, config_path): return True, "" def fake_do__disabling(this): return True RegistrationMechanism._do_registration = fake_do__registration RegistrationMechanism._do_disabling = fake_do__disabling self.mechanism = RegistrationMechanism(bus_name) def fake_setup_interface(this, bus): """ This just allows us to test without actually relying on dbus. 
""" this._interface = self.mechanism def fake_register_handlers(this): pass def fake_remove_handlers(this): pass def fake_callback(message=None): pass RegistrationProxy._setup_interface = fake_setup_interface RegistrationProxy._register_handlers = fake_register_handlers RegistrationProxy._remove_handlers = fake_remove_handlers self.proxy = RegistrationProxy(fake_callback, fake_callback, fake_callback, fake_callback, fake_callback, fake_callback) def tearDown(self): self.mechanism.remove_from_connection() super(RegistrationProxyTest, self).tearDown() def test_register(self): """ Test that the proxy calls through to the underlying interface and correctly performs registration. """ self.assertEqual(True, self.proxy.register("foo")) def test_disable(self): """ Test that the proxy calls through to the underlying interface and correctly performs disabling. """ self.assertEqual(True, self.proxy.disable()) def test_exit(self): """ Test that we can cause the mechanism to exit. """ self.assertRaises(SystemExit, self.proxy.exit) if dbus_test_should_skip: skip = dbus_skip_message landscape-client-14.01/landscape/ui/model/registration/tests/__init__.py0000644000175000017500000000000012301414317026157 0ustar andreasandreaslandscape-client-14.01/landscape/ui/model/registration/__init__.py0000644000175000017500000000000012301414317025015 0ustar andreasandreaslandscape-client-14.01/landscape/ui/model/registration/mechanism.py0000644000175000017500000001461212301414317025240 0ustar andreasandreasimport subprocess import sys import os import dbus import dbus.service import dbus.glib from landscape.ui.lib.polkit import PolicyKitMechanism, POLICY_NAME SERVICE_NAME = "com.canonical.LandscapeClientRegistration" INTERFACE_NAME = \ "com.canonical.LandscapeClientRegistration.RegistrationInterface" OBJECT_PATH = \ "/com/canonical/LandscapeClientRegistration/RegistrationInterface" class PermissionDeniedByPolicy(dbus.DBusException): _dbus_error_name = \ "com.canonical.LandscapeClientRegistration.PermissionDeniedByPolicy" class RegistrationError(dbus.DBusException): _dbus_error_name = \ "com.canonical.LandscapeClientRegistration.RegistrationError" class RegistrationMechanism(PolicyKitMechanism): """ L{RegistrationMechanism} is a mechanism for invoking and observing client registration over DBus. It utilises PolicyKit to ensure that only administrative users may use it. """ def __init__(self, bus_name, bypass=False, conn=None): super(RegistrationMechanism, self).__init__( OBJECT_PATH, bus_name, PermissionDeniedByPolicy, bypass=bypass, conn=conn) self.process = None self.message_queue = [] self.error_queue = [] def _do_registration(self, config_path): self.register_notify("Trying to register ...\n") cmd = ["landscape-config", "--silent", "-c", os.path.abspath(config_path)] try: message = subprocess.check_output(cmd, stderr=subprocess.STDOUT) self.register_notify(message) return True, message except subprocess.CalledProcessError, error: wait_phrase = "Please wait... " wait_phrase_index = error.output.find(wait_phrase) if wait_phrase_index > -1: message = error.output[wait_phrase_index + len(wait_phrase):] else: message = "Landscape configuration failed.\n%s" % error.output self.register_error(message) return False, message @dbus.service.signal(dbus_interface=INTERFACE_NAME, signature='s') def register_notify(self, message): """ L{register_notify} is a signal sent to subscribers. It is not necessary for any actual work to occur in the method as it is called for the effect of invoking its decorator. 
""" @dbus.service.signal(dbus_interface=INTERFACE_NAME, signature='s') def register_error(self, message): """ L{register_error} is a signal sent to subscribers. It is not necessary for any actual work to occur in the method as it is called for the effect of invoking its decorator. """ @dbus.service.signal(dbus_interface=INTERFACE_NAME, signature='s') def register_succeed(self, message): """ L{register_succeed} is a signal sent to subscribers. It is not necessary for any actual work to occur in the method as it is called for the effect of invoking its decorator. """ @dbus.service.signal(dbus_interface=INTERFACE_NAME, signature='s') def register_fail(self, message): """ L{register_fail} is a signal sent to subscribers. It is not necessary for any actual work to occur in the method as it is called for the effect of invoking its decorator. """ @dbus.service.method(INTERFACE_NAME, in_signature="", out_signature="b", sender_keyword="sender", connection_keyword="conn") def challenge(self, sender=None, conn=None): """ Safely check if we can escalate permissions. """ try: return self._is_allowed_by_policy(sender, conn, POLICY_NAME) except PermissionDeniedByPolicy: return False @dbus.service.method(INTERFACE_NAME, in_signature="s", out_signature="(bs)", sender_keyword="sender", connection_keyword="conn") def register(self, config_path, sender=None, conn=None): if self._is_allowed_by_policy(sender, conn, POLICY_NAME): succeed, message = self._do_registration(config_path) if succeed: message = "Registration message sent to Landscape server.\n" self.register_succeed(message) return (True, message) else: self.register_fail(message) return (False, message) def _do_disabling(self): cmd = ["landscape-config", "--disable"] try: subprocess.check_output(cmd, stderr=subprocess.STDOUT) return True except subprocess.CalledProcessError: return False @dbus.service.signal(dbus_interface=INTERFACE_NAME, signature='') def disable_succeed(self): """ L{disable_succeed} is a signal sent to subscribers. It is not necessary for any actual work to occur in the method as it is called for the effect of invoking its decorator. """ @dbus.service.signal(dbus_interface=INTERFACE_NAME, signature='') def disable_fail(self): """ L{disable_fail} is a signal sent to subscribers. It is not necessary for any actual work to occur in the method as it is called for the effect of invoking its decorator. """ @dbus.service.method(INTERFACE_NAME, in_signature="", out_signature="b", sender_keyword="sender", connection_keyword="conn") def disable(self, sender=None, conn=None): if self._is_allowed_by_policy(sender, conn, POLICY_NAME): if self._do_disabling(): self.disable_succeed() return True else: self.disable_fail() return False @dbus.service.method(INTERFACE_NAME, in_signature="", out_signature="", sender_keyword="sender", connection_keyword="conn") def exit(self, sender=None, conn=None): """ Exit this process. 
""" sys.exit(0) landscape-client-14.01/landscape/ui/model/__init__.py0000644000175000017500000000000012301414317022303 0ustar andreasandreaslandscape-client-14.01/landscape/ui/lib/0000755000175000017500000000000012301414317017652 5ustar andreasandreaslandscape-client-14.01/landscape/ui/lib/polkit.py0000644000175000017500000000743712301414317021541 0ustar andreasandreasimport dbus import dbus.service import dbus.glib from gi.repository import GObject POLICY_NAME = "com.canonical.LandscapeClientSettings.configure" class PolicyKitMechanism(dbus.service.Object): """ L{PolicyKitMechanism} is a specialised L{dbus.service.Object} which provides PolicyKit authorization checks for a provided DBus bus name and object path. Subclasses must therefore call l{__init__} here with their object path, bus name and an error class to be raised when permission escalation fails. @type object_path: string @param object_path: The object path to register the subclass with. @type bus_name: dbus.service.BusName @param bus_name: The L{BusName} to the register the subclass with. @type permission_error: dbus.DBusException @param permission_error: A L{dbus.DBusException} to be raised when PolicyKit authorisation fails for the client. """ def __init__(self, object_path, bus_name, permission_error, bypass=False, conn=None): super(PolicyKitMechanism, self).__init__( conn, object_path, bus_name) self.permission_error = permission_error self.dbus_info = None self.polkit = None self.bypass = bypass def _get_polkit_authorization(self, pid, privilege): """ Check that the process with id L{pid} is allowed, by policy to utilise the L{privilege }. If the class was initialised with L{bypass}=True then just say it was authorised without checking (useful for testing). """ if self.bypass: return (True, None, "Bypass") polkit = dbus.Interface(dbus.SystemBus().get_object( 'org.freedesktop.PolicyKit1', '/org/freedesktop/PolicyKit1/Authority', False), 'org.freedesktop.PolicyKit1.Authority') subject = ('unix-process', {'pid': dbus.UInt32(pid, variant_level=1), 'start-time': dbus.UInt64(0, variant_level=1)}) action_id = privilege details = {"": ""} # <- empty strings allow type inference flags = dbus.UInt32(1) cancellation_id = "" return polkit.CheckAuthorization( subject, action_id, details, flags, cancellation_id, timeout=15) def _get_peer_pid(self, sender, conn): """ Get the process ID of the L{sender}. """ if self.dbus_info is None: self.dbus_info = dbus.Interface( conn.get_object('org.freedesktop.DBus', '/org/freedesktop/DBus/Bus', False), 'org.freedesktop.DBus') return self.dbus_info.GetConnectionUnixProcessID(sender) def _is_local_call(self, sender, conn): """ Check if this is a local call, implying it is within a secure context. """ return (sender is None and conn is None) def _is_allowed_by_policy(self, sender, conn, privilege): """ Check if we are already in a secure context, and if not check if the policy associated with L{privilege} both exists and allows the peer to utilise it. As a side effect, if escalation of privileges is required then this will occur and a challenge will be generated if needs be. """ if self._is_local_call(sender, conn): return True peer_pid = self._get_peer_pid(sender, conn) (is_auth, _, details) = self._get_polkit_authorization(peer_pid, privilege) if not is_auth: raise self.permission_error(privilege) return True def listen(): """ Invoke a L{gobject.MainLoop} to process incoming DBus events. 
""" mainloop = GObject.MainLoop() mainloop.run() landscape-client-14.01/landscape/ui/lib/__init__.py0000644000175000017500000000000012301414317021751 0ustar andreasandreaslandscape-client-14.01/landscape/ui/view/0000755000175000017500000000000012301414317020056 5ustar andreasandreaslandscape-client-14.01/landscape/ui/view/tests/0000755000175000017500000000000012301414317021220 5ustar andreasandreaslandscape-client-14.01/landscape/ui/view/tests/test_configuration.py0000644000175000017500000006142212301414317025505 0ustar andreasandreasimport sys from landscape.ui.tests.helpers import ( ConfigurationProxyHelper, FakeGSettings, dbus_test_should_skip, dbus_skip_message, simulate_gtk_key_release, simulate_gtk_paste) if not dbus_test_should_skip: from gi.repository import Gtk from landscape.ui.view.configuration import ( ClientSettingsDialog, sanitise_host_name, is_valid_host_name) from landscape.ui.controller.configuration import ConfigController import landscape.ui.model.configuration.state from landscape.ui.model.configuration.state import ( COMPUTER_TITLE, ConfigurationModel) from landscape.ui.model.configuration.uisettings import UISettings from landscape.tests.helpers import LandscapeTest class ViewFunctionsTest(LandscapeTest): def test_sanitise_host_name(self): """ Test UI level host_name sanitation. """ self.assertEqual("foo.bar", sanitise_host_name(" foo.bar")) self.assertEqual("foo.bar", sanitise_host_name("foo.bar ")) self.assertEqual("foo.bar", sanitise_host_name(" foo.bar ")) def test_is_valid_host_name_ok(self): """ Test that valid host names cause L{is_valid_host_name} to return L{True}. """ self.assertTrue(is_valid_host_name("a")) self.assertTrue(is_valid_host_name("a.b")) self.assertTrue(is_valid_host_name("a.b.c")) self.assertTrue(is_valid_host_name("stop-squark")) self.assertTrue(is_valid_host_name("stop-squark.teale.DE")) self.assertTrue(is_valid_host_name("a2.b3.c4")) def test_is_valid_host_name_bad(self): """ Test that invalid host names cause L{is_valid_host_name} to return L{False}. """ self.assertFalse(is_valid_host_name(".a")) self.assertFalse(is_valid_host_name("a.")) self.assertFalse(is_valid_host_name("a b")) self.assertFalse(is_valid_host_name("a .b")) self.assertFalse(is_valid_host_name("a. b")) def test_is_valid_host_name_unicode(self): """ Test that host names containing Unicode cause L{is_valid_host_name} to return L{False}. 
""" self.assertFalse(is_valid_host_name(u"\xc3a")) if dbus_test_should_skip: skip = dbus_skip_message class ConfigurationViewTest(LandscapeTest): helpers = [ConfigurationProxyHelper] def setUp(self): self.default_data = {"management-type": "canonical", "computer-title": "", "hosted-landscape-host": "", "hosted-account-name": "", "hosted-password": "", "local-landscape-host": "", "local-account-name": "", "local-password": ""} self.config_string = ( "[client]\n" "data_path = %s\n" "http_proxy = http://proxy.localdomain:3192\n" "tags = a_tag\n" "url = https://landscape.canonical.com/message-system\n" "account_name = foo\n" "registration_key = bar\n" "computer_title = baz\n" "https_proxy = https://proxy.localdomain:6192\n" "ping_url = http://landscape.canonical.com/ping\n" % sys.path[0]) super(ConfigurationViewTest, self).setUp() landscape.ui.model.configuration.state.DEFAULT_DATA[COMPUTER_TITLE] \ = "me.here.com" settings = FakeGSettings(data=self.default_data) self.uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=self.uisettings) self.controller = ConfigController(model) def run_gtk_eventloop(self): """Run the Gtk event loop until all events have been processed.""" while Gtk.events_pending(): Gtk.main_iteration() def assert_paste_data_saved(self, dialog, combo, widget, attribute): """ Paste text into specified widget then verify data is saved. """ # Switch to local mode dialog.use_type_combobox.set_active(combo) self.run_gtk_eventloop() simulate_gtk_paste(widget, "pasted text") self.run_gtk_eventloop() self.assertTrue(self.controller.is_modified) self.assertEqual("pasted text", getattr(self.controller, attribute)) dialog.revert(None) self.run_gtk_eventloop() self.assertFalse(self.controller.is_modified) def test_init(self): """ Test that we correctly initialise the L{ConfigurationView} correctly from the controller. """ dialog = ClientSettingsDialog(self.controller) content_area = dialog.get_content_area() self.assertEqual("preferences-management-service", dialog.get_default_icon_name()) children = content_area.get_children() self.assertEqual(len(children), 2) box = children[0] self.assertIsInstance(box, Gtk.Box) self.assertEqual(1, dialog.use_type_combobox.get_active()) def test_on_combobox_changed(self): """ Test that changes to the active selection in L{use_type_combobox} result in the correct panel becoming active and visible. """ dialog = ClientSettingsDialog(self.controller) iter = dialog.liststore.get_iter(0) no_service_frame = dialog.liststore.get(iter, 2)[0] iter = dialog.liststore.get_iter(1) hosted_service_frame = dialog.liststore.get(iter, 2)[0] iter = dialog.liststore.get_iter(2) local_service_frame = dialog.liststore.get(iter, 2)[0] self.assertEqual(1, dialog.use_type_combobox.get_active()) [alignment] = dialog.register_button.get_children() [hbox] = alignment.get_children() [image, label] = hbox.get_children() self.run_gtk_eventloop() self.assertIs(hosted_service_frame, dialog.active_widget) self.assertEqual(dialog.REGISTER_BUTTON_TEXT, label.get_text()) dialog.use_type_combobox.set_active(0) self.run_gtk_eventloop() self.assertIs(no_service_frame, dialog.active_widget) self.assertEqual(dialog.DISABLE_BUTTON_TEXT, label.get_text()) dialog.use_type_combobox.set_active(2) self.run_gtk_eventloop() self.assertIs(local_service_frame, dialog.active_widget) self.assertEqual(dialog.REGISTER_BUTTON_TEXT, label.get_text()) def test_modify(self): """ Test that modifications to data in the UI are propagated to the controller. 
""" dialog = ClientSettingsDialog(self.controller) self.run_gtk_eventloop() self.assertFalse(self.controller.is_modified) self.assertEqual(1, dialog.use_type_combobox.get_active()) dialog.use_type_combobox.set_active(2) self.run_gtk_eventloop() self.assertTrue(self.controller.is_modified) dialog.revert(None) self.run_gtk_eventloop() self.assertFalse(self.controller.is_modified) simulate_gtk_key_release(dialog.hosted_account_name_entry, "A") self.run_gtk_eventloop() self.assertTrue(self.controller.is_modified) dialog.revert(None) self.run_gtk_eventloop() self.assertFalse(self.controller.is_modified) simulate_gtk_key_release(dialog.hosted_password_entry, "B") self.run_gtk_eventloop() self.assertTrue(self.controller.is_modified) dialog.revert(None) self.run_gtk_eventloop() self.assertFalse(self.controller.is_modified) simulate_gtk_key_release(dialog.local_landscape_host_entry, "C") self.run_gtk_eventloop() self.assertTrue(self.controller.is_modified) def test_modify_with_paste(self): """ Non-keypress modifications to data in the UI are propagated to the controller. """ dialog = ClientSettingsDialog(self.controller) self.run_gtk_eventloop() self.assertFalse(self.controller.is_modified) self.assertEqual(1, dialog.use_type_combobox.get_active()) # Test hosted account name self.assert_paste_data_saved(dialog, 1, dialog.hosted_account_name_entry, "hosted_account_name") # Test hosted password self.assert_paste_data_saved(dialog, 1, dialog.hosted_password_entry, "hosted_password") # Test local hostname self.assert_paste_data_saved(dialog, 2, dialog.local_landscape_host_entry, "local_landscape_host") # Test local password self.assert_paste_data_saved(dialog, 2, dialog.local_password_entry, "local_password") def test_load_data_from_config(self): """ Test that we load data into the appropriate entries from the configuration file. """ dialog = ClientSettingsDialog(self.controller) self.assertEqual(1, dialog.use_type_combobox.get_active()) self.assertEqual("foo", dialog.hosted_account_name_entry.get_text()) self.assertEqual("bar", dialog.hosted_password_entry.get_text()) self.assertEqual("", dialog.local_landscape_host_entry.get_text()) self.assertEqual("", dialog.local_password_entry.get_text()) def test_revert(self): """ Test that we can revert the UI values using the controller. """ dialog = ClientSettingsDialog(self.controller) self.run_gtk_eventloop() self.assertEqual(1, dialog.use_type_combobox.get_active()) self.assertEqual("foo", dialog.hosted_account_name_entry.get_text()) self.assertEqual("bar", dialog.hosted_password_entry.get_text()) dialog.use_type_combobox.set_active(2) dialog.local_landscape_host_entry.set_text("more.barn") self.run_gtk_eventloop() self.assertEqual("bar", dialog.hosted_password_entry.get_text()) self.assertEqual("more.barn", dialog.local_landscape_host_entry.get_text()) dialog.revert(None) self.run_gtk_eventloop() self.assertEqual(1, dialog.use_type_combobox.get_active()) self.assertEqual("foo", dialog.hosted_account_name_entry.get_text()) self.assertEqual("bar", dialog.hosted_password_entry.get_text()) def test_check_local_landscape_host_name_entry_ok(self): """ Test that L{check_local_landscape_host_name_entry} returns L{True} when the input is a valid host name. 
""" dialog = ClientSettingsDialog(self.controller) dialog.use_type_combobox.set_active(2) dialog.local_landscape_host_entry.set_text("foo.bar") self.assertTrue(dialog.check_local_landscape_host_name_entry()) def test_check_local_landscape_host_name_entry_ok_not_recorded(self): """ Test that L{check_local_landscape_host_name_entry} does not add the entry to L{ClientSettingsDialog._errored_entries} when the input is a valid host name. """ dialog = ClientSettingsDialog(self.controller) dialog.use_type_combobox.set_active(2) dialog.local_landscape_host_entry.set_text("foo.bar") dialog.check_local_landscape_host_name_entry() self.assertEqual(0, len(dialog._errored_entries)) def test_check_local_landscape_host_name_entry_bad_host_name(self): """ Test that L{check_local_landscape_host_name_entry} returns L{False} when the input is not a valid host name. """ dialog = ClientSettingsDialog(self.controller) dialog.use_type_combobox.set_active(2) dialog.local_landscape_host_entry.set_text("foo bar") self.assertFalse(dialog.check_local_landscape_host_name_entry()) def test_check_local_landscape_host_name_entry_bad_recorded(self): """ Test that L{check_local_landscape_host_name_entry} does add the entry to L{ClientSettingsDialog._errored_entries} when the input is not a valid host name. """ dialog = ClientSettingsDialog(self.controller) dialog.use_type_combobox.set_active(2) dialog.local_landscape_host_entry.set_text("foo bar") dialog.check_local_landscape_host_name_entry() self.assertEqual(1, len(dialog._errored_entries)) def test_check_local_landscape_host_name_entry_bad_error_type(self): """ Test that L{check_local_landscape_host_name_entry} adds the correct error type to L{ClientSettingsDialog._validation_errors} when the input is not a valid host name. """ dialog = ClientSettingsDialog(self.controller) dialog.use_type_combobox.set_active(2) dialog.local_landscape_host_entry.set_text("foo bar") dialog.check_local_landscape_host_name_entry() self.assertEqual(set([dialog.INVALID_HOST_NAME]), dialog._validation_errors) def test_check_local_landscape_host_name_entry_unicode_in_host_name(self): """ Test that L{check_local_landscape_host_name_entry} returns L{False} when the input contains Unicode. """ dialog = ClientSettingsDialog(self.controller) dialog.use_type_combobox.set_active(2) dialog.local_landscape_host_entry.set_text(u"f\xc3.bar") self.assertFalse(dialog.check_local_landscape_host_name_entry()) def test_check_local_landscape_host_name_entry_unicode_recorded(self): """ Test that L{check_local_landscape_host_name_entry} does add the entry to L{ClientSettingsDialog._errored_entries} when the input contains Unicode. """ dialog = ClientSettingsDialog(self.controller) dialog.use_type_combobox.set_active(2) dialog.local_landscape_host_entry.set_text(u"f\xc3.bar") dialog.check_local_landscape_host_name_entry() self.assertEqual(1, len(dialog._errored_entries)) def test_check_local_landscape_host_name_entry_unicode_error_type(self): """ Test that L{check_local_landscape_host_name_entry} adds the correct error type to L{ClientSettingsDialog._validation_errors} when the input contains Unicode. """ dialog = ClientSettingsDialog(self.controller) dialog.use_type_combobox.set_active(2) dialog.local_landscape_host_entry.set_text(u"f\xc3.bar") dialog.check_local_landscape_host_name_entry() self.assertEqual( set([dialog.INVALID_HOST_NAME, dialog.UNICODE_IN_ENTRY]), dialog._validation_errors) def test_check_entry_ok(self): """ Test that we return L{True} when the text of a L{Gtk.Entry} is valid input. 
""" dialog = ClientSettingsDialog(self.controller) self.run_gtk_eventloop() dialog.use_type_combobox.set_active(1) self.run_gtk_eventloop() dialog.hosted_account_name_entry.set_text("Toodleoo") self.assertTrue(dialog.check_entry(dialog.hosted_account_name_entry)) def test_check_entry_doesnt_record_entry_when_ok(self): """ Test that, when the text of a L{Gtk.Entry} is valid nothing is added to L{ClientSettingsDialog._errored_entries}. """ dialog = ClientSettingsDialog(self.controller) self.run_gtk_eventloop() dialog.use_type_combobox.set_active(1) self.run_gtk_eventloop() dialog.hosted_account_name_entry.set_text("Toodleoo") dialog.check_entry(dialog.hosted_account_name_entry) self.assertEqual(0, len(dialog._errored_entries)) def test_check_entry_non_ascii(self): """ Test that we return L{False} when the text of a L{Gtk.Entry} contains Unicode input. """ dialog = ClientSettingsDialog(self.controller) self.run_gtk_eventloop() dialog.use_type_combobox.set_active(1) self.run_gtk_eventloop() dialog.hosted_account_name_entry.set_text(u"T\xc3dle\xc4") self.assertFalse(dialog.check_entry(dialog.hosted_account_name_entry)) def test_check_entry_records_entry_when_non_ascii(self): """ Test that, when the text of a L{Gtk.Entry} contains Unicode it is added to L{ClientSettingsDialog._errored_entries}. """ dialog = ClientSettingsDialog(self.controller) self.run_gtk_eventloop() dialog.use_type_combobox.set_active(1) self.run_gtk_eventloop() dialog.hosted_account_name_entry.set_text(u"T\xc3dle\xc4") dialog.check_entry(dialog.hosted_account_name_entry) self.assertEqual(1, len(dialog._errored_entries)) def test_dismiss_validation_errors_local(self): """ Test that dismissing the validation errors tidies up indicators that have been set against local settings. """ dialog = ClientSettingsDialog(self.controller) self.run_gtk_eventloop() dialog.use_type_combobox.set_active(1) self.run_gtk_eventloop() dialog.hosted_account_name_entry.set_text(u"T\xc3dle\xc4") dialog.hosted_password_entry.set_text(u"T\xc3dle\xc4") self.run_gtk_eventloop() dialog.validity_check() self.run_gtk_eventloop() self.assertEqual(2, len(dialog._errored_entries)) [entry1, entry2] = dialog._errored_entries self.assertEqual(Gtk.STOCK_DIALOG_WARNING, entry1.get_icon_stock(Gtk.EntryIconPosition.PRIMARY)) self.assertEqual(Gtk.STOCK_DIALOG_WARNING, entry2.get_icon_stock(Gtk.EntryIconPosition.PRIMARY)) dialog.dismiss_infobar(None) self.run_gtk_eventloop() self.assertEqual(0, len(dialog._errored_entries)) self.assertNotEqual( Gtk.STOCK_DIALOG_WARNING, entry1.get_icon_stock(Gtk.EntryIconPosition.PRIMARY)) self.assertNotEqual( Gtk.STOCK_DIALOG_WARNING, entry2.get_icon_stock(Gtk.EntryIconPosition.PRIMARY)) def test_dismiss_validation_errors_hosted(self): """ Test that dismissing the validation errors tidies up indicators that have been set against hosted fields. 
""" dialog = ClientSettingsDialog(self.controller) self.run_gtk_eventloop() dialog.use_type_combobox.set_active(2) self.run_gtk_eventloop() dialog.local_landscape_host_entry.set_text("dodgy as hell") self.run_gtk_eventloop() dialog.validity_check() self.run_gtk_eventloop() self.assertEqual(1, len(dialog._errored_entries)) [entry1] = dialog._errored_entries self.assertEqual(Gtk.STOCK_DIALOG_WARNING, entry1.get_icon_stock(Gtk.EntryIconPosition.PRIMARY)) dialog.dismiss_infobar(None) self.run_gtk_eventloop() self.assertEqual(0, len(dialog._errored_entries)) self.assertNotEqual( Gtk.STOCK_DIALOG_WARNING, entry1.get_icon_stock(Gtk.EntryIconPosition.PRIMARY)) def test_validity_check_disabled(self): """ Test that the L{validity_check} returns True when we disable landscape client. """ dialog = ClientSettingsDialog(self.controller) self.run_gtk_eventloop() dialog.use_type_combobox.set_active(0) self.run_gtk_eventloop() self.assertTrue(dialog.validity_check()) def test_validity_check_hosted(self): """ Test that the L{validity_check} returns True when the hosted fields are valid. """ dialog = ClientSettingsDialog(self.controller) self.run_gtk_eventloop() dialog.use_type_combobox.set_active(1) dialog.hosted_account_name_entry.set_text("Bob") dialog.hosted_password_entry.set_text("the builder") self.run_gtk_eventloop() self.assertTrue(dialog.validity_check()) def test_validity_check_hosted_unicode(self): """ Test that the L{validity_check} returns False when the hosted fields contain Unicode. """ dialog = ClientSettingsDialog(self.controller) self.run_gtk_eventloop() dialog.use_type_combobox.set_active(1) dialog.hosted_account_name_entry.set_text(u"B\xc3b") self.run_gtk_eventloop() self.assertFalse(dialog.validity_check()) def test_validity_check_local_ok(self): """ Test that the L{validity_check} returns True when the local fields are valid. """ dialog = ClientSettingsDialog(self.controller) self.run_gtk_eventloop() dialog.use_type_combobox.set_active(2) self.run_gtk_eventloop() dialog.local_landscape_host_entry.set_text("foo.bar") self.run_gtk_eventloop() self.assertTrue(dialog.validity_check()) def test_validity_check_local_sanitisable(self): """ Test that the L{validity_check} returns True when the local fields are valid after sanitation. """ dialog = ClientSettingsDialog(self.controller) self.run_gtk_eventloop() dialog.use_type_combobox.set_active(2) dialog.local_landscape_host_entry.set_text(" foo.bar") self.run_gtk_eventloop() self.assertTrue(dialog.validity_check()) dialog.local_landscape_host_entry.set_text("foo.bar ") self.run_gtk_eventloop() self.assertTrue(dialog.validity_check()) def test_validity_check_local_invalid_host_name(self): """ Test that the L{validity_check} returns False when the host name is invalid. """ dialog = ClientSettingsDialog(self.controller) self.run_gtk_eventloop() dialog.use_type_combobox.set_active(2) dialog.local_landscape_host_entry.set_text("foo bar") self.run_gtk_eventloop() self.assertFalse(dialog.validity_check()) def test_validity_check_local_unicode(self): """ Test that the L{validity_check} returns False when the host name contains Unicode. 
""" dialog = ClientSettingsDialog(self.controller) self.run_gtk_eventloop() dialog.use_type_combobox.set_active(2) dialog.local_landscape_host_entry.set_text(u"f\xc3.bar") self.run_gtk_eventloop() self.assertFalse(dialog.validity_check()) if dbus_test_should_skip: skip = dbus_skip_message class LocalConfigurationViewTest(LandscapeTest): helpers = [ConfigurationProxyHelper] def setUp(self): self.default_data = {"management-type": "LDS", "computer-title": "", "hosted-landscape-host": "", "hosted-account-name": "", "hosted-password": "", "local-landscape-host": "", "local-account-name": "", "local-password": "manky"} self.config_string = ( "[client]\n" "data_path = %s\n" "url = https://landscape.localdomain/message-system\n" "computer_title = baz\n" "ping_url = http://landscape.localdomain/ping\n" % sys.path[0]) super(LocalConfigurationViewTest, self).setUp() landscape.ui.model.configuration.state.DEFAULT_DATA[COMPUTER_TITLE] \ = "me.here.com" settings = FakeGSettings(data=self.default_data) self.uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=self.uisettings) self.controller = ConfigController(model) def test_init(self): """ Test that we correctly initialise the L{ConfigurationView} correctly from the controller. """ dialog = ClientSettingsDialog(self.controller) while Gtk.events_pending(): Gtk.main_iteration() content_area = dialog.get_content_area() children = content_area.get_children() self.assertEqual(len(children), 2) box = children[0] self.assertIsInstance(box, Gtk.Box) self.assertEqual(2, dialog.use_type_combobox.get_active()) def test_load_data_from_config(self): """ Test that we load data into the appropriate entries from the configuration file. """ dialog = ClientSettingsDialog(self.controller) while Gtk.events_pending(): Gtk.main_iteration() self.assertEqual(2, dialog.use_type_combobox.get_active()) self.assertEqual("", dialog.hosted_account_name_entry.get_text()) self.assertEqual("", dialog.hosted_password_entry.get_text()) self.assertEqual("landscape.localdomain", dialog.local_landscape_host_entry.get_text()) self.assertEqual("manky", dialog.local_password_entry.get_text()) if dbus_test_should_skip: skip = dbus_skip_message landscape-client-14.01/landscape/ui/view/tests/__init__.py0000644000175000017500000000000012301414317023317 0ustar andreasandreaslandscape-client-14.01/landscape/ui/view/__init__.py0000644000175000017500000000000012301414317022155 0ustar andreasandreaslandscape-client-14.01/landscape/ui/view/configuration.py0000644000175000017500000003274212301414317023307 0ustar andreasandreasimport re import os from gettext import gettext as _ from gi.repository import GObject, Gtk from landscape.ui.constants import ( CANONICAL_MANAGED, LOCAL_MANAGED, NOT_MANAGED) # Note, I think this may not be fully compliant with the changes in RFC 1123 HOST_NAME_REGEXP = re.compile("^(([a-zA-Z][a-zA-Z0-9\-]*)?[a-zA-Z0-9][\.]?)*" "(([A-Za-z][A-Za-z0-9\-]*)?[A-Za-z0-9])$") def sanitise_host_name(host_name): """ Do some minimal host name sanitation. """ return host_name.strip() def is_valid_host_name(host_name): """ Check that the provided host name complies with L{HOST_NAME_REGEXP} and is therefor valid. """ return HOST_NAME_REGEXP.match(host_name) is not None def is_ascii(text): """ Test that the provided string contains only characters from the ASCII set. 
""" try: text.decode("ascii") return True except UnicodeDecodeError: return False class ClientSettingsDialog(Gtk.Dialog): """ L{ClientSettingsDialog} is a subclass of Gtk.Dialog that loads the UI components from the associated Glade XML file and wires everything up to the controller. """ GLADE_FILE = "landscape-client-settings.glade" INVALID_HOST_NAME = 0 UNICODE_IN_ENTRY = 1 def __init__(self, controller): super(ClientSettingsDialog, self).__init__( title=_("Landscape Service"), flags=Gtk.DialogFlags.MODAL) self.set_default_icon_name("preferences-management-service") self.set_resizable(False) self._initialised = False self._validation_errors = set() self._errored_entries = [] self.controller = controller self.setup_ui() self.load_data() # One extra revert to reset after loading data self.controller.revert() def indicate_error_on_entry(self, entry): """ Show a warning icon on a L{Gtk.Entry} to indicate some associated error. """ entry.set_icon_from_stock( Gtk.EntryIconPosition.PRIMARY, Gtk.STOCK_DIALOG_WARNING) self._errored_entries.append(entry) def check_local_landscape_host_name_entry(self): host_name = sanitise_host_name( self.local_landscape_host_entry.get_text()) ascii_ok = is_ascii(host_name) host_name_ok = is_valid_host_name(host_name) if ascii_ok and host_name_ok: self.local_landscape_host_entry.set_text(host_name) return True else: self.indicate_error_on_entry(self.local_landscape_host_entry) if not host_name_ok: self._validation_errors.add(self.INVALID_HOST_NAME) if not ascii_ok: self._validation_errors.add(self.UNICODE_IN_ENTRY) return False def check_entry(self, entry): """ Check that the text content of a L{Gtk.Entry} is acceptable. """ if is_ascii(entry.get_text()): return True else: self.indicate_error_on_entry(entry) self._validation_errors.add(self.UNICODE_IN_ENTRY) return False def validity_check(self): self._validation_errors = set() if self._info_bar_container.get_visible(): self.dismiss_infobar(None) active_iter = self.liststore.get_iter( self.use_type_combobox.get_active()) [management_type] = self.liststore.get(active_iter, 0) if management_type == NOT_MANAGED: return True elif management_type == CANONICAL_MANAGED: account_name_ok = self.check_entry(self.hosted_account_name_entry) password_ok = self.check_entry(self.hosted_password_entry) return account_name_ok and password_ok else: host_name_ok = self.check_local_landscape_host_name_entry() password_ok = self.check_entry(self.local_password_entry) return host_name_ok and password_ok @property def NO_SERVICE_TEXT(self): return _("None") @property def HOSTED_SERVICE_TEXT(self): return _("Landscape - hosted by Canonical") @property def LOCAL_SERVICE_TEXT(self): return _("Landscape - dedicated server") @property def REGISTER_BUTTON_TEXT(self): return _("Register") @property def DISABLE_BUTTON_TEXT(self): return _("Disable") @property def INVALID_HOST_NAME_MESSAGE(self): return _("Invalid host name.") @property def UNICODE_IN_ENTRY_MESSAGE(self): return _("Only ASCII characters are allowed.") def _set_use_type_combobox_from_controller(self): """ Load the persisted L{management_type} from the controller and set the combobox appropriately. Note that Gtk makes us jump through some hoops by having it's own model level to deal with here. The conversion between paths and iters makes more sense if you understand that treeviews use the same model. 
""" list_iter = self.liststore.get_iter_first() while (self.liststore.get(list_iter, 0)[0] != self.controller.management_type): list_iter = self.liststore.iter_next(list_iter) path = self.liststore.get_path(list_iter) [index] = path.get_indices() self.use_type_combobox.set_active(index) def _set_hosted_values_from_controller(self): self.hosted_account_name_entry.set_text( self.controller.hosted_account_name) self.hosted_password_entry.set_text(self.controller.hosted_password) def _set_local_values_from_controller(self): self.local_landscape_host_entry.set_text( self.controller.local_landscape_host) self.local_password_entry.set_text(self.controller.local_password) def load_data(self): self._initialised = False self.controller.load() self._set_hosted_values_from_controller() self._set_local_values_from_controller() self._set_use_type_combobox_from_controller() self._initialised = True def make_liststore(self): """ Construct the correct L{Gtk.ListStore} to drive the L{Gtk.ComboBox} for use-type. This a table of: * Management type (key) * Text to display in the combobox * L{Gtk.Frame} to load when this item is selected. """ liststore = Gtk.ListStore(GObject.TYPE_PYOBJECT, GObject.TYPE_STRING, GObject.TYPE_PYOBJECT) self.active_widget = None liststore.append([NOT_MANAGED, self.NO_SERVICE_TEXT, self._builder.get_object("no-service-frame")]) liststore.append([CANONICAL_MANAGED, self.HOSTED_SERVICE_TEXT, self._builder.get_object("hosted-service-frame")]) liststore.append([LOCAL_MANAGED, self.LOCAL_SERVICE_TEXT, self._builder.get_object("local-service-frame")]) return liststore def link_hosted_service_widgets(self): self.hosted_account_name_entry = self._builder.get_object( "hosted-account-name-entry") self.hosted_account_name_entry.connect( "changed", self.on_changed_event, "hosted_account_name") self.hosted_password_entry = self._builder.get_object( "hosted-password-entry") self.hosted_password_entry.connect( "changed", self.on_changed_event, "hosted_password") def link_local_service_widgets(self): self.local_landscape_host_entry = self._builder.get_object( "local-landscape-host-entry") self.local_landscape_host_entry.connect( "changed", self.on_changed_event, "local_landscape_host") self.local_password_entry = self._builder.get_object( "local-password-entry") self.local_password_entry.connect( "changed", self.on_changed_event, "local_password") def link_use_type_combobox(self, liststore): self.use_type_combobox = self._builder.get_object("use-type-combobox") self.use_type_combobox.connect("changed", self.on_combo_changed) self.use_type_combobox.set_model(liststore) cell = Gtk.CellRendererText() self.use_type_combobox.pack_start(cell, True) self.use_type_combobox.add_attribute(cell, 'text', 1) def cancel_response(self, widget): self.response(Gtk.ResponseType.CANCEL) def register_response(self, widget): if self.validity_check(): self.response(Gtk.ResponseType.OK) else: error_text = [] if self.UNICODE_IN_ENTRY in self._validation_errors: error_text.append(self.UNICODE_IN_ENTRY_MESSAGE) if self.INVALID_HOST_NAME in self._validation_errors: error_text.append(self.INVALID_HOST_NAME_MESSAGE) self.info_message.set_text("\n".join(error_text)) self._info_bar_container.show() def set_button_text(self, management_type): [alignment] = self.register_button.get_children() [hbox] = alignment.get_children() [image, label] = hbox.get_children() if management_type == NOT_MANAGED: label.set_text(self.DISABLE_BUTTON_TEXT) else: label.set_text(self.REGISTER_BUTTON_TEXT) def setup_buttons(self): self.revert_button = 
Gtk.Button(stock=Gtk.STOCK_REVERT_TO_SAVED) self.action_area.pack_start(self.revert_button, True, True, 0) self.revert_button.connect("clicked", self.revert) self.revert_button.show() self.cancel_button = Gtk.Button(stock=Gtk.STOCK_CANCEL) self.action_area.pack_start(self.cancel_button, True, True, 0) self.cancel_button.show() self.cancel_button.connect("clicked", self.cancel_response) self.register_button = Gtk.Button(stock=Gtk.STOCK_OK) self.action_area.pack_start(self.register_button, True, True, 0) self.register_button.show() self.register_button.connect("clicked", self.register_response) def dismiss_infobar(self, widget): self._info_bar_container.hide() for entry in self._errored_entries: entry.set_icon_from_stock(Gtk.EntryIconPosition.PRIMARY, None) self._errored_entries = [] def setup_info_bar(self): labels_size_group = self._builder.get_object("labels-sizegroup") entries_size_group = self._builder.get_object("entries-sizegroup") labels_size_group.set_ignore_hidden(False) entries_size_group.set_ignore_hidden(False) self._info_bar_container = Gtk.HBox() self._info_bar_container.set_spacing(12) info_bar = Gtk.InfoBar() entries_size_group.add_widget(info_bar) info_bar.show() empty_label = Gtk.Label() labels_size_group.add_widget(empty_label) empty_label.show() self._info_bar_container.pack_start(empty_label, expand=False, fill=False, padding=0) self._info_bar_container.pack_start(info_bar, expand=False, fill=False, padding=0) content_area = info_bar.get_content_area() hbox = Gtk.HBox() self.info_message = Gtk.Label() self.info_message.set_alignment(0, 0.5) self.info_message.show() hbox.pack_start(self.info_message, expand=True, fill=True, padding=6) ok_button = Gtk.Button("Dismiss") ok_button.connect("clicked", self.dismiss_infobar) ok_button.show() hbox.pack_start(ok_button, expand=True, fill=True, padding=0) hbox.show() content_area.pack_start(hbox, expand=True, fill=True, padding=0) def setup_ui(self): self._builder = Gtk.Builder() self._builder.set_translation_domain("landscape-client") self._builder.add_from_file( os.path.join( os.path.dirname(__file__), "ui", self.GLADE_FILE)) content_area = self.get_content_area() content_area.set_spacing(12) self.set_border_width(12) self.setup_info_bar() self._vbox = self._builder.get_object("toplevel-vbox") self._vbox.unparent() content_area.pack_start(self._vbox, expand=True, fill=True, padding=12) self._vbox.pack_start(self._info_bar_container, expand=False, fill=False, padding=0) self.liststore = self.make_liststore() self.link_use_type_combobox(self.liststore) self.link_hosted_service_widgets() self.link_local_service_widgets() self.setup_buttons() def on_combo_changed(self, combobox): list_iter = self.liststore.get_iter(combobox.get_active()) if not self.active_widget is None: self._vbox.remove(self.active_widget) [management_type] = self.liststore.get(list_iter, 0) self.set_button_text(management_type) if self._initialised: self.controller.management_type = management_type self.controller.modify() [self.active_widget] = self.liststore.get(list_iter, 2) self.active_widget.unparent() self._vbox.add(self.active_widget) def on_changed_event(self, widget, attribute): setattr(self.controller, attribute, widget.get_text()) self.controller.modify() def quit(self, *args): self.destroy() def revert(self, button): self.controller.revert() self.load_data() # One extra revert to reset after loading data self.controller.revert() landscape-client-14.01/landscape/ui/view/ui/0000755000175000017500000000000012301414317020473 5ustar 
andreasandreaslandscape-client-14.01/landscape/ui/view/ui/landscape-client-settings.glade0000644000175000017500000005530512301414317026545 0ustar andreasandreas False 5 dialog False vertical 2 False end gtk-revert-to-saved False True True True False True False True 0 gtk-cancel False True True True False True False True 1 gtk-ok False True True True False True False True 2 False True end 0 True False vertical 12 True False 12 True False 48 preferences-management-service False True 0 True False vertical True False Landscape is a remote administration service from Canonical. If you allow it, a Landscape server can monitor this computer's performance and send administration commands. True False True 0 Find out more... False True False True True start start 6 False none https://landscape.canonical.com False True 1 False True 1 False True 0 True False 12 True False 1 Landscape service: False True 0 True False True False True 1 False True 1 False True 1 revertbutton cancel-button ok-button False True False 0 none True False True False 12 12 True False 1 Account name: right 0 0 1 1 True False 1 Registration Key: right 0 1 1 1 True False 1 Don't have an account? 0 2 1 1 True True True 1 0 1 1 True True True False 1 1 1 1 Sign up... False True False True True False none 0 https://landscape.canonical.com 1 2 1 1 False True False 0 none True False True False 12 12 True False 1 Registration Key: 0 1 1 1 True False 1 Landscape server hostname: 0 0 1 1 True True True False True 0.25999999046325684 1 1 1 1 True True 1 0 1 1 False True False 0 none True False True False 12 True False 0.059999998658895493 0 If you click "Disable" the Landscape client on this machine will be disabled. You can reenable it later by revisiting this dialog. True end 1 0 1 1 True False 1 0 0 1 1 landscape-client-14.01/landscape/ui/constants.py0000644000175000017500000000011212301414317021464 0ustar andreasandreasCANONICAL_MANAGED = "canonical" LOCAL_MANAGED = "LDS" NOT_MANAGED = "not" landscape-client-14.01/landscape/ui/tests/0000755000175000017500000000000012301414317020246 5ustar andreasandreaslandscape-client-14.01/landscape/ui/tests/helpers.py0000644000175000017500000001416712301414317022273 0ustar andreasandreasimport os from lxml import etree import dbus from landscape.configuration import LandscapeSetupConfiguration dbus_test_should_skip = False dbus_skip_message = "Cannot launch private DBus session without X11" try: from gi.repository import GObject, Gtk got_gobject_introspection = True # Shut up pyflakes dbus_skip_message = GObject._version + str(Gtk.MAJOR_VERSION) except (ImportError, RuntimeError): got_gobject_introspection = False dbus_test_should_skip = True dbus_skip_message = "GObject Introspection module unavailable" bus = object bus_name = "" if got_gobject_introspection: from gi.repository import Gdk from landscape.ui.model.configuration.mechanism import ( INTERFACE_NAME, ConfigurationMechanism) from landscape.ui.model.configuration.proxy import ConfigurationProxy # We have to do these steps because the ConfigurationMechanism inherits # from dbus.service.Object which throws a fit if it notices you using # it without a mainloop. 
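    # Installing the GLib main loop as dbus-python's default supplies that
    # mainloop; the SessionBus(private=True) below additionally gives the
    # test suite its own bus connection, so the tests cannot interfere
    # with a live session bus.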
    dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
    try:
        bus = dbus.SessionBus(private=True)
        bus_name = dbus.service.BusName(INTERFACE_NAME, bus)
    except dbus.exceptions.DBusException:
        bus = object
        bus_name = ""
        dbus_test_should_skip = True
        dbus_skip_message = "Cannot launch private DBus session without X11"


class ConfigurationProxyHelper(object):
    """
    L{ConfigurationProxyHelper} will provide its test case with a
    L{ConfigurationProxy} setup in such a way that it uses a real
    L{ConfigurationMechanism} (which in turn uses a real
    L{LandscapeSetupConfiguration}) but which does not make use of DBus
    for communication.

    Tests utilising this helper must define a L{test_case.config_string}
    for use in L{set_up} below.
    """

    def set_up(self, test_case):
        if not dbus_test_should_skip:
            test_case.config_filename = test_case.makeFile(
                test_case.config_string)
            test_case.config = LandscapeSetupConfiguration()
            test_case.config.default_config_filenames = \
                [test_case.config_filename]

            if got_gobject_introspection:
                test_case.mechanism = ConfigurationMechanism(test_case.config,
                                                             bus_name)
                test_case.proxy = ConfigurationProxy(
                    interface=test_case.mechanism)
                test_case.proxy.load(["-c", test_case.config_filename])

    def tear_down(self, test_case):
        if not dbus_test_should_skip and got_gobject_introspection:
            test_case.mechanism.remove_from_connection()


class FakeGSettings(object):
    """
    This class impersonates a real L{gi.repository.Gio.GSettings} object
    to allow for testing code that utilises it without setting values in
    the live DConf.
    """

    calls = {}

    def __init__(self, data={}):
        self.set_data(data)
        tree = etree.parse(
            os.path.join(
                os.path.dirname(os.path.abspath(__file__)),
                "../../../",
                "glib-2.0/schemas/",
                "com.canonical.landscape-client-settings.gschema.xml"))
        root = tree.getroot()
        self.schema = root.find("schema")
        assert(self.schema.attrib["id"] ==
               "com.canonical.landscape-client-settings")
        self.keys = {}
        for key in self.schema.findall("key"):
            self.keys[key.attrib["name"]] = key.attrib["type"]

    def check_key_data(self, name, gstype):
        if name in self.keys:
            if self.keys[name] == gstype:
                return True
            else:
                raise ValueError("The GSchema file says %s is a %s, "
                                 "but you asked for a %s"
                                 % (name, self.keys[name], gstype))
        else:
            raise KeyError("Can't find %s in the GSchema file!" % name)

    def get_value(self, name, gstype):
        if self.check_key_data(name, gstype):
            return self.data[name]

    def set_value(self, name, gstype, value):
        if self.check_key_data(name, gstype):
            self.data[name] = value

    def set_data(self, data):
        self.data = data

    def _call(self, name, *args):
        [count, arglist] = self.calls.get(name, (0, []))
        count += 1
        arglist.append(self._args_to_string(*args))
        self.calls[name] = [count, arglist]

    def _args_to_string(self, *args):
        return "|".join([str(arg) for arg in args])

    def new(self, key):
        self._call("new", key)
        return self

    def connect(self, signal, callback, *args):
        self._call("connect", signal, callback, *args)

    def get_boolean(self, name):
        self._call("get_boolean", name)
        return self.get_value(name, "b")

    def set_boolean(self, name, value):
        self._call("set_boolean", name, value)
        self.set_value(name, "b", value)

    def get_string(self, name):
        self._call("get_string", name)
        return self.get_value(name, "s")

    def set_string(self, name, value):
        self._call("set_string", name, value)
        self.set_value(name, "s", value)

    def was_called(self, name):
        return name in self.calls

    def was_called_with_args(self, name, *args):
        try:
            [count, arglist] = self.calls.get(name, (0, []))
        except KeyError:
            return False
        expected_args = self._args_to_string(*args)
        return expected_args in arglist


def simulate_gtk_key_release(widget, key):
    """
    Simulates a keypress in a widget.

    @param widget: The widget which should receive the keypress.
    @param key: The key to use.
    """
    widget.insert_text(key, -1)


def simulate_gtk_paste(widget, pasted_text):
    """
    Simulates pasting text into an editable element.

    @param widget: The widget which should receive the paste.
    @param pasted_text: The text to paste into the widget.
    """
    widget.set_text("")
    clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
    clipboard.set_text(pasted_text, len=-1)
    widget.set_position(0)
    widget.paste_clipboard()
landscape-client-14.01/landscape/ui/tests/__init__.py0000644000175000017500000000000012301414317022345 0ustar andreasandreas
landscape-client-14.01/landscape/ui/__init__.py0000644000175000017500000000000012301414317021203 0ustar andreasandreas
landscape-client-14.01/landscape/ui/controller/0000755000175000017500000000000012301414317021267 5ustar andreasandreas
landscape-client-14.01/landscape/ui/controller/tests/0000755000175000017500000000000012301414317022431 5ustar andreasandreas
landscape-client-14.01/landscape/ui/controller/tests/test_configuration.py0000644000175000017500000002053312301414317026714 0ustar andreasandreas
from landscape.ui.tests.helpers import (
    ConfigurationProxyHelper, dbus_test_should_skip, dbus_skip_message,
    FakeGSettings)

if not dbus_test_should_skip:
    from landscape.ui.controller.configuration import ConfigController
    import landscape.ui.model.configuration.state
    from landscape.ui.model.configuration.state import (
        ConfigurationModel, COMPUTER_TITLE)
    from landscape.ui.model.configuration.uisettings import UISettings

from landscape.tests.helpers import LandscapeTest


class ConfigControllerTest(LandscapeTest):

    helpers = [ConfigurationProxyHelper]

    def setUp(self):
        self.config_string = "\n".join(
            ["[client]",
             "data_path = /var/lib/landscape/client/",
             "http_proxy = http://proxy.localdomain:3192",
             "tags = a_tag",
             "url = https://landscape.canonical.com/message-system",
             "account_name = foo",
             "registration_key = bar",
             "computer_title = baz",
             "https_proxy = https://proxy.localdomain:6192",
             "ping_url = http://landscape.canonical.com/ping"])

        self.default_data = {"management-type": "canonical",
                             "computer-title": "",
"hosted-landscape-host": "", "hosted-account-name": "", "hosted-password": "", "local-landscape-host": "", "local-account-name": "", "local-password": ""} super(ConfigControllerTest, self).setUp() landscape.ui.model.configuration.state.DEFAULT_DATA[COMPUTER_TITLE] \ = "me.here.com" settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) def disable(on_suceed, on_fail): pass def register(on_notify, on_error, on_succeed, on_fail): pass self.controller = ConfigController(model) self.controller.disable = disable self.controller.register = register self.controller.load() def test_init(self): """ Test that when we create a controller it has initial state read in directly from the configuration file. """ self.controller.load() self.assertEqual("baz", self.controller.computer_title) self.assertTrue(self.controller.management_type) self.assertEqual("landscape.canonical.com", self.controller.hosted_landscape_host) self.assertEqual("foo", self.controller.hosted_account_name) self.assertEqual("bar", self.controller.hosted_password) self.assertEqual("", self.controller.local_landscape_host) self.assertEqual("standalone", self.controller.local_account_name) self.assertEqual("", self.controller.local_password) def test_set_hosted_account_name(self): """ Test that we can set the L{hosted_account_name} property. """ self.controller.load() self.assertEqual(self.controller.hosted_account_name, "foo") self.controller.hosted_account_name = "shoe" self.assertEqual(self.controller.hosted_account_name, "shoe") def test_set_local_account_name(self): """ Test that we can set the L{local_account_name} property. """ self.controller.load() self.assertEqual("standalone", self.controller.local_account_name) self.controller.local_account_name = "shoe" self.assertEqual(self.controller.local_account_name, "shoe") def test_set_hosted_password(self): """ Test that we can set the L{hosted_password} property. """ self.controller.load() self.assertEqual(self.controller.hosted_password, "bar") self.controller.hosted_password = "nucker" self.assertEqual(self.controller.hosted_password, "nucker") def test_set_local_password(self): """ Test that we can set the L{local_password} property. """ self.controller.load() self.assertEqual(self.controller.local_password, "") self.controller.local_password = "nucker" self.assertEqual(self.controller.local_password, "nucker") def test_set_local_landscape_host(self): """ Test that we can set the L{local_landscape_host} property. """ self.controller.load() self.assertEqual("", self.controller.local_landscape_host) self.controller.local_landscape_host = "smelly.pants" self.assertEqual(self.controller.local_landscape_host, "smelly.pants") def test_revert(self): """ Test that we can revert the controller to it's initial state. """ self.controller.load() self.assertEqual(self.controller.hosted_account_name, "foo") self.controller.hosted_account_name = "Hildaborg" self.assertEqual(self.controller.hosted_account_name, "Hildaborg") self.controller.revert() self.assertEqual(self.controller.hosted_account_name, "foo") def test_is_modified(self): """ Test that we can determine when something has been modified. 
""" self.controller.load() self.assertFalse(self.controller.is_modified) self.controller.local_landscape_host = "bing.bang.a.bang" self.assertTrue(self.controller.is_modified) self.controller.revert() self.assertFalse(self.controller.is_modified) self.controller.account_name = "soldierBlue" self.assertTrue(self.controller.is_modified) self.controller.revert() self.assertFalse(self.controller.is_modified) self.controller.registration_key = "HesAnIndianCowboyInTheRodeo" self.assertTrue(self.controller.is_modified) def test_persist(self): """ Test that we can write configuration settings back to the config file. """ self.controller.load() self.assertEqual("", self.controller.local_landscape_host) self.controller.local_landscape_host = "landscape.localdomain" self.assertEqual("landscape.localdomain", self.controller.local_landscape_host) self.controller.persist(None, None, None, None) self.assertEqual("landscape.localdomain", self.controller.local_landscape_host) self.controller.local_landscape_host = "boo" self.controller.revert() self.assertEqual("landscape.localdomain", self.controller.local_landscape_host) if dbus_test_should_skip: skip = dbus_skip_message class EmptyConfigControllerTest(LandscapeTest): helpers = [ConfigurationProxyHelper] def setUp(self): self.config_string = "" self.default_data = {"management-type": "not", "computer-title": "", "hosted-landscape-host": "", "hosted-account-name": "", "hosted-password": "", "local-landscape-host": "", "local-account-name": "", "local-password": ""} super(EmptyConfigControllerTest, self).setUp() landscape.ui.model.configuration.state.DEFAULT_DATA[COMPUTER_TITLE] \ = "me.here.com" settings = FakeGSettings(data=self.default_data) uisettings = UISettings(settings) model = ConfigurationModel(proxy=self.proxy, uisettings=uisettings) self.controller = ConfigController(model) self.controller.load() def test_defaulting(self): """ Test we set the correct values when loading a blank configuration. 
""" self.controller.load() self.assertEqual("not", self.controller.management_type) self.assertEqual("", self.controller.hosted_account_name) self.assertEqual("", self.controller.hosted_password) self.assertEqual("landscape.canonical.com", self.controller.hosted_landscape_host) self.assertEqual("standalone", self.controller.local_account_name) self.assertEqual("", self.controller.local_password) self.assertEqual("me.here.com", self.controller.computer_title) if dbus_test_should_skip: skip = dbus_skip_message landscape-client-14.01/landscape/ui/controller/tests/test_app.py0000644000175000017500000001136512301414317024630 0ustar andreasandreasimport sys from landscape.ui.tests.helpers import ( ConfigurationProxyHelper, dbus_test_should_skip, dbus_skip_message, FakeGSettings) if not dbus_test_should_skip: from gi.repository import Gtk from landscape.ui.controller.app import SettingsApplicationController from landscape.ui.controller.configuration import ConfigController from landscape.ui.view.configuration import ClientSettingsDialog from landscape.ui.model.configuration.uisettings import UISettings else: SettingsApplicationController = object from landscape.tests.helpers import LandscapeTest class ConnectionRecordingSettingsApplicationController( SettingsApplicationController): _connections = set() _connection_args = {} _connection_kwargs = {} def __init__(self, get_config=None, get_uisettings=None): super(ConnectionRecordingSettingsApplicationController, self).__init__() if get_config: self.get_config = get_config if get_uisettings: self.get_uisettings = get_uisettings def _make_connection_name(self, signal, func): return signal + ">" + func.__name__ def _record_connection(self, signal, func, *args, **kwargs): connection_name = self._make_connection_name(signal, func) self._connections.add(connection_name) signal_connection_args = self._connection_args.get( connection_name, []) signal_connection_args.append(repr(args)) self._connection_args = signal_connection_args signal_connection_kwargs = self._connection_kwargs.get( connection_name, []) signal_connection_kwargs.append(repr(kwargs)) self._connection_kwargs = signal_connection_kwargs def is_connected(self, signal, func): connection_name = self._make_connection_name(signal, func) return self._connections.issuperset(set([connection_name])) def connect(self, signal, func, *args, **kwargs): self._record_connection(signal, func) class SettingsApplicationControllerInitTest(LandscapeTest): def setUp(self): super(SettingsApplicationControllerInitTest, self).setUp() def test_init(self): """ Test we connect activate to something useful on application initialisation. 
""" app = ConnectionRecordingSettingsApplicationController() self.assertTrue(app.is_connected("activate", app.setup_ui)) if dbus_test_should_skip: skip = dbus_skip_message class SettingsApplicationControllerUISetupTest(LandscapeTest): helpers = [ConfigurationProxyHelper] def setUp(self): self.config_string = "\n".join( ["[client]", "data_path = %s" % sys.path[0], "http_proxy = http://proxy.localdomain:3192", "tags = a_tag", "url = https://landscape.canonical.com/message-system", "account_name = foo", "registration_key = bar", "computer_title = baz", "https_proxy = https://proxy.localdomain:6192", "ping_url = http://landscape.canonical.com/ping"]) self.default_data = {"management-type": "not", "computer-title": "", "hosted-landscape-host": "", "hosted-account-name": "", "hosted-password": "", "local-landscape-host": "", "local-account-name": "", "local-password": ""} super(SettingsApplicationControllerUISetupTest, self).setUp() def fake_run(obj): """ Retard X11 mapping. """ pass self._real_run = Gtk.Dialog.run Gtk.Dialog.run = fake_run def get_config(): return self.proxy def get_uisettings(): settings = FakeGSettings(data=self.default_data) return UISettings(settings) self.app = ConnectionRecordingSettingsApplicationController( get_config=get_config, get_uisettings=get_uisettings) def tearDown(self): Gtk.Dialog.run = self._real_run super( SettingsApplicationControllerUISetupTest, self).tearDown() def test_setup_ui(self): """ Test that we correctly setup the L{ClientSettingsDialog} with the config object and correct data """ self.assertRaises(SystemExit, self.app.setup_ui, data=None, asynchronous=False) self.assertIsInstance(self.app.settings_dialog, ClientSettingsDialog) self.assertIsInstance(self.app.settings_dialog.controller, ConfigController) if dbus_test_should_skip: skip = dbus_skip_message landscape-client-14.01/landscape/ui/controller/tests/__init__.py0000644000175000017500000000000012301414317024530 0ustar andreasandreaslandscape-client-14.01/landscape/ui/controller/__init__.py0000644000175000017500000000000012301414317023366 0ustar andreasandreaslandscape-client-14.01/landscape/ui/controller/app.py0000644000175000017500000000656512301414317022435 0ustar andreasandreasimport sys from gettext import gettext as _ from gi.repository import Gio, Gtk, Notify from landscape.ui.model.configuration.proxy import ConfigurationProxy from landscape.ui.model.configuration.state import ConfigurationModel from landscape.ui.model.configuration.uisettings import UISettings from landscape.ui.view.configuration import ClientSettingsDialog from landscape.ui.controller.configuration import ConfigController APPLICATION_ID = "com.canonical.landscape-client.settings.ui" NOTIFY_ID = "Landscape management service" class SettingsApplicationController(Gtk.Application): """ Core application controller for the landscape settings application. 
""" def __init__(self, args=[]): super(SettingsApplicationController, self).__init__( application_id=APPLICATION_ID) self._args = args self.connect("activate", self.setup_ui) def get_config(self): return ConfigurationProxy() def get_uisettings(self): return UISettings(Gio.Settings) def on_notify(self, message): notification = Notify.Notification.new(NOTIFY_ID, message, "dialog-information") notification.show() def on_error(self, message): notification = Notify.Notification.new(NOTIFY_ID, message, "dialog-information") notification.show() def on_succeed(self, action=None): if action: message = action else: message = _("Success.") notification = Notify.Notification.new(NOTIFY_ID, message, "dialog-information") notification.show() def on_fail(self, action=None): if action: message = action else: message = _("Failure.") notification = Notify.Notification.new(NOTIFY_ID, message, "dialog-information") notification.show() def setup_ui(self, data=None, asynchronous=True): """ L{setup_ui} wires the model to the L{ConfigurationController} and then invokes the view with the controller. When the dialog exits appropriate termination is triggered. @param data: the Gtk callback could pass this, but it is always None in practice. @param asynchronous: a parameter passed through to L{ConfigurationController.exit}, it indicates whether the exit method should be called asynchronously. Is makes testing easier to use it synchronously. """ Notify.init(NOTIFY_ID) config = self.get_config() uisettings = self.get_uisettings() model = ConfigurationModel(proxy=config, proxy_loadargs=self._args, uisettings=uisettings) controller = ConfigController(model) if controller.load(): self.settings_dialog = ClientSettingsDialog(controller) if self.settings_dialog.run() == Gtk.ResponseType.OK: controller.persist(self.on_notify, self.on_error, self.on_succeed, self.on_fail) controller.exit(asynchronous=asynchronous) self.settings_dialog.destroy() else: self.on_fail(action=_("Authentication failed")) sys.stderr.write("Authentication failed.\n") landscape-client-14.01/landscape/ui/controller/configuration.py0000644000175000017500000001041312301414317024507 0ustar andreasandreasimport logging from gettext import gettext as _ from landscape.ui.constants import NOT_MANAGED, CANONICAL_MANAGED from landscape.ui.model.registration.proxy import RegistrationProxy from landscape.ui.model.configuration.state import StateError class ConfigControllerLockError(Exception): pass class ConfigController(object): """ L{ConfigContoller} defines actions to take against a configuration object, providing starting values from the file, allowing them to be changed transiently, reverted or committed. """ DEFAULT_DEDICATED_ACCOUNT_NAME = "standalone" def __init__(self, configuration): self._observers = [] self._configuration = configuration self._initialised = True def __getattr__(self, name): if name in self.__dict__: return self.__dict__[name] else: return getattr(self._configuration, name) def __setattr__(self, name, value): # this test allows attributes to be set in the __init__ method if not '_initialised' in self.__dict__: return object.__setattr__(self, name, value) if name in ConfigController.__dict__: return object.__setattr__(self, name, value) else: try: setattr(self._configuration, name, value) self._configuration.modify() except AttributeError: return object.__setattr__(self, name, value) else: self._configuration.modify() def load(self): """ Load the initial data from the configuration. 
""" return self._configuration.load_data() def revert(self): """ Revert settings to those the configuration object originally found. """ try: self._configuration.revert() except StateError: # We probably don't care. logging.info("landscape-client-settings-ui reverted with no " "changes to revert.") def persist(self, on_notify, on_error, on_succeed, on_fail): """Persist settings via the configuration object.""" try: self._configuration.persist() except StateError: # We probably don't care. logging.info("landscape-client-settings-ui committed with no " "changes to commit.") if self._configuration.management_type == NOT_MANAGED: self.disable(on_notify, on_succeed, on_fail) else: self.register(on_notify, on_error, on_succeed, on_fail) def register(self, notify_method, error_method, succeed_method, fail_method): """ Perform registration using the L{RegistrationProxy}. """ def registration_fail_wrapper(): fail_method(action=_("Registering client failed")) def registration_succeed_wrapper(): succeed_method(action=_("Registering client was successful")) registration = RegistrationProxy( on_register_notify=notify_method, on_register_error=error_method, on_register_succeed=registration_succeed_wrapper, on_register_fail=registration_fail_wrapper) if self._configuration.management_type == CANONICAL_MANAGED: notify_method(_("Attempting to register at %s") % self._configuration.hosted_landscape_host) else: notify_method(_("Attempting to register at %s") % self._configuration.local_landscape_host) registration.register(self._configuration.get_config_filename()) registration.exit() def disable(self, notify_method, succeed_method, fail_method): """ Disable landscape client via the L{RegistrationProxy}. """ def disabling_fail_wrapper(): fail_method(action=_("Disabling client failed")) def disabling_succeed_wrapper(): succeed_method(action=_("Disabling client was successful")) registration = RegistrationProxy( on_disable_succeed=disabling_succeed_wrapper, on_disable_fail=disabling_fail_wrapper) notify_method(_("Attempting to disable landscape client.")) registration.disable() registration.exit() landscape-client-14.01/apt-update/0000755000175000017500000000000012301414317016601 5ustar andreasandreaslandscape-client-14.01/apt-update/Makefile0000644000175000017500000000013512301414317020240 0ustar andreasandreasNAME = apt-update $(NAME): $(NAME).c $(CC) $(CFLAGS) -Wall $< -o $@ clean: rm -f $(NAME) landscape-client-14.01/apt-update/apt-update.c0000644000175000017500000000371212301414317021014 0ustar andreasandreas/* Copyright (c) 2011 Canonical, Ltd. 
*/ #define _GNU_SOURCE #include <stdio.h> #include <stdlib.h> #include <string.h> #include <errno.h> #include <unistd.h> #include <pwd.h> #include <grp.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/resource.h> int main(int argc, char *argv[], char *envp[]) { char *apt_argv[] = {"/usr/bin/apt-get", "-q", "update", NULL}; char *apt_envp[] = {"PATH=/bin:/usr/bin", NULL, NULL}; // Set the HOME environment variable struct passwd *pwd = getpwuid(geteuid()); if (!pwd) { fprintf(stderr, "error: Unable to find passwd entry for uid %d (%s)\n", geteuid(), strerror(errno)); exit(1); } if (asprintf(&apt_envp[1], "HOME=%s", pwd->pw_dir) == -1) { perror("error: Unable to create HOME environment variable"); exit(1); } // Drop any supplementary groups if (setgroups(0, NULL) == -1) { perror("error: Unable to set supplementary group IDs"); exit(1); } // Set real/effective gid and uid if (setregid(pwd->pw_gid, pwd->pw_gid) == -1) { fprintf(stderr, "error: Unable to set real and effective gid (%s)\n", strerror(errno)); exit(1); } if (setreuid(pwd->pw_uid, pwd->pw_uid) == -1) { perror("error: Unable to set real and effective uid"); exit(1); } // Close all file descriptors except the standard ones struct rlimit rlp; if (getrlimit(RLIMIT_NOFILE, &rlp) == -1) { perror("error: Unable to determine file descriptor limits"); exit(1); } int file_max; if (rlp.rlim_max == RLIM_INFINITY || rlp.rlim_max > 4096) file_max = 4096; else file_max = rlp.rlim_max; int file; for (file = 3; file < file_max; file++) { close(file); } // Set umask to 022 umask(S_IWGRP | S_IWOTH); if (chdir("/") == -1) { perror("error: Unable to change working directory"); exit(1); } // Run apt-get update execve(apt_argv[0], apt_argv, apt_envp); perror("error: Unable to execute apt-get"); return 1; } landscape-client-14.01/dev/0000755000175000017500000000000012301414317015313 5ustar andreasandreaslandscape-client-14.01/dev/upload-to-ppa0000755000175000017500000000602112301414317017722 0ustar andreasandreas#!/bin/sh -e help () { cat <<EOF -p=<ppa>|--ppa=<ppa> The PPA to upload to. This gets passed to dput; please make sure you have a matching stanza in your ~/.dput.cf -k=<key>|--key=<key> The GPG key used to sign the packages -s|--snapshot Tweak the Debian revision by including the current bzr revision number in it (e.g. 1.4.0~bzr178-0ubuntu0.8.04) -h|--help Print this help and exit EOF exit } # # Check if we are in a bzr branch # if ! [ -d .bzr ] || ! [ -f debian/changelog ]; then echo "Error: not in a package bzr branch" echo help fi # # Set defaults and parse command line arguments # ppa=landscape key=free.ekanayaka@canonical.com snapshot=no package=$(dpkg-parsechangelog |grep ^Source|cut -f 2 -d " ") version=$(dpkg-parsechangelog |grep ^Version|cut -f 2 -d " ") upstream=$(echo $version | cut -f 1 -d "-") for i in $*; do case $i in -p=*|--ppa=*) ppa=`echo $i | sed 's/[-a-zA-Z0-9]*=//'` ;; -k=*|--key=*) key=`echo $i | sed 's/[-a-zA-Z0-9]*=//'` ;; -s|--snapshot) snapshot=yes ;; -h|--help) help ;; *) echo "Error: unknown option $i" echo help ;; esac done if [ "$snapshot" = "yes" ]; then bzr_rev=$(bzr log -l 1|grep ^revno|cut -f 2 -d " ") upstream="$upstream~bzr$bzr_rev" fi # # Clean up from possible previous runs # rm -fR ../${package}-* rm -f ../${package}_* # # Export the sources # bzr export ../${package}-${upstream} cd ..
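apt-update.c above sanitizes its execution environment before handing control to apt-get: it builds a minimal environment, drops supplementary groups, pins the real and effective gid/uid, closes every inherited descriptor above stderr, resets the umask and working directory, and only then calls execve(). Below is a rough Python transliteration of that same hardening sequence, offered purely as illustration (it is not shipped with the package); like the C code, the id manipulation only succeeds when run with sufficient privilege.

# Rough Python transliteration of the hardening sequence in apt-update.c
# above. Illustrative only -- not part of the package.
import os
import pwd
import resource

entry = pwd.getpwuid(os.geteuid())

# Minimal, explicit environment, mirroring apt_envp in the C code.
os.environ.clear()
os.environ["PATH"] = "/bin:/usr/bin"
os.environ["HOME"] = entry.pw_dir

os.setgroups([])                         # drop supplementary groups
os.setregid(entry.pw_gid, entry.pw_gid)  # pin real/effective gid
os.setreuid(entry.pw_uid, entry.pw_uid)  # pin real/effective uid

# Close inherited descriptors above stderr, capped at 4096 like the C code.
_soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
file_max = 4096 if hard == resource.RLIM_INFINITY or hard > 4096 else hard
os.closerange(3, file_max)

os.umask(0o022)
os.chdir("/")
os.execv("/usr/bin/apt-get", ["/usr/bin/apt-get", "-q", "update"])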
cp -a ${package}-${upstream} ${package}-${upstream}.orig rm -R ${package}-${upstream}.orig/debian cd ${package}-${upstream} # # Build source packages and upload them # releases="hardy_8.04 karmic_9.10 lucid_10.04 maverick_10.10 natty_11.04 oneiric_11.10" if [ "$snapshot" = "yes" ]; then # Snapshot, we'll add a dummy changelog entry like for all releases source_opt="-sa" releases="$releases natty_11.04" else # Actual release, use the latest changelog entry and upload now dpkg-buildpackage -S -sa -k$key dput $ppa ../${package}_${version}_source.changes source_opt="-sd" fi for release in $releases; do codename=$(echo $release|cut -f 1 -d _) revision=0ubuntu0.$(echo $release|cut -f 2 -d _) if ! [ "$snapshot" = "yes" ]; then revision=${revision}.0~landscape1 fi version=$upstream-$revision if [ "$snapshot" = "yes" ]; then message="Snapshot build for $codename" else message="Built for $codename, no source changes" fi cp debian/changelog ../ dch --force-distribution -b -v $version -D $codename -m $message dpkg-buildpackage -S $source_opt -k$key dput $ppa ../${package}_${version}_source.changes mv ../changelog debian source_opt="-sd" done landscape-client-14.01/dev/tarmac-config0000644000175000017500000000033012301414317017744 0ustar andreasandreas[lp:landscape-client] verify_command = make check voting_criteria = Approve >= 2, Disapprove == 0 commit_message_template = Merge [f=] [r=] [a=]\n landscape-client-14.01/dev/dns-server0000755000175000017500000000667512301414317017347 0ustar andreasandreas#!/usr/bin/env python """ This script creates a DNS server that has enough functionality to test the server autodiscovery feature of landscape client. Landscape client uses /etc/resolv.conf to find the location of name servers. To have landscape client use this server, make sure that: nameserver 127.0.0.1 is the first nameserver in /etc/resolv.conf. Linux name lookups only support port 53, so this program must run on port 53 in order to work. Be aware that NetworkManager overwrites this file any time your network configuration changes. This program does not return enough information for a tool like dig to complete successfully. If this is needed, use Bind 9, detailed at https://wiki.canonical.com/Landscape/SpecRegistry/0082 """ import argparse import sys from twisted.internet import reactor, defer from twisted.names import dns, common from twisted.names.server import DNSServerFactory PORT = 5553 SRV_RESPONSE = 'lds1.mylandscapehost.com' A_RESPONSE = '127.0.0.1' class SimpleResolver(common.ResolverBase): def _lookup(self, name, cls, typ, timeout): """ Respond to DNS requests. See documentation for L{twisted.names.common.ResolverBase}. """ # This nameserver returns the same result all the time, regardless # of what name the client asks for. results = [] ttl = 60 if typ == dns.SRV: record = dns.Record_SRV(0, 1, 80, SRV_RESPONSE, ttl) owner = '_landscape._tcp.mylandscapehost.com' results.append(dns.RRHeader(owner, record.TYPE, dns.IN, ttl, record, auth=True)) elif typ == dns.A: record = dns.Record_A(A_RESPONSE) owner = 'landscape.localdomain' results.append(dns.RRHeader(owner, record.TYPE, dns.IN, ttl, record, auth=True)) authority = [] return defer.succeed((results, authority, [])) def parse_command_line(args): global SRV_RESPONSE, A_RESPONSE, PORT description = """ This test tool responds to DNS queries for SRV and A records. It always responds with the same result regardless of the query string sent by the client. 
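Besides the dig invocations suggested below, the SimpleResolver above can be exercised with a short twisted.names client. This is a sketch that assumes dns-server is running locally on its default port 5553:

# Query the test DNS server above with twisted.names instead of dig.
# Assumes dns-server is listening on 127.0.0.1:5553 (its default port).
import sys

from twisted.internet import reactor
from twisted.names import client

resolver = client.Resolver(servers=[("127.0.0.1", 5553)])


def show(result):
    answers, authority, additional = result
    for answer in answers:
        print answer.payload


d = resolver.lookupService("_landscape._tcp.mylandscapehost.com")
d.addCallbacks(show, lambda failure: sys.stderr.write(str(failure)))
d.addBoth(lambda ignored: reactor.stop())
reactor.run()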
To test this tool, try the following commands: dig -p 5553 @127.0.0.1 SRV _landscape._tcp.mylandscapehost.com dig -p 5553 @127.0.0.1 localhost.localdomain """ parser = argparse.ArgumentParser(description=description) parser.add_argument("--srv-response", type=str, default=SRV_RESPONSE, help="Give this reply to SRV queries (eg: localhost)") parser.add_argument("--a-response", type=str, default=A_RESPONSE, help="Give this reply to A queries (eg: 127.0.0.1)") parser.add_argument("--port", type=int, default=PORT, help="Listen on this port (default 5553). DNS " "normally runs on port 53") args = vars(parser.parse_args()) SRV_RESPONSE = args["srv_response"] A_RESPONSE = args["a_response"] PORT = args["port"] def main(): parse_command_line(sys.argv) simple_resolver = SimpleResolver() factory = DNSServerFactory(authorities=[simple_resolver], verbose=1) protocol = dns.DNSDatagramProtocol(factory) print "starting reactor on port %s.." % PORT reactor.listenTCP(PORT, factory) reactor.listenUDP(PORT, protocol) reactor.run() print "reactor stopped..." if __name__ == "__main__": main() landscape-client-14.01/dev/landscape-client-vm0000755000175000017500000001244512301414317021075 0ustar andreasandreas#!/bin/sh -e help () { cat <: build a landscape-client VM for the specified release. Available options: -m, --mirror The Ubuntu mirror to use to build the VM, and for the APT sources inside the VM itself. If you want to use apt-proxy, you have to modify the apt-proxy-v2.conf file to make apt-proxy listen to your actual network interface address, instead of the loopback one. For example: ;; Server IP to listen on address = 192.168.1.162 -s, --server The hostname of the Landscape server the client should connect to. -a, --account The name of the Landscape account to use. -p, --password The password for the Landscape account. -P, --profile Package profile to use, can be server or desktop. -k, --ssl-key Specify an SSL key to be used in the client config. -r, --repository Specify the PPA holding the client packages. -b, --build If yes, the landscape-client packages from this branch will be built and installed inside the VM, otherwise they will be pulled from the APT repositories. For example, this script can be invoked like this: ./dev/landscape-client-vm --password intrepid where is the account password of the landscape-devel account on the Landscape staging server (or you can specify another account with the --account parameter). The built VM will be stored under ./build/intrepid along with some other files. To launch the VM, cd to ./build/intrepid and issue: $ ./run Once it's booted you can log into it with: $ ./ssh EOF } OPTS=$(getopt -o hm:s:a:p:P:k:r:b: --long help,mirror:,server:,account:,password:,profile:,ssl-key:,repository:,build: -- "$@") if [ $? != 0 ]; then exit 1 fi eval set -- "$OPTS" MIRROR=http://archive.ubuntu.com/ubuntu SERVER=staging.landscape.canonical.com ACCOUNT=landscape-devel PASSWORD= PROFILE=server SSL_KEY= PPA=landscape/trunk BUILD=yes while true ; do case "$1" in -h|--help) help; exit 1; shift ;; -m|--mirror) MIRROR=$2; shift 2 ;; -s|--server) SERVER=$2; shift 2;; -a|--account) ACCOUNT=$2; shift 2;; -p|--password) PASSWORD=$2; shift 2;; -P|--profile) PROFILE=$2; shift 2;; -S|--ssl-key) SSL_KEY=$2; shift 2;; -r|--repository) PPA=$2; shift 2;; -b|--build) BUILD=$2; shift 2;; --) shift ; break ;; *) echo "Internal error!" 
; exit 1 ;; esac done if [ "$1" = "" ]; then help exit fi RELEASE=$1 TOPDIR=$(pwd)/build/${RELEASE}-${PROFILE} SSH_KEY=$TOPDIR/ssh_key SSH_PORT=3322 ROOTSIZE=8192 rm -fR $TOPDIR mkdir -p $TOPDIR ssh-keygen -N '' -f $SSH_KEY cd $TOPDIR cat > config <> config fi cat > script-wrapper < ppa-key < script < /etc/default/landscape-client EOF chmod 755 script cat > manifest <> manifest fi cat > ssh < run < The Ubuntu mirror to use to build the pbuilder used by piuparts chroot. -f, --force Force running the tests, even if the debian/ directory hasn't been changed in the last bzr commit. EOF } # # Check if X is running, if not abort because we would make X crash (D-Bus!) # if pidof X > /dev/null; then cat <= 0.36 # PIUPARTS_VERSION=$(sudo piuparts --version 2>/dev/null| cut -f 2 -d " ") if ! dpkg --compare-versions $PIUPARTS_VERSION ge 0.36; then cat < ${TOPDIR}/scripts/post_purge_policy_kit <, YEAR. # Thomas Hervé , 2012. # msgid "" msgstr "" "Project-Id-Version: PACKAGE VERSION\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2013-01-24 10:45-0200\n" "PO-Revision-Date: 2012-03-28 20:07+0100\n" "Last-Translator: Thomas Hervé \n" "Language-Team: français <>\n" "Language: \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=2; plural=(n!=1);\n" #: ../landscape/ui/controller/app.py:49 msgid "Success." msgstr "Réussi." #: ../landscape/ui/controller/app.py:58 msgid "Failure." msgstr "Echoué." #: ../landscape/ui/controller/app.py:90 msgid "Authentication failed" msgstr "L'authentification a échoué" #: ../landscape/ui/controller/configuration.py:87 msgid "Registering client failed" msgstr "L'enregistrement du client a échoué" #: ../landscape/ui/controller/configuration.py:90 msgid "Registering client was successful" msgstr "L'enregistrement du client a réussi" #: ../landscape/ui/controller/configuration.py:98 #: ../landscape/ui/controller/configuration.py:101 #, python-format msgid "Attempting to register at %s" msgstr "Tentative d'enregistrement sur %s" #: ../landscape/ui/controller/configuration.py:112 msgid "Disabling client failed" msgstr "Le client n'a pas pu être désactivé" #: ../landscape/ui/controller/configuration.py:115 msgid "Disabling client was successful" msgstr "Le client a été désactivé avec succès" #: ../landscape/ui/controller/configuration.py:120 msgid "Attempting to disable landscape client." msgstr "Tentative de désactivation du client Landscape." #: ../landscape/ui/view/configuration.py:55 #: ../applications/landscape-client-settings.desktop.in.h:1 msgid "Landscape Service" msgstr "Service Landscape" #: ../landscape/ui/view/configuration.py:124 msgid "None" msgstr "Aucun" #: ../landscape/ui/view/configuration.py:128 msgid "Landscape - hosted by Canonical" msgstr "Landscape - hébergé par Canonical" #: ../landscape/ui/view/configuration.py:132 msgid "Landscape - dedicated server" msgstr "Landscape - serveur dédié" #: ../landscape/ui/view/configuration.py:136 msgid "Register" msgstr "S'enregistrer" #: ../landscape/ui/view/configuration.py:140 msgid "Disable" msgstr "Désactiver" #: ../landscape/ui/view/configuration.py:144 msgid "Invalid host name." msgstr "Nom d'hôte invalide." #: ../landscape/ui/view/configuration.py:148 msgid "Only ASCII characters are allowed." msgstr "Seuls les caractères ASCII sont autritorisés." #: ../landscape/ui/view/ui/landscape-client-settings.glade.h:1 msgid "" "Landscape is a remote administration service from Canonical. 
If you allow " "it, a Landscape server can monitor this computer's performance and send " "administration commands." msgstr "" "Landscape est un service de gestion à distance de Canonical. Si vous " "l'autorisez, un serveur Landscape peut surveiller les performance de cette " "machine et envoyer des commandes administratives." #: ../landscape/ui/view/ui/landscape-client-settings.glade.h:2 #: ../scripts/landscape-client-ui-install:52 msgid "Find out more..." msgstr "En apprendre plus..." #: ../landscape/ui/view/ui/landscape-client-settings.glade.h:3 msgid "Landscape service:" msgstr "Serveur Landscape à utiliser:" #: ../landscape/ui/view/ui/landscape-client-settings.glade.h:4 msgid "Account name:" msgstr "Nom du compte:" #: ../landscape/ui/view/ui/landscape-client-settings.glade.h:5 msgid "Registration Key:" msgstr "Clé d'enregistrement:" #: ../landscape/ui/view/ui/landscape-client-settings.glade.h:6 msgid "Don't have an account?" msgstr "Vous n'avez pas de compte?" #: ../landscape/ui/view/ui/landscape-client-settings.glade.h:7 msgid "Sign up..." msgstr "S'inscrire..." #: ../landscape/ui/view/ui/landscape-client-settings.glade.h:8 msgid "Landscape server hostname:" msgstr "Nom d'hôte du serveur Landscape:" #: ../landscape/ui/view/ui/landscape-client-settings.glade.h:9 msgid "" "If you click \"Disable\" the Landscape client on this machine will be " "disabled. You can reenable it later by revisiting this dialog." msgstr "" "Si vous cliquez sur \"Désactiver\" le client Landscape sera désactivé sur " "cette machine. Vous pouvez le réactiver plus tard en rouvrant cette fenêtre " "de dialogue." #: ../applications/landscape-client-settings.desktop.in.h:2 msgid "Landscape Management Service Preferences" msgstr "Préférences du service de gestion Landscape" #: ../polkit-1/com.canonical.LandscapeClientSettings.policy.in.h:1 msgid "Allow the user to read and write Landscape Client settings." msgstr "" "Autorise l'utilisateur à lire et écrire la configuration du client Landscape." #: ../polkit-1/com.canonical.LandscapeClientSettings.policy.in.h:2 msgid "" "System policy prevents you from reading and writing Landscape Client " "Settings." msgstr "" "Politique système vous empêchant de lire et d'écrire la configuration du " "client Landscape." #: ../scripts/landscape-client-ui-install:55 msgid "Landscape client" msgstr "Client Landscape" #: ../scripts/landscape-client-ui-install:56 msgid "" "Landscape is an easy-to-use commercial systems management and monitoring " "service offered by Canonical that helps administrators manage multiple " "machines efficiently." msgstr "" "Landscape est un système de gestion et de surveillance proposé en tant que " "service payant par Canonical, qui aide les administrateurs à gérer plusieurs " "machine efficacement." #: ../scripts/landscape-client-ui-install:59 msgid "" "You need to install Landscape client to be able to configure it. Do you want " "to install it now?" msgstr "" "Il faut installer le client Landscape pour le configurer. Voulez-vous " "l'installer maintenant?" #: ../scripts/landscape-client-ui-install:61 msgid "Install Landscape client?" msgstr "Installer le client Landscape?" #: ../scripts/landscape-client-ui-install:62 msgid "Install" msgstr "Installer" landscape-client-14.01/po/landscape-client.pot0000644000175000017500000001064112301414317021107 0ustar andreasandreas# SOME DESCRIPTIVE TITLE. # Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER # This file is distributed under the same license as the PACKAGE package. # FIRST AUTHOR , YEAR. 
# #, fuzzy msgid "" msgstr "" "Project-Id-Version: PACKAGE VERSION\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2013-01-24 10:45-0200\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "Language: \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=CHARSET\n" "Content-Transfer-Encoding: 8bit\n" #: ../landscape/ui/controller/app.py:49 msgid "Success." msgstr "" #: ../landscape/ui/controller/app.py:58 msgid "Failure." msgstr "" #: ../landscape/ui/controller/app.py:90 msgid "Authentication failed" msgstr "" #: ../landscape/ui/controller/configuration.py:87 msgid "Registering client failed" msgstr "" #: ../landscape/ui/controller/configuration.py:90 msgid "Registering client was successful" msgstr "" #: ../landscape/ui/controller/configuration.py:98 #: ../landscape/ui/controller/configuration.py:101 #, python-format msgid "Attempting to register at %s" msgstr "" #: ../landscape/ui/controller/configuration.py:112 msgid "Disabling client failed" msgstr "" #: ../landscape/ui/controller/configuration.py:115 msgid "Disabling client was successful" msgstr "" #: ../landscape/ui/controller/configuration.py:120 msgid "Attempting to disable landscape client." msgstr "" #: ../landscape/ui/view/configuration.py:55 #: ../applications/landscape-client-settings.desktop.in.h:1 msgid "Landscape Service" msgstr "" #: ../landscape/ui/view/configuration.py:124 msgid "None" msgstr "" #: ../landscape/ui/view/configuration.py:128 msgid "Landscape - hosted by Canonical" msgstr "" #: ../landscape/ui/view/configuration.py:132 msgid "Landscape - dedicated server" msgstr "" #: ../landscape/ui/view/configuration.py:136 msgid "Register" msgstr "" #: ../landscape/ui/view/configuration.py:140 msgid "Disable" msgstr "" #: ../landscape/ui/view/configuration.py:144 msgid "Invalid host name." msgstr "" #: ../landscape/ui/view/configuration.py:148 msgid "Only ASCII characters are allowed." msgstr "" #: ../landscape/ui/view/ui/landscape-client-settings.glade.h:1 msgid "" "Landscape is a remote administration service from Canonical. If you allow " "it, a Landscape server can monitor this computer's performance and send " "administration commands." msgstr "" #: ../landscape/ui/view/ui/landscape-client-settings.glade.h:2 #: ../scripts/landscape-client-ui-install:52 msgid "Find out more..." msgstr "" #: ../landscape/ui/view/ui/landscape-client-settings.glade.h:3 msgid "Landscape service:" msgstr "" #: ../landscape/ui/view/ui/landscape-client-settings.glade.h:4 msgid "Account name:" msgstr "" #: ../landscape/ui/view/ui/landscape-client-settings.glade.h:5 msgid "Registration Key:" msgstr "" #: ../landscape/ui/view/ui/landscape-client-settings.glade.h:6 msgid "Don't have an account?" msgstr "" #: ../landscape/ui/view/ui/landscape-client-settings.glade.h:7 msgid "Sign up..." msgstr "" #: ../landscape/ui/view/ui/landscape-client-settings.glade.h:8 msgid "Landscape server hostname:" msgstr "" #: ../landscape/ui/view/ui/landscape-client-settings.glade.h:9 msgid "" "If you click \"Disable\" the Landscape client on this machine will be " "disabled. You can reenable it later by revisiting this dialog." msgstr "" #: ../applications/landscape-client-settings.desktop.in.h:2 msgid "Landscape Management Service Preferences" msgstr "" #: ../polkit-1/com.canonical.LandscapeClientSettings.policy.in.h:1 msgid "Allow the user to read and write Landscape Client settings." 
msgstr "" #: ../polkit-1/com.canonical.LandscapeClientSettings.policy.in.h:2 msgid "" "System policy prevents you from reading and writing Landscape Client " "Settings." msgstr "" #: ../scripts/landscape-client-ui-install:55 msgid "Landscape client" msgstr "" #: ../scripts/landscape-client-ui-install:56 msgid "" "Landscape is an easy-to-use commercial systems management and monitoring " "service offered by Canonical that helps administrators manage multiple " "machines efficiently." msgstr "" #: ../scripts/landscape-client-ui-install:59 msgid "" "You need to install Landscape client to be able to configure it. Do you want " "to install it now?" msgstr "" #: ../scripts/landscape-client-ui-install:61 msgid "Install Landscape client?" msgstr "" #: ../scripts/landscape-client-ui-install:62 msgid "Install" msgstr "" landscape-client-14.01/po/POTFILES.in0000644000175000017500000000050112301414317016724 0ustar andreasandreas[encoding: UTF-8] landscape/ui/controller/app.py landscape/ui/controller/configuration.py landscape/ui/view/configuration.py landscape/ui/view/ui/landscape-client-settings.glade applications/landscape-client-settings.desktop.in polkit-1/com.canonical.LandscapeClientSettings.policy.in scripts/landscape-client-ui-install landscape-client-14.01/README0000644000175000017500000000267512301414317015427 0ustar andreasandreas== Non-root mode == The Landscape Client generally runs as a combination of the 'root' and 'landscape' users. It is possible to disable the administrative features of Landscape and run only the monitoring parts of it without using the 'root' user at all. If you wish to use the Landscape Client in this way, it's recommended that you perform these steps immediately after installing the landscape-client package. Edit /etc/default/landscape-client and add the following lines: RUN=1 DAEMON_USER=landscape Edit /etc/landscape/client.conf and add the following line: monitor_only = true Now you can run 'sudo landscape-config' as usual to complete the configuration of your client and register with the Landscape service. == Developing == To run the full test suite, you must have a dbus session bus running. If you don't have one (for example, if you're running the tests in an ssh session), run the following command: export DBUS_SESSION_BUS_ADDRESS=`dbus-daemon --print-address=1 --session --fork` Then your tests should pass. When you want to test the landscape client manually without management features, you can simply run: $ ./scripts/landscape-client This defaults to the 'landscape-client.conf' configuration file. When you want to test management features manually, you'll need to run as root. There's a configuration file 'root-client.conf' which specifies use of the system bus. $ sudo ./scripts/landscape-client -c root-client.conf landscape-client-14.01/polkit-1/0000755000175000017500000000000012301414317016175 5ustar andreasandreaslandscape-client-14.01/polkit-1/com.canonical.LandscapeClientSettings.policy.in0000644000175000017500000000150212301414317027276 0ustar andreasandreas Canonical Ltd. http://www.canonical.com preferences-management-service <_description>Allow the user to read and write Landscape Client settings. <_message>System policy prevents you from reading and writing Landscape Client Settings. 
auth_admin_keep auth_admin_keep auth_admin_keep landscape-client-14.01/landscape-client.conf0000644000175000017500000000044412301414317020614 0ustar andreasandreas[client] bus = session computer_title = John's PC account_name = onward registration_key = url = http://localhost:8080/message-system data_path = /tmp/landscape/ log_dir = /tmp/landscape/ log_level = debug pid_file = /tmp/landscape/landscape-client.pid ping_url = http://localhost:8081/ping landscape-client-14.01/dbus-1/0000755000175000017500000000000012301414317015630 5ustar andreasandreaslandscape-client-14.01/dbus-1/com.canonical.LandscapeClientRegistration.service0000644000175000017500000000017612301414317027345 0ustar andreasandreas[D-BUS Service] Name=com.canonical.LandscapeClientRegistration Exec=/usr/bin/landscape-client-registration-mechanism User=rootlandscape-client-14.01/dbus-1/landscape.conf0000644000175000017500000000332512301414317020434 0ustar andreasandreas landscape-client-14.01/dbus-1/com.canonical.LandscapeClientSettings.service0000644000175000017500000000016612301414317026472 0ustar andreasandreas[D-BUS Service] Name=com.canonical.LandscapeClientSettings Exec=/usr/bin/landscape-client-settings-mechanism User=rootlandscape-client-14.01/dbus-1/com.canonical.LandscapeClientRegistration.conf0000644000175000017500000000122412301414317026625 0ustar andreasandreas system landscape-client-14.01/dbus-1/com.canonical.LandscapeClientSettings.conf0000644000175000017500000000120012301414317025745 0ustar andreasandreas system landscape-client-14.01/LICENSE0000644000175000017500000004313112301414317015544 0ustar andreasandreas GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc. 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. 
We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. 
c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. 
If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. 
If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. 
Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License. landscape-client-14.01/root-client.conf0000644000175000017500000000067612301414317017654 0ustar andreasandreas[client] bus = system computer_title = John's PC account_name = onward registration_key = url = http://localhost:8080/message-system package_hash_id_url = http://localhost:8080/hash-id-databases data_path = /tmp/landscape-root log_dir = /tmp/landscape-root log_level = debug pid_file = /tmp/landscape-root/landscape-client.pid ping_url = http://localhost:8081/ping include_manager_plugins = ScriptExecution script_users = www-data, nobody, root landscape-client-14.01/glib-2.0/0000755000175000017500000000000012301414317015747 5ustar andreasandreaslandscape-client-14.01/glib-2.0/schemas/0000755000175000017500000000000012301414317017372 5ustar andreasandreaslandscape-client-14.01/glib-2.0/schemas/com.canonical.landscape-client-settings.gschema.xml0000644000175000017500000000304312301414317031271 0ustar andreasandreas "not" Whether the client settings UI currently set to a hosted client configuration, an LDS instance or no management service. "" The title to register the machine with. "landscape.canonical.com" The hostname of the Canonical hosted landscape system. "" An account name for use with the Canonical hosted landscape system. "" A password for use with the Canonical hosted landscape system "" The hostname of the local landscape system. "" An account name for use with the local landscape system. 
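The GSettings schema being described here backs the UISettings object that the settings UI wraps around Gio.Settings. A sketch of reading one of these keys directly via Gio follows; the schema id is inferred from the .gschema.xml filename, and "computer-title" is a hypothetical key name, since the actual key names did not survive in this dump.

# Read a value from the landscape-client-settings GSettings schema.
# Schema id inferred from the filename; "computer-title" is a hypothetical
# key name used purely for illustration.
from gi.repository import Gio

settings = Gio.Settings.new("com.canonical.landscape-client-settings")
print settings.get_string("computer-title")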
"" A password for use with the local landscape system landscape-client-14.01/example.conf0000644000175000017500000001223312301414317017040 0ustar andreasandreas# This is an example configuration file for the landscape client. # It is not meant to be machine-readable, but to list all the existing configuration # options it recognises so that you can create a valid, machine-readable configuration # file (typically as /etc/landscape/client.conf). # All lines starting with a pound sign should be considered comments. # Values given are the default. [client] # GENERAL OPTIONS # The directory in which landscape-client will store data files in. data_path = /var/lib/landscape/client/ # If set to True, do not log to standard output. quiet = False # The directory in which to store log files. log_dir = /var/log/landscape/ # The log level at which to log events. # Values can be one of: "debug", "info", "warning", "error", "critical" log_level = info # The main URL for the landscape server to connect this client to. If you # purchased a Landscape Dedicated Server (LDS), change this to point to your # server instead. This needs to point to the message-system URL. # # Please pay special attention to the protocol used here, since it is a common # source of error. # # Example: # url = https://landscape.example.com/message-system url = https://landscape.canonical.com/message-system # The ping url you want this client to report to. # # If you have purchased a Landscape Dedicated Server (LDS), change this to # point to your server instead. # # Example: # ping_url = http://landscape.example.com/ping ping_url = http://landscape.canonical.com/ping # The public SSL certificate file against which the connection to the landscape # server (set in "url") will be checked. # # This configuration entry is not set by default. # #ssl_public_key # Wether to use server autodiscovery or not. server_autodiscovery = False # The autodiscovery query using multicast DNS. autodiscover_srv_query_string = _landscape._tcp.localdomain # The autodiscovery DNS server name. autodiscovery_a_query_string = landscape.localdomain # If set to True interrupt (SIGINT) signals will be ignored by the # landscape-client daemon. ignore_sigint = False # If set to True, user signal 1 (SIGUSR1) will be ignored by the landscape # client daemon. # SIGUSR1 is used to force rotating logs. ignore_sigusr1 = False # MONITOR OPTIONS # A comma-separated list of monitor plugins to use. # # Currently available monitor plugins are: # # ActiveProcessInfo - lists active processes # ComputerInfo - various information # HardwareInventory - information provided by the "lshw" command # LoadAverage - load information # MemoryInfo - memory information # MountInfo - information about mount points (space available, used) # ProcessorInfo - type, speed, instruction sets # Temperature - temperature sensors information # PackageMonitor - packages installed, available, versions # UserMonitor - users, groups # RebootRequired - whether a reboot is required or not # NetworkActivity - network information (TX, RX) # NetworkDevice - a list of connected network devices # UpdateManager - controls when distribution upgrades are prompted # # The special value "ALL" is an alias for the full list of plugins. monitor_plugins = ALL # The number of seconds between monitor flushes. flush_interval = 300 # 5 minutes # BROKER OPTIONS # The account name this computer belongs to. # This configuration option has no default value. # # It is required that you define a value for this entry. 
account_name = dummy # An optional account-wide key used to register clients. # You can define a registration key in your account settings. # There is no key defined by default. #registration_key = secret-key # The computer title to report to the landscape server as identifying this # computer. # # It is required that you define a value for this entry. #computer_title = Database Server 1 # The number of seconds between server exchanges. exchange_interval = 900 # 15 minutes # The number of seconds between urgent exchanges with the server. urgent_exchange_interval = 60 # 1 minute # The number of seconds between pings. ping_interval = 30 # The number of seconds between apt update calls. apt_update_interval = 21600 # The number of seconds between package monitor runs. package_monitor_interval = 1800 # The URL of the http proxy to use, if any. # This value is optional. # #http_proxy=http://my-proxy.example.com:8080 # The URL of the https proxy to use, if any. # This value is optional. #https_proxy=https://my-proxy.example.com:8081 # If set, your machine will be marked as a cloud instance. cloud = True # The One Time Password (OTP) that was given by cloud-init, to be used during # registration. # # This has no default. #otp = ABCD1234 # A comma-separated list of tags to attach to this computer. # # Example: # tags = servers, oneiric, database, production #tags = example # MANAGER OPTIONS # A comma-separated list of manager plugins to use. # # Currently available manager plugins are: # # ProcessKiller # PackageManager # UserManager # ShutdownManager # AptSources # HardwareInfo # # The special value "ALL" is an alias for the full list of plugins. manager_plugins = ALL # A comma-separated list of usernames that scripts can run as. # # By default, all usernames are allowed. script_users = ALL landscape-client-14.01/setup.cfg0000644000175000017500000000041312301414317016354 0ustar andreasandreas[build] i18n=True [build_i18n] domain=landscape-client xml_files=[("share/polkit-1/actions/", ["polkit-1/com.canonical.LandscapeClientSettings.policy.in"])] desktop_files=[("share/applications", ["applications/landscape-client-settings.desktop.in"])] merge_po=True landscape-client-14.01/man/0000755000175000017500000000000012301414317015310 5ustar andreasandreaslandscape-client-14.01/man/landscape-sysinfo.10000644000175000017500000000740612301414317021023 0ustar andreasandreas.\"Text automatically generated by txt2man .TH landscape-sysinfo 1 "17 June 2013" "" "" .SH NAME \fBlandscape-sysinfo \fP- Display a summary of the current system status \fB .SH SYNOPSIS .nf .fam C \fBlandscape-sysinfo\fP [\fIoptions\fP] .fam T .fi .fam T .fi .SH DESCRIPTION \fBlandscape-sysinfo\fP is a utility that displays information about the computer it is run on. This information is displayed upon login for console users (ssh and terminal login supported) and can also be seen at any time by just calling \fBlandscape-sysinfo\fP from the command line. .PP The output of \fBlandscape-sysinfo\fP can be controlled by enabling or disabling its plugins. See below for how to do that. .PP If the system load is higher than the number of cores, as determined by the count of processor lines in /proc/cpuinfo, then \fBlandscape-sysinfo\fP will not run at login time. This is to prevent it from potentially making a bad situation worse by interfering with what could be an administrator logging in to try to fix the problem causing the high load.
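A minimal sketch of the login-time guard described above, comparing the one-minute load average against the core count from /proc/cpuinfo; this is illustrative only, not the actual landscape-sysinfo implementation.

# Sketch of the login-time guard described above: stay quiet when the
# one-minute load average exceeds the number of cores counted in
# /proc/cpuinfo. Illustrative only.
import os


def system_too_loaded():
    # Count "processor" lines, as the man page describes.
    with open("/proc/cpuinfo") as cpuinfo:
        cores = sum(1 for line in cpuinfo if line.startswith("processor"))
    return os.getloadavg()[0] > max(cores, 1)


if system_too_loaded():
    raise SystemExit(0)  # stay quiet rather than add more load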
.SH OPTIONS .TP .B \fB--version\fP show program's version number and exit .TP .B \fB-h\fP, \fB--help\fP show this help message and exit .TP .B \fB-c\fP FILE, \fB--config\fP=FILE Use config from this file (any command line \fIoptions\fP override settings from the file) (default: '/etc/landscape/client.conf'). .TP .B \fB-d\fP PATH, \fB--data-path\fP=PATH The directory to store data files in (default: '/var/lib/landscape/client/'). .TP .B \fB--sysinfo-plugins\fP=PLUGIN_LIST Comma-delimited list of sysinfo plugins to use. Default is to use all plugins. .TP .B \fB--exclude-sysinfo-plugins\fP=PLUGIN_LIST Comma-delimited list of sysinfo plugins to NOT use. This always takes precedence over plugins to include. .RE .PP Default plugins: Load, Disk, Memory, Temperature, Processes, LoggedInUsers, LandscapeLink, Network .SH CONFIGURATION FILE Any of the long command-line \fIoptions\fP can be used as a configuration directive in the configuration file, under the section [sysinfo], by replacing the hyphen (-) with an underscore (_). .PP For example, to disable the LandscapeLink and Temperature plugins without having to use the command line option, the following can be added to /etc/landscape/client.conf: .PP .nf .fam C [sysinfo] exclude_sysinfo_plugins = Temperature, LandscapeLink .fam T .fi .SH EXAMPLES This is the default configuration with all plugins enabled: .PP .nf .fam C $ landscape-sysinfo System load: 0.66 Processes: 242 Usage of /home: 72.0% of 27.50GB Users logged in: 1 Memory usage: 31% IP address for wlan0: 10.0.1.6 Swap usage: 0% IP address for virbr0: 192.168.122.1 Temperature: 47 C => There is 1 zombie process. Graph this data and manage this system at https://landscape.canonical.com/ .fam T .fi If you want to disable the temperature and Landscape plugins, you could run it like this: .PP .nf .fam C $ landscape-sysinfo --exclude-sysinfo-plugins=Temperature,LandscapeLink System load: 1.08 Processes: 242 Usage of /home: 72.0% of 27.50GB Users logged in: 1 Memory usage: 31% IP address for wlan0: 10.0.1.6 Swap usage: 0% IP address for virbr0: 192.168.122.1 => There is 1 zombie process. .fam T .fi .SH FILES .TP .B /etc/landscape/client.conf Configuration file .TP .B /var/log/landscape/sysinfo.log Log file for when the tool is run as root. This file will usually be empty, unless something went wrong. In that case, it will have more information about the problem. When a regular non-root user runs the tool, the log file is ~/.landscape/sysinfo.log. .SH SEE ALSO \fBlandscape-client\fP(1) \fBupdate-motd\fP(5) landscape-client-14.01/man/landscape-config.txt0000644000175000017500000001565212301414317021257 0ustar andreasandreasNAME landscape-config - configure the Landscape management client SYNOPSIS landscape-config [options] DESCRIPTION Before using the landscape-client, it must be configured with account and computer information that is transmitted to the Landscape server when the client connects. The registration can be performed with or without user interaction. Running the landscape-config program without arguments will start the program, prompting you for the necessary information to run the client. Optionally you can provide command-line arguments (detailed below) to specify default responses. You will be told if the registration was successful or if an error occurred. When registration is successful, Landscape is configured and running on your system. Errors can occur if you provide incorrect registration details or if there are network issues. In the latter case, the client will keep trying to complete the registration in the background. OPTIONS --version Show program's version number and exit. -h, --help Show this help message and exit. -c FILE, --config=FILE Use config from this file (any command line options override settings from the file) (default: '/etc/landscape/client.conf'). -d PATH, --data-path=PATH The directory to store data files in (default: '/var/lib/landscape/client'). -q, --quiet Do not log to the standard output. -l FILE, --log-dir=FILE The directory to write log files to (default: '/var/log/landscape'). --log-level=LOG_LEVEL One of 'debug', 'info', 'warning', 'error' or 'critical' (default: 'info'). --ignore-sigint Ignore interrupt signals. --ignore-sigusr1 Ignore SIGUSR1 signal to rotate logs. -a NAME, --account-name=NAME The account this computer belongs to. -p KEY, --registration-key=KEY The account-wide key used for registering clients. -t TITLE, --computer-title=TITLE The title of this computer. -u URL, --url=URL The server URL to connect to (default: 'https://landscape.canonical.com/message-system'). -k SSL_PUBLIC_KEY, --ssl-public-key=SSL_PUBLIC_KEY The SSL CA certificate to verify the server with. Only used if the server URL to which we connect is https. --exchange-interval=INTERVAL The number of seconds between server exchanges (default: 900). --urgent-exchange-interval=INTERVAL The number of seconds between urgent server exchanges (default: 60). --ping-interval=INTERVAL The number of seconds between pings (default: 30). --ping-url=PING_URL The URL to perform lightweight exchange initiation with (default: 'http://landscape.canonical.com/ping'). --package-monitor-interval=PACKAGE_MONITOR_INTERVAL The interval between package monitor runs (default: 1800). --apt-update-interval=APT_UPDATE_INTERVAL The interval between apt update runs (default: 21600). --http-proxy=URL The URL of the HTTP proxy, if one is needed. --https-proxy=URL The URL of the HTTPS proxy, if one is needed. --cloud Set this if this computer is a cloud instance in EC2 or UEC. Read below for details. --tags=TAGS Comma-separated list of tag names to be sent to the server. --import=FILENAME_OR_URL Filename or URL to import configuration from. Imported options behave as if they were passed on the command line, with precedence being given to real command line options.
In the latter case, the client will keep trying to complete the registration in the background. OPTIONS --version Show program's version number and exit. -h, --help Show this help message and exit. -c FILE, --config=FILE Use config from this file (any command line options override settings from the file) (default: '/etc/landscape/client.conf'). -d PATH, --data-path=PATH The directory to store data files in (default: '/var/lib/landscape/client'). -q, --quiet Do not log to the standard output. -l FILE, --log-dir=FILE The directory to write log files to (default: '/var/log/landscape'). --log-level=LOG_LEVEL One of 'debug', 'info', 'warning', 'error' or 'critical' (default: 'info'). --ignore-sigint Ignore interrupt signals. --ignore-sigusr1 Ignore SIGUSR1 signal to rotate logs. -a NAME, --account-name=NAME The account this computer belongs to. -p KEY, --registration-key=KEY The account-wide key used for registering clients. -t TITLE, --computer-title=TITLE The title of this computer. -u URL, --url=URL The server URL to connect to (default: 'https://landscape.canonical.com/message-system'). -k SSL_PUBLIC_KEY, --ssl-public-key=SSL_PUBLIC_KEY The SSL CA certificate to verify the server with. Only used if the server URL to which we connect is https. --exchange-interval=INTERVAL The number of seconds between server exchanges (default: 900). --urgent-exchange-interval=INTERVAL The number of seconds between urgent server exchanges (default: 60). --ping-interval=INTERVAL The number of seconds between pings (default: 30). --ping-url=PING_URL The URL to perform lightweight exchange initiation with (default: 'http://landscape.canonical.com/ping'). --package-monitor-interval=PACKAGE_MONITOR_INTERVAL The interval between package monitor runs (default: 1800). --apt-update-interval=APT_UPDATE_INTERVAL The interval between apt update runs (default: 21600). --http-proxy=URL The URL of the HTTP proxy, if one is needed. --https-proxy=URL The URL of the HTTPS proxy, if one is needed. --cloud Set this if this computer is a cloud instance in EC2 or UEC. Read below for details. --tags=TAGS Comma separated list of tag names to be sent to the server. --import=FILENAME_OR_URL Filename or URL to import configuration from. Imported options behave as if they were passed in the command line, with precedence being given to real command line options. --script-users=USERS A comma-separated list of users to allow scripts to run. To allow scripts to be run by any user, enter: ALL. --server-autodiscovery=BOOLEAN Whether to enable server autodiscovery or not. Defaults to "False". --autodiscovery-srv-query-string=QUERY The autodiscovery string for DNS SRV queries. Defaults to "_landscape._tcp.localdomain". --autodiscover-a-query-string=QUERY The autodiscovery string for DNS A queries. Defaults to "landscape.localdomain". --include-manager-plugins=PLUGINS A comma-separated list of manager plugins to load explicitly. -n, --no-start Don't start the client automatically. --ok-no-register Return exit code 0 instead of 2 if the client can't be registered. --silent Run without manual interaction. --disable Stop running clients and disable start at boot. --otp=OTP The one-time password (OTP) to use in cloud configuration. CLOUD Landscape has some cloud features that become available when the EC2 or UEC machine instance was started using Landscape and the AMI is one of the official ones provided in the Web user interface. 
CLOUD Landscape has some cloud features that become available when the EC2 or UEC machine instance is started using Landscape and the AMI is one of the official ones provided in the Web user interface. We call these instances "Landscape enabled", because they contain a pre-configured landscape-client that will register the running instance automatically with Landscape as soon as it starts up. You can deploy your own AMI, but if you wish the instance to become "Landscape managed" you need to take a few steps: * make sure the cloud is created in Landscape * either add "CLOUD=1" to /etc/default/landscape-client or use the --cloud switch in the landscape-config(1) command line * make sure the client is configured to start at boot (i.e., the /etc/default/landscape-client has the line "RUN=1") There is no need to further configure the /etc/landscape/client.conf file with details such as account or key, because when in cloud mode this is all discovered by the client itself. You can avoid all of this if you just re-bundle the AMIs we provide. landscape-client is already configured and prepared for the cloud in them. EXAMPLES Register a machine for the first time, or reconfigure an already registered machine, interactively. Command line parameters suppress interaction for provided values. landscape-config Register a machine for the first time, or reconfigure an already registered machine, without requiring user interaction. The client will be configured to start on boot automatically: landscape-config --silent -a account-name -p secret -t `hostname` Register a machine with the script execution plugin enabled, without requiring user interaction: landscape-config --silent -a account-name -p secret -t `hostname` --script-users nobody,landscape,root Register a machine with some tags: landscape-config --silent -a account-name -p secret -t `hostname` --tags=server,www To disable a client, stopping current instances and disabling start at bootup: landscape-config --disable SEE ALSO landscape-client (1) AUTHOR Landscape Development Team landscape-client-14.01/man/landscape-client.10000644000175000017500000000571012301414317020603 0ustar  andreasandreas.\"Text automatically generated by txt2man .TH landscape-client 1 "17 June 2013" "" "" .SH NAME \fBlandscape-client \fP- Landscape system client \fB .SH SYNOPSIS .nf .fam C \fBlandscape-client\fP [\fIoptions\fP] .fam T .fi .fam T .fi .SH DESCRIPTION The \fBlandscape-client\fP is the client program for the landscape system management software. The client is responsible for communicating system information to the landscape server and executing remote management commands on the system. .SH OPTIONS .TP .B \fB--version\fP Show program's version number and exit. .TP .B \fB-h\fP, \fB--help\fP Show this help message and exit. .TP .B \fB-c\fP FILE, \fB--config\fP=FILE Use config from this file (any command line \fIoptions\fP override settings from the file). (default: '/etc/landscape/client.conf') .TP .B \fB-d\fP PATH, \fB--data-path\fP=PATH The directory to store data files in (default: '/var/lib/landscape/client/'). .TP .B \fB-q\fP, \fB--quiet\fP Do not log to the standard output. .TP .B \fB-l\fP FILE, \fB--log-dir\fP=FILE The directory to write log files to (default: '/var/log/landscape'). .TP .B \fB--log-level\fP=LOG_LEVEL One of debug, info, warning, error or critical. .TP .B \fB-u\fP URL, \fB--url\fP=URL The server URL to connect to. .TP .B \fB--ping-url\fP=PING_URL The URL to perform lightweight exchange initiation with. .TP .B \fB-k\fP SSL_PUBLIC_KEY, \fB--ssl-public-key\fP=SSL_PUBLIC_KEY The public SSL key to verify the server. Only used if the given server URL is https.
.TP .B \fB--server-autodiscover\fP=SERVER_AUTODISCOVER Enable server autodiscovery. .TP .B \fB--autodiscover-srv-query-string\fP=AUTODISCOVER_SRV_QUERY_STRING Autodiscovery string for DNS SRV queries. .TP .B \fB--autodiscover-a-query-string\fP=AUTODISCOVER_A_QUERY_STRING Autodiscovery string for DNS A queries. .TP .B \fB--ignore-sigint\fP Ignore interrupt signals. .TP .B \fB--ignore-sigusr1\fP Ignore SIGUSR1 signal to rotate logs. .TP .B \fB--daemon\fP Fork and run in the background. .TP .B \fB--pid-file\fP=PID_FILE The file to write the PID to. .TP .B \fB--monitor-only\fP Don't enable management features. This is useful if you want to run the client as a non-root user. .SH EXAMPLES To run the client in the foreground, with all logging data printed to standard I/O: .PP .nf .fam C landscape-client .fam T .fi To run the client in the background with a particular configuration file: .PP .nf .fam C landscape-client --config=my.conf --daemon .fam T .fi To run the client in the foreground, with a configuration file, but overriding the bus option: .PP .nf .fam C landscape-client --config=my.conf --bus=session .fam T .fi If you want to run the client in non-root mode, please use \fB--monitor-only\fP (or add it to the config file as monitor_only = True) and add the user you want to run as to the /etc/default/\fBlandscape-client\fP file: .PP .nf .fam C DAEMON_USER=landscape .fam T .fi
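For example, a minimal configuration file for such a monitor-only, non-root setup might contain just the following (an illustrative sketch; combine it with whatever other directives your deployment needs): .PP .nf .fam C [client] monitor_only = True .fam T .fi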
.SH SEE ALSO landscape-config (1) .SH AUTHOR Landscape Development Team landscape-client-14.01/man/landscape-sysinfo.txt0000644000175000017500000000711212301414317021474 0ustar  andreasandreasNAME landscape-sysinfo - Display a summary of the current system status SYNOPSIS landscape-sysinfo [options] DESCRIPTION landscape-sysinfo is a utility that displays information about the computer it is run on. This information is displayed upon login for console users (ssh and terminal login supported) and can also be seen at any time by just calling landscape-sysinfo from the command line. The output of landscape-sysinfo can be controlled by enabling or disabling its plugins. See below for how to do that. If the system load is higher than the number of cores, as determined by the count of processor lines in /proc/cpuinfo, then landscape-sysinfo will not run at login time. This is to prevent it from potentially making a bad situation worse by interfering with what could be an administrator logging in to try to fix the problem causing the high load. OPTIONS --version show program's version number and exit -h, --help show this help message and exit -c FILE, --config=FILE Use config from this file (any command line options override settings from the file) (default: '/etc/landscape/client.conf'). -d PATH, --data-path=PATH The directory to store data files in (default: '/var/lib/landscape/client/'). --sysinfo-plugins=PLUGIN_LIST Comma-delimited list of sysinfo plugins to use. Default is to use all plugins. --exclude-sysinfo-plugins=PLUGIN_LIST Comma-delimited list of sysinfo plugins to NOT use. This always takes precedence over plugins to include. Default plugins: Load, Disk, Memory, Temperature, Processes, LoggedInUsers, LandscapeLink, Network CONFIGURATION FILE Any of the long command-line options can be used as a configuration directive in the configuration file, under the section [sysinfo], by replacing the hyphen (-) with an underscore (_). For example, to disable the LandscapeLink and Temperature plugins without having to use the command line option, the following can be added to /etc/landscape/client.conf: [sysinfo] exclude_sysinfo_plugins = Temperature, LandscapeLink EXAMPLES This is the default configuration with all plugins enabled: $ landscape-sysinfo System load: 0.66 Processes: 242 Usage of /home: 72.0% of 27.50GB Users logged in: 1 Memory usage: 31% IP address for wlan0: 10.0.1.6 Swap usage: 0% IP address for virbr0: 192.168.122.1 Temperature: 47 C => There is 1 zombie process. Graph this data and manage this system at https://landscape.canonical.com/ If you want to disable the temperature and Landscape plugins, you could run it like this: $ landscape-sysinfo --exclude-sysinfo-plugins=Temperature,LandscapeLink System load: 1.08 Processes: 242 Usage of /home: 72.0% of 27.50GB Users logged in: 1 Memory usage: 31% IP address for wlan0: 10.0.1.6 Swap usage: 0% IP address for virbr0: 192.168.122.1 => There is 1 zombie process. FILES /etc/landscape/client.conf Configuration file /var/log/landscape/sysinfo.log Log file for when the tool is run as root. This file will usually be empty, unless something went wrong. In that case, it will have more information about the problem. When a regular non-root user runs the tool, the log file is ~/.landscape/sysinfo.log. SEE ALSO landscape-client(1) update-motd(5) landscape-client-14.01/man/landscape-message.txt0000644000175000017500000000132612301414317021427 0ustar  andreasandreasNAME landscape-message - Send a message to the landscape web interface SYNOPSIS landscape-message [options] [MESSAGE] ... DESCRIPTION Invoking landscape-message will cause a message to appear in the History section for this computer in the Landscape web UI. If no MESSAGE is specified on the command line, then landscape-message will read the message from STDIN until EOF. OPTIONS --version Show program's version number and exit. -h, --help Show this help message and exit. -b BUS, --bus=BUS The DBUS bus to use to send the message. EXAMPLES landscape-message Hello administrator SEE ALSO landscape-client (1) AUTHOR Landscape Development Team landscape-client-14.01/man/landscape-client.txt0000644000175000017500000000562012301414317021262 0ustar  andreasandreasNAME landscape-client - Landscape system client SYNOPSIS landscape-client [options] DESCRIPTION The landscape-client is the client program for the landscape system management software. The client is responsible for communicating system information to the landscape server and executing remote management commands on the system. OPTIONS --version Show program's version number and exit. -h, --help Show this help message and exit. -c FILE, --config=FILE Use config from this file (any command line options override settings from the file). (default: '/etc/landscape/client.conf') -d PATH, --data-path=PATH The directory to store data files in (default: '/var/lib/landscape/client/'). -q, --quiet Do not log to the standard output. -l FILE, --log-dir=FILE The directory to write log files to (default: '/var/log/landscape'). --log-level=LOG_LEVEL One of debug, info, warning, error or critical. -u URL, --url=URL The server URL to connect to. --ping-url=PING_URL The URL to perform lightweight exchange initiation with. -k SSL_PUBLIC_KEY, --ssl-public-key=SSL_PUBLIC_KEY The public SSL key to verify the server. Only used if the given server URL is https. --server-autodiscover=SERVER_AUTODISCOVER Enable server autodiscovery.
--autodiscover-srv-query-string=AUTODISCOVER_SRV_QUERY_STRING Autodiscovery string for DNS SRV queries. --autodiscover-a-query-string=AUTODISCOVER_A_QUERY_STRING Autodiscovery string for DNS A queries. --ignore-sigint Ignore interrupt signals. --ignore-sigusr1 Ignore SIGUSR1 signal to rotate logs. --daemon Fork and run in the background. --pid-file=PID_FILE The file to write the PID to. --monitor-only Don't enable management features. This is useful if you want to run the client as a non-root user. EXAMPLES To run the client in the foreground, with all logging data printed to standard I/O: landscape-client To run the client in the background with a particular configuration file: landscape-client --config=my.conf --daemon To run the client in the foreground, with a configuration file, but overriding the bus option: landscape-client --config=my.conf --bus=session If you want to run the client in non-root mode, please use --monitor-only (or add it to the config file as monitor_only = True) and add the user you want to run as to the /etc/default/landscape-client file: DAEMON_USER=landscape
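For example, a minimal configuration file for such a monitor-only, non-root setup might contain just the following (an illustrative sketch; combine it with whatever other directives your deployment needs): [client] monitor_only = True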
SEE ALSO landscape-config (1) AUTHOR Landscape Development Team landscape-client-14.01/man/landscape-message.10000644000175000017500000000170612301414317020752 0ustar  andreasandreas.\"Text automatically generated by txt2man .TH landscape-message 1 "17 June 2013" "" "" .SH NAME \fBlandscape-message \fP- Send a message to the landscape web interface \fB .SH SYNOPSIS .nf .fam C \fBlandscape-message\fP [\fIoptions\fP] [\fIMESSAGE\fP] \.\.\. .fam T .fi .fam T .fi .SH DESCRIPTION Invoking \fBlandscape-message\fP will cause a message to appear in the History section for this computer in the Landscape web UI. .PP If no \fIMESSAGE\fP is specified on the command line, then \fBlandscape-message\fP will read the message from STDIN until EOF. .RE .PP .SH OPTIONS .TP .B \fB--version\fP Show program's version number and exit. .TP .B \fB-h\fP, \fB--help\fP Show this help message and exit. .TP .B \fB-b\fP BUS, \fB--bus\fP=BUS The DBUS bus to use to send the message. .SH EXAMPLES \fBlandscape-message\fP Hello administrator .RE .PP .SH SEE ALSO landscape-client (1) .RE .PP .SH AUTHOR Landscape Development Team landscape-client-14.01/man/landscape-config.10000644000175000017500000001570712301414317020575 0ustar  andreasandreas.\"Text automatically generated by txt2man .TH landscape-config 1 "17 June 2013" "" "" .SH NAME \fBlandscape-config \fP- configure the Landscape management client \fB .SH SYNOPSIS .nf .fam C \fBlandscape-config\fP [\fIoptions\fP] .fam T .fi .fam T .fi .SH DESCRIPTION Before using the landscape-client, it must be configured with account and computer information, which is transmitted to the Landscape server when the client connects. The registration can be performed with or without user interaction. .PP Running the \fBlandscape-config\fP program without arguments will start it interactively, prompting you for the necessary information to run the client. Optionally you can provide command-line arguments (detailed below) to specify default responses. .PP You will be told if the registration was successful or if an error occurred. When registration is successful, Landscape is configured and running on your system. Errors can occur if you provide incorrect registration details or if there are network issues. In the latter case, the client will keep trying to complete the registration in the background. .SH OPTIONS .TP .B \fB--version\fP Show program's version number and exit. .TP .B \fB-h\fP, \fB--help\fP Show this help message and exit. .TP .B \fB-c\fP FILE, \fB--config\fP=FILE Use config from this file (any command line \fIoptions\fP override settings from the file) (default: '/etc/landscape/client.conf'). .TP .B \fB-d\fP PATH, \fB--data-path\fP=PATH The directory to store data files in (default: '/var/lib/landscape/client'). .TP .B \fB-q\fP, \fB--quiet\fP Do not log to the standard output. .TP .B \fB-l\fP FILE, \fB--log-dir\fP=FILE The directory to write log files to (default: '/var/log/landscape'). .TP .B \fB--log-level\fP=LOG_LEVEL One of 'debug', 'info', 'warning', 'error' or 'critical' (default: 'info'). .TP .B \fB--ignore-sigint\fP Ignore interrupt signals. .TP .B \fB--ignore-sigusr1\fP Ignore SIGUSR1 signal to rotate logs. .TP .B \fB-a\fP NAME, \fB--account-name\fP=NAME The account this computer belongs to. .TP .B \fB-p\fP KEY, \fB--registration-key\fP=KEY The account-wide key used for registering clients. .TP .B \fB-t\fP TITLE, \fB--computer-title\fP=TITLE The title of this computer. .TP .B \fB-u\fP URL, \fB--url\fP=URL The server URL to connect to (default: 'https://landscape.canonical.com/message-system'). .TP .B \fB-k\fP SSL_PUBLIC_KEY, \fB--ssl-public-key\fP=SSL_PUBLIC_KEY The SSL CA certificate to verify the server with. Only used if the server URL to which we connect is https. .TP .B \fB--exchange-interval\fP=INTERVAL The number of seconds between server exchanges (default: 900). .TP .B \fB--urgent-exchange-interval\fP=INTERVAL The number of seconds between urgent server exchanges (default: 60). .TP .B \fB--ping-interval\fP=INTERVAL The number of seconds between pings (default: 30). .TP .B \fB--ping-url\fP=PING_URL The URL to perform lightweight exchange initiation with (default: 'http://landscape.canonical.com/ping'). .TP .B \fB--package-monitor-interval\fP=PACKAGE_MONITOR_INTERVAL The interval between package monitor runs (default: 1800). .TP .B \fB--apt-update-interval\fP=APT_UPDATE_INTERVAL The interval between apt update runs (default: 21600). .TP .B \fB--http-proxy\fP=URL The URL of the HTTP proxy, if one is needed. .TP .B \fB--https-proxy\fP=URL The URL of the HTTPS proxy, if one is needed. .TP .B \fB--cloud\fP Set this if this computer is a cloud instance in EC2 or UEC. Read below for details. .TP .B \fB--tags\fP=TAGS Comma-separated list of tag names to be sent to the server. .TP .B \fB--import\fP=FILENAME_OR_URL Filename or URL to import configuration from. Imported \fIoptions\fP behave as if they were passed in the command line, with precedence being given to real command line \fIoptions\fP. .TP .B \fB--script-users\fP=USERS A comma-separated list of users to allow scripts to run. To allow scripts to be run by any user, enter: ALL. .TP .B \fB--server-autodiscovery\fP=BOOLEAN Whether to enable server autodiscovery or not. Defaults to "False". .TP .B \fB--autodiscovery-srv-query-string\fP=QUERY The autodiscovery string for DNS SRV queries. Defaults to "_landscape._tcp.localdomain". .TP .B \fB--autodiscover-a-query-string\fP=QUERY The autodiscovery string for DNS A queries. Defaults to "landscape.localdomain". .TP .B \fB--include-manager-plugins\fP=PLUGINS A comma-separated list of manager plugins to load explicitly. .TP .B \fB-n\fP, \fB--no-start\fP Don't start the client automatically. .TP .B \fB--ok-no-register\fP Return exit code 0 instead of 2 if the client can't be registered. .TP .B \fB--silent\fP Run without manual interaction. .TP .B \fB--disable\fP Stop running clients and disable start at boot. .TP .B \fB--otp\fP=OTP The one-time password (OTP) to use in cloud configuration.
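.SH CONFIGURATION FILE Like \fBlandscape-sysinfo\fP(1), \fBlandscape-config\fP stores its settings in /etc/landscape/client.conf; the long command-line \fIoptions\fP map to directives under the [client] section, with hyphens replaced by underscores (the exact set of supported directives depends on the client version). For example, a file roughly equivalent to the silent registration shown under EXAMPLES below might look like this (all values are illustrative): .PP .nf .fam C [client] account_name = account-name registration_key = secret computer_title = my-server url = https://landscape.canonical.com/message-system .fam T .fi In practice, \fBlandscape-config\fP \fB--silent\fP writes such a file for you, so editing it by hand is rarely necessary.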
.SH CLOUD Landscape has some cloud features that become available when the EC2 or UEC machine instance is started using Landscape and the AMI is one of the official ones provided in the Web user interface. We call these instances "Landscape enabled", because they contain a pre-configured landscape-client that will register the running instance automatically with Landscape as soon as it starts up. .PP You can deploy your own AMI, but if you wish the instance to become "Landscape managed" you need to take a few steps: .IP \(bu 3 make sure the cloud is created in Landscape .IP \(bu 3 either add "CLOUD=1" to /etc/default/landscape-client or use the \fB--cloud\fP switch in the \fBlandscape-config\fP(1) command line .IP \(bu 3 make sure the client is configured to start at boot (i.e., the /etc/default/landscape-client has the line "RUN=1") .PP There is no need to further configure the /etc/landscape/client.conf file with details such as account or key, because when in cloud mode this is all discovered by the client itself. .PP You can avoid all of this if you just re-bundle the AMIs we provide. landscape-client is already configured and prepared for the cloud in them. .SH EXAMPLES Register a machine for the first time, or reconfigure an already registered machine, interactively. Command line parameters suppress interaction for provided values. .PP .nf .fam C landscape-config .fam T .fi Register a machine for the first time, or reconfigure an already registered machine, without requiring user interaction. The client will be configured to start on boot automatically: .PP .nf .fam C landscape-config --silent -a account-name -p secret -t `hostname` .fam T .fi Register a machine with the script execution plugin enabled, without requiring user interaction: .PP .nf .fam C landscape-config --silent -a account-name -p secret -t `hostname` --script-users nobody,landscape,root .fam T .fi Register a machine with some tags: .PP .nf .fam C landscape-config --silent -a account-name -p secret -t `hostname` --tags=server,www .fam T .fi To disable a client, stopping current instances and disabling start at bootup: .PP .nf .fam C landscape-config --disable .fam T .fi .SH SEE ALSO landscape-client (1) .SH AUTHOR Landscape Development Team landscape-client-14.01/scripts/0000755000175000017500000000000012301414317016224 5ustar  andreasandreaslandscape-client-14.01/scripts/landscape-is-cloud-managed0000755000175000017500000000052712301414317023217 0ustar  andreasandreas#!/usr/bin/python import sys, os if os.path.dirname(os.path.abspath(sys.argv[0])) == os.path.abspath("scripts"): sys.path.insert(0, "./") else: from landscape.lib.warning import hide_warnings hide_warnings() from landscape.broker.registration import is_cloud_managed # Exit with 0 if the instance is cloud-managed sys.exit(not is_cloud_managed()) landscape-client-14.01/scripts/landscape-client0000755000175000017500000000076312301414317021366 0ustar  andreasandreas#!/usr/bin/python import sys, os if os.path.dirname(os.path.abspath(sys.argv[0])) == os.path.abspath("scripts"): sys.path.insert(0, "./") else: from landscape.lib.warning import hide_warnings hide_warnings() from landscape.lib.fd import clean_fds # close stray file descriptors now, before we give any other code the chance to # create a critical FD. Even before the reactor is installed!
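# (clean_fds, from landscape.lib.fd, is assumed here to close every descriptor # above stderr (2) while leaving stdin/stdout/stderr alone; see # landscape/lib/fd.py for the exact range it sweeps.)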
clean_fds() from landscape.watchdog import run if __name__ == "__main__": sys.exit(run()) landscape-client-14.01/scripts/landscape-client-settings-mechanism0000755000175000017500000000103412301414317025156 0ustar  andreasandreas#!/usr/bin/env python import dbus from landscape.configuration import LandscapeSetupConfiguration from landscape.ui.model.configuration.mechanism import ( ConfigurationMechanism, INTERFACE_NAME, SERVICE_NAME) from landscape.ui.lib.polkit import listen if __name__ == "__main__": dbus.mainloop.glib.DBusGMainLoop(set_as_default=True) bus = dbus.SystemBus() bus_name = dbus.service.BusName(SERVICE_NAME, bus) config = LandscapeSetupConfiguration() mechanism = ConfigurationMechanism(config, bus_name) listen() landscape-client-14.01/scripts/landscape-monitor0000755000175000017500000000031012301414317021563 0ustar  andreasandreas#!/usr/bin/python import sys, os if os.path.dirname(os.path.abspath(sys.argv[0])) == os.path.abspath("scripts"): sys.path.insert(0, "./") from landscape.monitor.service import run run(sys.argv) landscape-client-14.01/scripts/landscape-dbus-proxy0000644000175000017500000000602112301414317022212 0ustar  andreasandreas#!/usr/bin/env python """ This script is needed in case the client is upgrading from a pre-AMP version using DBus as the messaging mechanism (like the landscape-client package from the lucid archives). This allows the package changer to send package changes to the broker using DBus. This will only be run for old packages depending on DBus; we don't need to depend on DBus in the current version. """ import os import dbus import dbus.service import dbus.glib # This has side effects, don't remove it! from dbus.service import Object, BusName, method from twisted.internet import glib2reactor glib2reactor.install() from twisted.internet import reactor from landscape.lib.bpickle import loads from landscape.lib.lock import lock_path, LockError from landscape.reactor import LandscapeReactor from landscape.deployment import Configuration from landscape.broker.amp import RemoteBrokerConnector BUS_NAME = "com.canonical.landscape.Broker" OBJECT_PATH = "/com/canonical/landscape/Broker" def array_to_string(array): """Convert an L{Array} of L{Byte}s (or integers) to a Python str.""" result = [] for item in array: if item < 0: item = item + 256 result.append(chr(item)) return "".join(result) class BrokerDBusObject(Object): """A DBus-published object proxying L{RemoteBroker.send_message}. It is used when upgrading from a DBus-based version of the Landscape client to the newer AMP-based one, letting the old package-changer process that performs the upgrade communicate with the new version of the client.
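In outline: the old package-changer calls send_message() over the system bus, and this object forwards the decoded payload to the new broker over AMP, stopping the reactor once delivery has completed.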
""" bus_name = BUS_NAME object_path = OBJECT_PATH def __init__(self, config): super(BrokerDBusObject, self).__init__(BusName( self.bus_name, dbus.SystemBus()), object_path=self.object_path) self.config = config @method(BUS_NAME) def send_message(self, message, urgent=True): """Queue the given message in the message exchange.""" message = loads(array_to_string(message)) def cb_connected(broker): result = broker.send_message(message, urgent=True) return result.addCallback(cb_done) def cb_done(ignored): return reactor.stop() landscape_reactor = LandscapeReactor() connector = RemoteBrokerConnector(landscape_reactor, self.config) connected = connector.connect() connected.addCallback(cb_connected) if __name__ == "__main__": config = Configuration() lock_dir = os.path.join(config.data_path, "package") if os.path.isdir(lock_dir): lock_filename = os.path.join(lock_dir, "changer.lock") try: lock_path(lock_filename) except LockError: # The package-changer is running, this means that we're upgrading from # a non-AMP version and that the upgrade is Landscape driven, so let's # expose the DBus broker proxy to give a chance to the package-changer # to send its result message. remote = BrokerDBusObject(config) reactor.run() landscape-client-14.01/scripts/landscape-package-reporter0000755000175000017500000000035712301414317023342 0ustar andreasandreas#!/usr/bin/python import sys, os if os.path.dirname(os.path.abspath(sys.argv[0])) == os.path.abspath("scripts"): sys.path.insert(0, "./") from landscape.package.reporter import main if __name__ == "__main__": main(sys.argv[1:]) landscape-client-14.01/scripts/landscape-release-upgrader0000755000175000017500000000036612301414317023336 0ustar andreasandreas#!/usr/bin/python import sys, os if os.path.dirname(os.path.abspath(sys.argv[0])) == os.path.abspath("scripts"): sys.path.insert(0, "./") from landscape.package.releaseupgrader import main if __name__ == "__main__": main(sys.argv[1:]) landscape-client-14.01/scripts/landscape-client-registration-mechanism0000755000175000017500000000064612301414317026040 0ustar andreasandreas#!/usr/bin/env python import dbus from landscape.ui.model.registration.mechanism import ( RegistrationMechanism, INTERFACE_NAME, SERVICE_NAME) from landscape.ui.lib.polkit import listen if __name__ == "__main__": dbus.mainloop.glib.DBusGMainLoop(set_as_default=True) bus = dbus.SystemBus() bus_name = dbus.service.BusName(SERVICE_NAME, bus) mechanism = RegistrationMechanism(bus_name) listen() landscape-client-14.01/scripts/landscape-manager0000755000175000017500000000031012301414317021506 0ustar andreasandreas#!/usr/bin/python import sys, os if os.path.dirname(os.path.abspath(sys.argv[0])) == os.path.abspath("scripts"): sys.path.insert(0, "./") from landscape.manager.service import run run(sys.argv) landscape-client-14.01/scripts/landscape-client-settings-ui0000755000175000017500000000213612301414317023633 0ustar andreasandreas#!/usr/bin/python import os import sys from gettext import bindtextdomain, textdomain script_dir = os.path.abspath("scripts") if os.path.dirname(os.path.abspath(sys.argv[0])) == script_dir: sys.path.insert(0, "./") else: from landscape.lib.warning import hide_warnings hide_warnings() from landscape.ui.controller.app import SettingsApplicationController from landscape.lib.lock import lock_path, LockError if __name__ == "__main__": lock_file = os.path.join("/var/lock", "landscape-client-settings-ui") try: unlock_path = lock_path(lock_file, timeout=1) except LockError: sys.stderr.write("Another instance of " 
"landscape-client-settings-ui is already " "running.\n") sys.exit(1) else: bindtextdomain("landscape-client", "/usr/share/locale") textdomain("landscape-client") app = SettingsApplicationController(args=sys.argv[1:]) try: app.run(None) except Exception, e: sys.stderr.write("%s\n" % str(e)) finally: unlock_path() landscape-client-14.01/scripts/landscape-sysinfo0000755000175000017500000000115712301414317021600 0ustar andreasandreas#!/usr/bin/python import sys, os try: if os.path.dirname(os.path.abspath(sys.argv[0])) == os.path.abspath("scripts"): sys.path.insert(0, "./") else: from landscape.lib.warning import hide_warnings hide_warnings() from twisted.internet import reactor from landscape.sysinfo.deployment import run except ImportError: # For some reasons the libraries are not importable for now. We are # probably during an upgrade procedure, so let's exit, expecting the # dependencies to be fixed at next run. sys.exit(2) if __name__ == "__main__": run(sys.argv[1:], reactor) landscape-client-14.01/scripts/landscape-client-ui-install0000755000175000017500000000472012301414317023442 0ustar andreasandreas#!/usr/bin/python import os import dbus import sys from gettext import gettext as _, bindtextdomain, textdomain from gi.repository import GObject, Gtk from aptdaemon.client import AptClient from defer import inline_callbacks from aptdaemon import policykit1 from aptdaemon.gtk3widgets import AptProgressDialog from aptdaemon.errors import NotAuthorizedError script = "/usr/bin/landscape-client-settings-ui" def on_transaction_done(transaction, exit): Gtk.main_quit() # Install may have failed if os.path.exists(script): os.execl(sys.executable, sys.executable, script) @inline_callbacks def install_package(): aptclient = AptClient() bus = dbus.SystemBus() name = bus.get_unique_name() action = policykit1.PK_ACTION_INSTALL_OR_REMOVE_PACKAGES flags = policykit1.CHECK_AUTH_ALLOW_USER_INTERACTION try: yield policykit1.check_authorization_by_name(name, action, flags=flags) except NotAuthorizedError: Gtk.main_quit() transaction = yield aptclient.install_packages(["landscape-client-ui"]) transaction.connect("finished", on_transaction_done) dialog = AptProgressDialog(transaction) dialog.run(close_on_finished=True, show_error=True) def main(): bindtextdomain("landscape-client", "/usr/share/locale") textdomain("landscape-client") dialog = Gtk.MessageDialog( flags=Gtk.DialogFlags.MODAL, type=Gtk.MessageType.INFO, buttons=Gtk.ButtonsType.CANCEL) link = "%s" % _("Find out more...") dialog.set_markup( "%s\n\n%s\n\n%s\n\n%s" % ( _("Landscape client"), _("Landscape is an easy-to-use commercial systems management and " "monitoring service offered by Canonical that helps " "administrators manage multiple machines efficiently."), link, _("You need to install Landscape client to be able to configure it. 
" "Do you want to install it now?"))) dialog.set_title(_("Install Landscape client?")) button = dialog.add_button(_("Install"), Gtk.ResponseType.YES) button.grab_focus() result = dialog.run() dialog.destroy() if result == Gtk.ResponseType.YES: install_package() else: Gtk.main_quit() return False if __name__ == "__main__": if not os.path.exists(script): GObject.idle_add(main) Gtk.main() else: os.execl(sys.executable, sys.executable, script) landscape-client-14.01/scripts/landscape-message0000755000175000017500000000027412301414317021531 0ustar andreasandreas#!/usr/bin/python import sys, os if os.path.dirname(os.path.abspath(sys.argv[0])) == os.path.abspath("scripts"): sys.path.insert(0, "./") from landscape.textmessage import run run() landscape-client-14.01/scripts/landscape-config0000755000175000017500000000047212301414317021352 0ustar andreasandreas#!/usr/bin/python import sys, os if os.path.dirname(os.path.abspath(sys.argv[0])) == os.path.abspath("scripts"): sys.path.insert(0, "./") else: from landscape.lib.warning import hide_warnings hide_warnings() from landscape.configuration import main if __name__ == "__main__": main(sys.argv[1:]) landscape-client-14.01/scripts/landscape-package-changer0000755000175000017500000000035612301414317023106 0ustar andreasandreas#!/usr/bin/python import sys, os if os.path.dirname(os.path.abspath(sys.argv[0])) == os.path.abspath("scripts"): sys.path.insert(0, "./") from landscape.package.changer import main if __name__ == "__main__": main(sys.argv[1:]) landscape-client-14.01/scripts/landscape-broker0000755000175000017500000000030712301414317021366 0ustar andreasandreas#!/usr/bin/python import sys, os if os.path.dirname(os.path.abspath(sys.argv[0])) == os.path.abspath("scripts"): sys.path.insert(0, "./") from landscape.broker.service import run run(sys.argv)